repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
poincare_glove | poincare_glove-master/util_scripts/compute_avg_delta_hyperbolicity.py | import argparse
from gensim.models.keyedvectors import VanillaWordEmbeddingsKeyedVectors, Vocab
from glove_code.src.glove_inner import read_all
from numpy import array, uint32, load, sort, log, sqrt, arccosh, exp
from timeit import default_timer
PRINT_EVERY = 1000000
graph_map = {}
max_coocc_count = 0.0
USE_PROBS = False
# Function h on which we will compute avg delta hyp that corresponds to f(dist)=dist and g(coocc_count)=log
# coocc can be either coocc_count or coocc_probability
def h_id_log(coocc):
    """h for f(dist)=dist and g(coocc)=log.

    `coocc` is either a raw co-occurrence count or a co-occurrence
    probability, selected by the module-level USE_PROBS flag.
    """
    global max_coocc_count, USE_PROBS
    # probabilities: weight is -log(P); counts: log(X_max / X)
    if USE_PROBS:
        return -log(coocc)
    return log(max_coocc_count / coocc)
# Function h on which we will compute avg delta hyp that corresponds to f(dist)=-dist^2 and g(coocc)=log
# coocc can be either coocc_count or coocc_probability
def h_sq_log(coocc):
    """h for f(dist)=-dist^2 and g(coocc)=log.

    `coocc` is either a raw co-occurrence count or a co-occurrence
    probability, selected by the module-level USE_PROBS flag.
    """
    global max_coocc_count, USE_PROBS
    # same log weight as h_id_log, under a square root
    if USE_PROBS:
        return sqrt(-log(coocc))
    return sqrt(log(max_coocc_count / coocc))
# Function h on which we will compute avg delta hyp that corresponds to f(dist)=-cosh(dist) and g(coocc)=log
# coocc can be either coocc_count or coocc_probability
def h_cosh_log(coocc):
    """h for f(dist)=-cosh(dist) and g(coocc)=log.

    `coocc` is either a raw co-occurrence count or a co-occurrence
    probability, selected by the module-level USE_PROBS flag.
    """
    global max_coocc_count, USE_PROBS
    if USE_PROBS:
        # -log(coocc) >= 0 for a probability, so the arccosh argument is >= 1
        return arccosh(1 - log(coocc))
    # BUGFIX: log(max_coocc_count / coocc) >= 0, so it must be ADDED to 1 to
    # keep the arccosh argument >= 1. The previous subtraction produced NaN
    # for every pair with coocc < max_coocc_count and was inconsistent with
    # the USE_PROBS branch (which adds the equivalent non-negative term).
    return arccosh(1 + log(max_coocc_count / coocc))
# Function h on which we will compute avg delta hyp that corresponds to f(dist)=-cosh(dist)^2 and g(coocc)=log
# coocc can be either coocc_count or coocc_probability
def h_cosh_sq_log(coocc):
    """h for f(dist)=-cosh(dist)^2 and g(coocc)=log.

    `coocc` is either a raw co-occurrence count or a co-occurrence
    probability, selected by the module-level USE_PROBS flag.
    """
    global max_coocc_count, USE_PROBS
    # sqrt of the non-negative log weight, shifted into arccosh's domain
    if USE_PROBS:
        return arccosh(1 + sqrt(-log(coocc)))
    return arccosh(1 + sqrt(log(max_coocc_count / coocc)))
# Function h on which we will compute avg delta hyp that corresponds to f(dist)=-log(dist^2 + 1) and g(coocc)=log
# coocc can be either coocc_count or coocc_probability
def h_log_of_sq_minus_one_log(coocc):
    """h for f(dist)=-log(dist^2 + 1) and g(coocc)=log.

    `coocc` is either a raw co-occurrence count or a co-occurrence
    probability, selected by the module-level USE_PROBS flag.
    """
    global max_coocc_count, USE_PROBS
    # invert f: dist = sqrt(exp(weight) - 1) with weight = -log(P) or log(X_max/X)
    if USE_PROBS:
        return sqrt(exp(-log(coocc)) - 1)
    return sqrt(exp(log(max_coocc_count / coocc)) - 1)
# Function h on which we will compute avg delta hyp that corresponds to f(dist)=-log(dist^2) and g(coocc)=log
# coocc can be either coocc_count or coocc_probability
def h_log_of_sq_log(coocc):
    """h for f(dist)=-log(dist^2) and g(coocc)=log.

    `coocc` is either a raw co-occurrence count or a co-occurrence
    probability, selected by the module-level USE_PROBS flag.
    """
    global max_coocc_count, USE_PROBS
    # invert f: dist = sqrt(exp(weight)) with weight = -log(P) or log(X_max/X)
    if USE_PROBS:
        return sqrt(exp(-log(coocc)))
    return sqrt(exp(log(max_coocc_count / coocc)))
def load_vocab(wv, vocab_file, use_glove_format, restrict_vocab):
    """Populate the keyed-vectors object `wv` in place from a vocabulary file.

    :param wv: keyed-vectors object exposing index2word, vocab, index2freq
    :param vocab_file: path to the vocabulary file
    :param use_glove_format: True for original GloVe "word count" lines (the
        line number is the 0-based word index); False for "index\\tword\\tcount"
        lines whose explicit index is 1-based
    :param restrict_vocab: keep only the first `restrict_vocab` lines (most
        frequent words); values <= 0 keep everything
    :return: number of words loaded
    """
    vocab_size = 0
    with open(vocab_file, "r") as f:
        wv.index2freq = []
        all_lines = f.readlines()[:restrict_vocab] if restrict_vocab > 0 else f.readlines()
        for index, line in enumerate(all_lines):
            if use_glove_format:
                # GloVe format: the enumerate position is already the word index.
                # (The previous no-op `index = index` has been removed.)
                word, count = line.strip().split(" ")
            else:
                index, word, count = line.strip().split("\t")
                index = int(index) - 1  # indexing starts at 1 in the file; for co-occ we use 0-based indexing
            wv.index2word.append(word)
            wv.vocab[word] = Vocab(index=index, count=int(count))
            wv.index2freq.append(count)
            vocab_size += 1
    # counts were collected as strings; convert them to an unsigned array once
    wv.index2freq = array(wv.index2freq).astype(uint32)

    # Unused members from VanillaWordEmbeddingsKeyedVectors.
    wv.vectors_norm = None

    print("Loaded vocabulary with {} words".format(vocab_size))

    return vocab_size
if __name__ == "__main__":
    # Estimate the average Gromov delta-hyperbolicity of the co-occurrence
    # graph under several candidate distance functions h (defined above).
    parser = argparse.ArgumentParser()
    parser.add_argument('--use_our_format', dest='use_glove_format', action='store_false',
                        help='Use our format for reading the vocabulary and the co-occ matrix, instead of the format '
                             'from the original GloVe code.')
    parser.add_argument('--vocab_file', type=str,
                        help='Filename which contains the vocabulary.')
    parser.add_argument('--coocc_file', type=str,
                        help='Filename which contains the coocc pairs.')
    parser.add_argument('--quad_file', type=str,
                        help='Filename from which to load the list of quads.')
    parser.add_argument('--root', type=str,
                        default='~/Documents/Master/Thesis',
                        help='Path to the root folder that contains msc_tifreaa, data etc.')
    parser.add_argument('--restrict_vocab', type=int, default=400000,
                        help='Only use the `restrict_vocab` most frequent words')
    parser.add_argument('--use_probs', dest='use_probs', action='store_true',
                        help='Use log-probabilities log(P_ij) as edge weights instead of log-counts log(X_ij)')
    parser.set_defaults(use_glove_format=True, use_probs=False)
    args = parser.parse_args()

    USE_PROBS = args.use_probs

    # Vocabulary is needed for word frequencies (used to turn counts into probabilities).
    wv = VanillaWordEmbeddingsKeyedVectors(0)
    vocab_size = load_vocab(
        wv,
        vocab_file=args.vocab_file,
        use_glove_format=args.use_glove_format,
        restrict_vocab=args.restrict_vocab)

    # Load all the co-occ pairs in memory, as a map
    print("Reading the co-occ pairs.")
    start = default_timer()
    num_pairs, graph_map, max_coocc_count = read_all(
        use_glove_format=args.use_glove_format, filename=args.coocc_file, return_pairs=True)
    print("Finished reading {} co-occ pairs in {:.2f}. Max co-occ count = {:.4f}".format(
        num_pairs, default_timer() - start, max_coocc_count
    ))

    # Pre-sampled 4-tuples of word ids on which delta-hyperbolicity is estimated.
    quad_list = load(args.quad_file)

    functions = {
        "h_id_log": h_id_log,
        "h_sq_log": h_sq_log,
        "h_cosh_log": h_cosh_log,
        "h_cosh_sq_log": h_cosh_sq_log,
        "h_log_of_sq_minus_one_log": h_log_of_sq_minus_one_log,
        "h_log_of_sq_log": h_log_of_sq_log,
    }
    for func_name, func in functions.items():
        d_avg, count_d_avg = 0.0, 0
        delta_avg, count_delta_avg = 0.0, 0
        overall_start, start = default_timer(), default_timer()
        for quad in quad_list:
            x, y, v, w = sort(quad)

            # Extract pairwise distances.
            # The three ways to pair up the four points; graph_map keys are
            # "i j" strings while index2freq is accessed with id-1, so the quad
            # ids are presumably 1-based — verify against the quad generator.
            sums = []
            if USE_PROBS:
                sums.append(func(graph_map[str(x)+" "+str(y)] / wv.index2freq[x-1] / wv.index2freq[y-1]) + func(graph_map[str(v)+" "+str(w)] / wv.index2freq[v-1] / wv.index2freq[w-1]))
                sums.append(func(graph_map[str(x)+" "+str(v)] / wv.index2freq[x-1] / wv.index2freq[v-1]) + func(graph_map[str(y)+" "+str(w)] / wv.index2freq[y-1] / wv.index2freq[w-1]))
                sums.append(func(graph_map[str(x)+" "+str(w)] / wv.index2freq[x-1] / wv.index2freq[w-1]) + func(graph_map[str(y)+" "+str(v)] / wv.index2freq[y-1] / wv.index2freq[v-1]))
            else:
                sums.append(func(graph_map[str(x)+" "+str(y)]) + func(graph_map[str(v)+" "+str(w)]))
                sums.append(func(graph_map[str(x)+" "+str(v)]) + func(graph_map[str(y)+" "+str(w)]))
                sums.append(func(graph_map[str(x)+" "+str(w)]) + func(graph_map[str(y)+" "+str(v)]))
            sums = sorted(sums)

            # Four-point delta: half the gap between the two largest pair sums.
            d_avg += sums[0] + sums[1] + sums[2]
            delta_avg += float(sums[2] - sums[1]) / 2
            count_d_avg += 6      # 6 pairwise distances per quad
            count_delta_avg += 1  # 1 delta per quad

            if count_delta_avg % PRINT_EVERY == 0:
                print("[func={}] Processed {} quads in {:.2f} sec. avg_d={:.4f}, avg_delta={:.4f}".format(func_name,
                      count_delta_avg, default_timer() - start, d_avg / count_d_avg, delta_avg / count_delta_avg))
                start = default_timer()

        print("[func={}] Finished processing {} quads. Took {:.2f} sec".format(func_name, count_delta_avg,
              default_timer() - overall_start))
        d_avg = d_avg / count_d_avg
        delta_avg = delta_avg / count_delta_avg
        print("[func={}] d_avg = {:.4f}".format(func_name, d_avg))
        print("[func={}] delta_avg = {:.4f}".format(func_name, delta_avg))
        # scale-free hyperbolicity measure: delta relative to the average distance
        print("[func={}] 2 * delta_avg / d_avg = {:.4f}".format(func_name, 2 * delta_avg / d_avg))
| 8,128 | 42.239362 | 184 | py |
poincare_glove | poincare_glove-master/util_scripts/split_similarity_set.py | import random
import sys
# Split a similarity dataset into a validation file and a test file.
# Usage: split_similarity_set.py <input> <validation_out> <test_out>
init_file = sys.argv[1]
validation_file = sys.argv[2]
test_file = sys.argv[3]

# Fraction of the input lines that goes to the validation split.
VALIDATION_RATIO = 0.5

with open(init_file, "r") as fin, open(validation_file, "w") as fv, open(test_file, "w") as ft:
    lines = fin.readlines()
    num_validation = int(VALIDATION_RATIO * len(lines))
    # Sample without replacement; validation lines are written in sample order.
    valid_idxs = random.sample(range(len(lines)), num_validation)
    for idx in valid_idxs:
        fv.write(lines[idx])
    # Use a set for membership tests: the original `i not in valid_idxs`
    # scanned a list, making this loop accidentally O(n^2).
    valid_idx_set = set(valid_idxs)
    for i, line in enumerate(lines):
        if i not in valid_idx_set:
            ft.write(line)
| 571 | 25 | 95 | py |
allosaurus | allosaurus-master/setup.py | from setuptools import setup,find_packages
# Package metadata for distributing allosaurus via setuptools/pip.
setup(
    name='allosaurus',
    version='1.0.2',
    description='a multilingual phone recognizer',
    author='Xinjian Li',
    author_email='xinjianl@cs.cmu.edu',
    url="https://github.com/xinjli/allosaurus",
    # include every package found under the repository root
    packages=find_packages(),
    # runtime dependencies installed automatically with the package
    install_requires=[
        'scipy',
        'numpy',
        'resampy',
        'panphon',
        'torch',
        'editdistance',
    ]
)
| 412 | 19.65 | 49 | py |
allosaurus | allosaurus-master/allosaurus/app.py | from allosaurus.am.utils import *
from pathlib import Path
from allosaurus.audio import read_audio
from allosaurus.pm.factory import read_pm
from allosaurus.am.factory import read_am
from allosaurus.lm.factory import read_lm
from allosaurus.bin.download_model import download_model
from allosaurus.model import resolve_model_name, get_all_models
from argparse import Namespace
from io import BytesIO
def read_recognizer(inference_config_or_name='latest', alt_model_path=None):
    """Build a Recognizer from a model name or a full inference config.

    :param inference_config_or_name: either a model-name string (e.g. 'latest')
        or an argparse Namespace carrying the full inference configuration
    :param alt_model_path: optional Path to a model directory outside the package
    :return: a ready-to-use Recognizer (pm + am + lm)
    """
    if alt_model_path:
        # alt directory missing entirely -> fetch the requested model into it
        if not alt_model_path.exists():
            download_model(inference_config_or_name, alt_model_path)
    # download specified model automatically if no model exists
    if len(get_all_models()) == 0:
        download_model('latest', alt_model_path)

    # create default config if input is the model's name
    if isinstance(inference_config_or_name, str):
        model_name = resolve_model_name(inference_config_or_name, alt_model_path)
        inference_config = Namespace(model=model_name, device_id=-1, lang='ipa', approximate=False, prior=None)
    else:
        assert isinstance(inference_config_or_name, Namespace)
        inference_config = inference_config_or_name

    if alt_model_path:
        model_path = alt_model_path / inference_config.model
    else:
        model_path = Path(__file__).parent / 'pretrained' / inference_config.model

    # 'latest' may resolve to a model that was never downloaded locally
    if inference_config.model == 'latest' and not model_path.exists():
        download_model(inference_config, alt_model_path)

    assert model_path.exists(), f"{inference_config.model} is not a valid model"

    # create pm (pm stands for preprocess model: audio -> feature etc..)
    pm = read_pm(model_path, inference_config)

    # create am (acoustic model: feature -> logits )
    am = read_am(model_path, inference_config)

    # create lm (language model: logits -> phone)
    lm = read_lm(model_path, inference_config)

    return Recognizer(pm, am, lm, inference_config)
class Recognizer:
    """End-to-end phone recognizer: preprocess model -> acoustic model -> language model."""

    def __init__(self, pm, am, lm, config):
        """
        :param pm: preprocess model (audio -> feature)
        :param am: acoustic model (feature -> log-probabilities)
        :param lm: language model / decoder (log-probabilities -> phones)
        :param config: inference Namespace (device_id etc.)
        """
        self.pm = pm
        self.am = am
        self.lm = lm
        self.config = config

    def is_available(self, lang_id):
        # check whether this lang id is available
        return self.lm.inventory.is_available(lang_id)

    def recognize(self, filename, lang_id='ipa', topk=1, emit=1.0, timestamp=False):
        """Recognize the phones of a single wav file (path or BytesIO).

        :param filename: wav path, Path, or an in-memory BytesIO wav
        :param lang_id: phone inventory to decode with ('ipa' = all phones)
        :param topk: number of phones emitted per frame
        :param emit: emission factor; larger emits more phones
        :param timestamp: attach approximate timestamps to the output
        :return: decoded phone string produced by the language model
        """
        # recognize a single file
        # filename check (skipping for BytesIO objects)
        if not isinstance(filename, BytesIO):
            assert str(filename).endswith('.wav'), "only wave file is supported in allosaurus"

        # load wav audio
        audio = read_audio(filename)

        # extract feature
        feat = self.pm.compute(audio)

        # add batch dim
        feats = np.expand_dims(feat, 0)
        feat_len = np.array([feat.shape[0]], dtype=np.int32)

        tensor_batch_feat, tensor_batch_feat_len = move_to_tensor([feats, feat_len], self.config.device_id)

        tensor_batch_lprobs = self.am(tensor_batch_feat, tensor_batch_feat_len)

        # move log-probs back to host memory before decoding
        if self.config.device_id >= 0:
            batch_lprobs = tensor_batch_lprobs.cpu().detach().numpy()
        else:
            batch_lprobs = tensor_batch_lprobs.detach().numpy()

        token = self.lm.compute(batch_lprobs[0], lang_id, topk, emit=emit, timestamp=timestamp)
        return token
| 3,316 | 35.450549 | 111 | py |
allosaurus | allosaurus-master/allosaurus/audio.py | import wave
import numpy as np
from pathlib import Path
import resampy
def read_audio(filename, header_only=False, channel=0):
    """
    read_audio will read a raw wav and return an Audio object

    :param filename: path (str or Path) to the wav file
    :param header_only: only load header without samples
    :param channel: which channel to keep when the file is stereo
    :return: a single-channel Audio object
    """
    if isinstance(filename, Path):
        filename = str(filename)

    # initialize audio
    audio = Audio()

    # `with` guarantees the wave handle is closed even if a check below fails
    # (the original leaked the handle when an assert fired)
    with wave.open(filename) as wf:
        # set stream basic info
        channel_number = wf.getnchannels()

        # check the input channel is valid
        assert channel < channel_number

        # set wav header; the returned Audio is always mono, hence channel_number=1
        audio.set_header(sample_rate=wf.getframerate(), sample_size=wf.getnframes(), channel_number=1,
                         sample_width=wf.getsampwidth())

        # set audio
        if not header_only:
            x = wf.readframes(wf.getnframes())
            assert (channel_number <= 2)

            audio_bytes = np.frombuffer(x, dtype='int16')

            # get the first channel if stereo (samples are interleaved)
            if channel_number == 2:
                audio_bytes = audio_bytes[channel::2]

            audio.samples = audio_bytes

            # when some utils piping to stdout, sample size might not be correct (e.g: lame --decode)
            audio.sample_size = len(audio.samples)

    return audio
def resample_audio(audio, target_sample_rate):
    """
    Return `audio` converted to `target_sample_rate`.

    :param audio: source Audio object
    :param target_sample_rate: desired rate in Hz
    :return: the original object when no conversion is needed,
        otherwise a new Audio at the requested rate
    """
    # nothing to do when the rate already matches
    if audio.sample_rate == target_sample_rate:
        return audio

    converted = resampy.resample(audio.samples, audio.sample_rate, target_sample_rate)
    return Audio(converted, target_sample_rate)
class Audio:
    def __init__(self, samples=None, sample_rate=8000):
        """
        Audio is the basic data structure used in this package.
        It is used to capture fundamental info about audio files such as frequency and samples.

        :param samples: initial sample array (defaults to empty)
        :param sample_rate: sampling frequency in Hz
        """
        # default parameters
        if samples is None:
            samples = []

        self.sample_rate = sample_rate
        # mono, 16-bit samples
        self.channel_number = 1
        self.sample_width = 2

        # segments
        self.segments = []

        # all samples (also sets sample_size)
        self.set_samples(samples)
def __str__(self):
wave_info = "<Audio sample rate: "+str(self.sample_rate)+", samples: "\
+ str(self.sample_size) + ", second: " + str(self.sample_size/self.sample_rate) + " > "
return wave_info
    def __repr__(self):
        # repr mirrors str for convenient printing/logging
        return self.__str__()
    def __len__(self):
        # length of the audio is its number of samples
        return self.sample_size
    def set_samples(self, samples):
        """Replace the sample buffer and keep sample_size in sync."""
        self.samples = samples
        self.sample_size = len(samples)
    def empty(self):
        # True when there are no samples at all
        return self.samples is None or self.sample_size == 0
    def clear(self):
        # drop all samples (sample_size becomes 0 via set_samples)
        self.set_samples([])
def extend(self, new_audio):
"""
extend wave stream
:param new_audio:
:return:
"""
# resample if sample_rate does not match
if self.sample_rate != new_audio.sample_rate:
audio = resample_audio(new_audio, self.sample_rate)
samples = audio.samples
else:
samples = new_audio.samples
# extend
new_samples = np.append(self.samples, samples)
self.set_samples(new_samples)
    def set_header(self, sample_rate=8000, sample_size=0, channel_number=1, sample_width=2):
        """Set the wav-header fields without touching the sample buffer."""
        self.sample_rate = sample_rate
        self.sample_size = sample_size
        self.channel_number = channel_number
        self.sample_width = sample_width
def duration(self):
return self.sample_size/self.sample_rate | 3,766 | 24.281879 | 107 | py |
allosaurus | allosaurus-master/allosaurus/model.py | from pathlib import Path
import shutil
def get_all_models(alt_model_path=None):
    """
    List every locally available model directory, newest name first.

    :param alt_model_path: optional directory to scan instead of the
        bundled 'pretrained' folder
    :return: list of Path objects sorted by name in reverse order
    """
    model_dir = alt_model_path if alt_model_path else Path(__file__).parent / 'pretrained'
    return list(sorted(model_dir.glob('*'), reverse=True))
def get_model_path(model_name, alt_model_path=None):
    """
    get model path by name, verify its a valid path

    :param model_name: str
    :param alt_model_path: optional directory containing the models
    :return: model path
    """
    if alt_model_path:
        model_dir = alt_model_path
    else:
        model_dir = Path(__file__).parent / 'pretrained'

    # BUGFIX: resolve against the same directory we join with below; the
    # previous call ignored alt_model_path and always checked the default
    # 'pretrained' folder, so valid alt-directory models failed the assert.
    resolved_model_name = resolve_model_name(model_name, alt_model_path)
    assert resolved_model_name != "none", model_name+" is not a valid model name. please check by list_model"

    return model_dir / resolved_model_name
def copy_model(src_model_name, tgt_model_name):
    """
    copy a model to a new model

    :param src_model_name: name of an existing model
    :param tgt_model_name: name for the copy; must not exist yet
    :return:
    """
    # verify the source path is not empty (get_model_path asserts validity)
    src_model_path = get_model_path(src_model_name)

    # verify the target path is empty
    model_dir = Path(__file__).parent / 'pretrained'
    tgt_model_path = model_dir / tgt_model_name
    assert not tgt_model_path.exists(), \
        "provided model name "+tgt_model_name+" has already exist. Consider another name or delete the existing one"

    shutil.copytree(str(src_model_path), str(tgt_model_path))
def delete_model(model_name):
    """Delete a local model directory after an interactive yes/no confirmation."""
    model_path = get_model_path(model_name)

    answer = input(f"you will delete {model_path}? [Y|N]")
    if answer.lower() in ['y', 'yes', 'true']:
        print("deleting ", model_path)
        shutil.rmtree(str(model_path))
def resolve_model_name(model_name='latest', alt_model_path=None):
    """
    Map a user-supplied model name to an actual local model directory name.

    :param model_name: requested name, or 'latest' for the newest local model
    :param alt_model_path: optional directory containing the models
    :return: the resolved model name, or "none" when nothing matches
    """
    available = get_all_models(alt_model_path)

    # 'latest' = first entry (models are sorted newest-name first)
    if model_name == 'latest':
        return available[0].name

    for candidate in available:
        if candidate.name == model_name:
            return model_name

    return "none"
allosaurus | allosaurus-master/allosaurus/run.py | from allosaurus.app import read_recognizer
from allosaurus.model import get_all_models, resolve_model_name
from allosaurus.bin.download_model import download_model
from pathlib import Path
import argparse
if __name__ == '__main__':
    # Command-line entry point: transcribe a wav file (or a directory of
    # wav files) into phones and write the result to stdout or a file.
    parser = argparse.ArgumentParser('Allosaurus phone recognizer')
    parser.add_argument('-d', '--device_id', type=int, default=-1, help='specify cuda device id to use, -1 means no cuda and will use cpu for inference')
    parser.add_argument('-m', '--model', type=str, default='latest', help='specify which model to use. default is to use the latest local model')
    parser.add_argument('-l', '--lang', type=str, default='ipa',help='specify which language inventory to use for recognition. default is to use all phone inventory')
    parser.add_argument('-i', '--input', type=str, required=True, help='specify your input wav file/directory')
    parser.add_argument('-o', '--output', type=str, default='stdout', help='specify output file. the default will be stdout')
    parser.add_argument('-k', '--topk', type=int, default=1, help='output k phone for each emitting frame')
    parser.add_argument('-t', '--timestamp', type=bool, default=False, help='attach *approximate* timestamp for each phone, note that the timestamp might not be accurate')
    parser.add_argument('-p', '--prior', type=str, required=False, default=None, help='supply prior to adjust phone predictions')
    parser.add_argument('-e', '--emit', type=float, required=False, default=1.0, help='specify how many phones to emit. A larger number can emit more phones and a smaller number would suppress emission, default is 1.0')
    parser.add_argument('-a', '--approximate', type=bool, default=False, help='the phone inventory can still hardly to cover all phones. You can use turn on this flag to map missing phones to other similar phones to recognize. The similarity is measured with phonological features')
    args = parser.parse_args()

    # download specified model automatically if no model exists
    if len(get_all_models()) == 0:
        download_model('latest')

    # resolve model's name
    model_name = resolve_model_name(args.model)
    if model_name == "none":
        print("Model ", model_name, " does not exist. Please download this model or use an existing model in list_model")
        exit(0)

    args.model = model_name

    # create recognizer (the whole args namespace acts as the inference config)
    recognizer = read_recognizer(args)

    # output file descriptor; stays None when printing to stdout
    output_fd = None
    if args.output != 'stdout':
        output_fd = open(args.output, 'w', encoding='utf-8')

    # input file/path
    input_path = Path(args.input)
    if input_path.is_dir():
        # directory mode: transcribe every wav inside, in name order
        wav_list = sorted(list(input_path.glob('*.wav')))
        for wav_path in wav_list:
            phones = recognizer.recognize(str(wav_path), args.lang, args.topk, args.emit, args.timestamp)

            # output format would be different when using timestamp
            if args.timestamp:
                if output_fd:
                    output_fd.write('#'+wav_path.name+'\n'+phones+'\n')
                else:
                    print('#'+wav_path.name+'\n'+phones)
            else:
                # save to file or print to stdout
                if output_fd:
                    output_fd.write(wav_path.name+' '+phones+'\n')
                else:
                    print(wav_path.name+' '+phones)
    else:
        # check file format
        assert args.input.endswith('.wav'), " Error: Please use a wav file. other audio files can be converted to wav by sox"

        # run inference
        phones = recognizer.recognize(args.input, args.lang, args.topk, args.emit, args.timestamp)
        if output_fd:
            output_fd.write(phones+'\n')
        else:
            print(phones)
if output_fd:
output_fd.close() | 3,809 | 47.227848 | 282 | py |
allosaurus | allosaurus-master/allosaurus/__init__.py | 0 | 0 | 0 | py | |
allosaurus | allosaurus-master/allosaurus/pm/utils.py | import numpy as np
def feature_cmvn(feature):
    """Per-utterance cepstral mean and variance normalisation.

    :param feature: 2-D array of shape (frames, dims)
    :return: feature with zero mean and unit variance per dimension
    """
    n_frames = feature.shape[0]
    mean = np.sum(feature, axis=0) / n_frames
    # E[x^2] - E[x]^2 gives the per-dimension variance
    mean_of_squares = np.sum(feature * feature, axis=0) / n_frames
    std = (mean_of_squares - mean * mean) ** 0.5
    return (feature - mean) / std
def feature_window(feature, window_size=3):
    """Stack each frame with its two neighbours and subsample by 3.

    :param feature: 2-D array (frames, dims)
    :param window_size: only 3 is supported
    :return: array of shape (ceil(frames/3), 3*dims)
    """
    assert window_size == 3, "only window size 3 is supported"
    # previous frame | current frame | next frame, concatenated per row
    stacked = np.concatenate((np.roll(feature, 1, axis=0), feature, np.roll(feature, -1, axis=0)), axis=1)
    return stacked[::3, ]
| 588 | 24.608696 | 106 | py |
allosaurus | allosaurus-master/allosaurus/pm/mfcc.py | from allosaurus.pm.feature import mfcc
from allosaurus.pm.utils import *
from allosaurus.audio import resample_audio
import numpy as np
class MFCC:
    def __init__(self, config):
        """
        MFCC feature extractor configured from a pm config namespace.

        :param config: namespace with model, sample_rate, cmvn, window_size,
            window_shift, feature_window, dtype and the filterbank/cepstrum
            settings consumed later in compute()
        """
        self.model = config.model

        # feature model config
        self.config = config

        # sample rate
        self.sample_rate = config.sample_rate

        # cmvn
        self.cmvn = config.cmvn

        # samples in each window (window_size is given in seconds)
        self.window_size = int(config.window_size * config.sample_rate)

        # overlap between windows (window_shift is given in seconds)
        self.window_shift = int(config.window_shift * config.sample_rate)

        # feature window
        self.feature_window = config.feature_window

        # last complete window starting sample (index of sample)
        self.prev_window_sample = 0

        # last complete mfcc window index (index of window)
        self.prev_window_index = 0

        # list of mfcc features
        self.mfcc_windows = []

        # float32 or float64
        self.dtype = config.dtype
def __str__(self):
return "MFCC ("+str(vars(self.config))+")"
    def __repr__(self):
        # repr mirrors str for convenient printing/logging
        return self.__str__()
def compute(self, audio):
"""
compute feature for audio
:param audio:
:return: mfcc feature
"""
# make sample rate consistent
audio = resample_audio(audio, self.sample_rate)
# validate sample rate
assert self.config.sample_rate == audio.sample_rate, " sample rate of audio is "+str(audio.sample_rate)+" , but model is "+str(self.config.sample_rate)
# get feature and convert into correct type (usually float32)
feat = mfcc(audio.samples, samplerate=self.config.sample_rate, numcep=self.config.cep_size, nfilt=self.config.bank_size,
lowfreq=self.config.low_freq, highfreq=self.config.high_freq, useEnergy=self.config.use_energy, dither=self.config.dither).astype(self.dtype)
# apply cmvn if specified
if self.cmvn == 'speaker':
feat = feature_cmvn(feat)
# subsampling and windowing
if self.feature_window == 3:
feat = feature_window(feat)
return feat | 2,179 | 28.066667 | 161 | py |
allosaurus | allosaurus-master/allosaurus/pm/factory.py | from allosaurus.pm.mfcc import MFCC
import json
from argparse import Namespace
def read_pm(model_path, inference_config):
"""
read feature extraction model
:param pm_config:
:return:
"""
pm_config = Namespace(**json.load(open(str(model_path / 'pm_config.json'))))
assert pm_config.model == 'mfcc_hires', 'only mfcc_hires is supported for allosaurus now'
assert pm_config.backend == 'numpy', 'only numpy backend is supported for allosaurus now'
model = MFCC(pm_config)
return model | 525 | 26.684211 | 93 | py |
allosaurus | allosaurus-master/allosaurus/pm/__init__.py | 0 | 0 | 0 | py | |
allosaurus | allosaurus-master/allosaurus/pm/kdict.py | import numpy
import struct
import functools
import os.path
import gzip
import bz2
from pathlib import Path
class KaldiWriter:
def __init__(self, path=None, scp=True):
"""
writer of BOTH ark and scp
:param path:
:param scp:
"""
self.scp = scp
if path:
self.open(path)
self.ark_offset = 0
    def open(self, path):
        """Open (truncating) the .ark/.scp pair derived from `path`.

        `path` may already carry a .ark or .scp suffix; the common prefix is
        used for both files. Any pre-existing files are deleted first.
        """
        # remove scp or ark suffix
        path = str(path)
        if path.endswith(".scp") or path.endswith(".ark"):
            path = path[:-4]

        # mkdir
        p = Path(path).parent
        p.mkdir(parents=True, exist_ok=True)

        self.ark_path = str(path) + '.ark'
        self.scp_path = str(path) + '.scp'

        # delete old files
        if os.path.exists(self.ark_path):
            os.remove(self.ark_path)

        if os.path.exists(self.scp_path):
            os.remove(self.scp_path)

        # append mode: subsequent write() calls keep extending the archive
        self.ark_writer = open(self.ark_path, "ab")
        self.scp_writer = open(self.scp_path, "a")

        # current byte offset inside the ark (used for scp pointers)
        self.ark_offset = self.ark_writer.tell()
    def write(self, utt_ids, feats):
        """Append one utterance (or a batch) to the ark and index it in the scp.

        :param utt_ids: a single utterance id, or a list of ids
        :param feats: the matching feature matrix, or list of matrices
        """
        # write batch
        if isinstance(utt_ids, list):
            pointers = []
            for utt_id, feat in zip(utt_ids, feats):
                self.ark_offset += write_string(self.ark_writer, utt_id)
                # the scp pointer targets the matrix payload, i.e. the
                # position right after the utterance id
                pointers.append("%s:%d" % (self.ark_path, self.ark_offset))
                self.ark_offset += write_matrix(self.ark_writer, feat)

            #self.ark_writer.flush()

            for utt_id, pointer in zip(utt_ids, pointers):
                self.scp_writer.write("%s %s\n" % (utt_id, pointer))

            #self.scp_writer.flush()
        else:
            # write single instance
            utt_id = utt_ids
            feat = feats

            self.ark_offset += write_string(self.ark_writer, utt_id)
            pointer = "%s:%d" % (self.ark_path, self.ark_offset)
            self.ark_offset += write_matrix(self.ark_writer, feat)
            self.scp_writer.write("%s %s\n" % (utt_id, pointer))
    def close(self):
        # flush and release both output files
        self.ark_writer.close()
        self.scp_writer.close()
def smart_open(filename, mode='rb', *args, **kwargs):
    '''
    Opens a file "smartly":
      * If the filename has a ".gz" or ".bz2" extension, compression is handled
        automatically;
      * If the file is to be read and does not exist, corresponding files with
        a ".gz" or ".bz2" extension will be attempted.
    '''
    readers = {'.gz': gzip.GzipFile, '.bz2': bz2.BZ2File}

    # in read mode, fall back to a compressed sibling when the plain file is missing
    if 'r' in mode and not os.path.exists(filename):
        for ext in readers:
            if os.path.exists(filename + ext):
                filename += ext
                break

    opener = readers.get(os.path.splitext(filename)[1], open)
    return opener(filename, mode, *args, **kwargs)
def read_string(f):
    """Read a space-terminated token from the binary stream `f`.

    :raises ValueError: when EOF is hit before the terminating space
    """
    chars = []
    while True:
        ch = f.read(1).decode('utf-8')
        if ch == "":
            raise ValueError("EOF encountered while reading a string.")
        if ch == " ":
            return "".join(chars)
        chars.append(ch)
def read_integer(f):
    """Read a Kaldi binary integer: one width byte n, then n little-endian
    payload bytes.

    :param f: binary stream
    :return: the decoded non-negative integer
    """
    n = ord(f.read(1))
    # payload is little-endian; reversing it lets us decode as big-endian
    a = f.read(n)[::-1]
    try:
        return int.from_bytes(a, byteorder='big', signed=False)
    except:
        # fallback kept from the original for inputs int.from_bytes rejects
        return functools.reduce(lambda x, y: x * 256 + ord(y), a, 0)
def read_compressed_matrix(fd, format):
    """ Read a compressed matrix,
    see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
    methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),

    :param fd: binary stream positioned just after the 'CM' format tag
    :param format: not used inside this function
    :return: decompressed float32 matrix of shape (rows, cols)
    """
    # Format of header 'struct',
    global_header = numpy.dtype([('minvalue', 'float32'), ('range', 'float32'), ('num_rows', 'int32'),
                                 ('num_cols', 'int32')])  # member '.format' is not written,
    per_col_header = numpy.dtype([('percentile_0', 'uint16'), ('percentile_25', 'uint16'), ('percentile_75', 'uint16'),
                                  ('percentile_100', 'uint16')])

    # Read global header,
    globmin, globrange, rows, cols = numpy.frombuffer(fd.read(16), dtype=global_header, count=1)[0]

    # The data is structed as [Colheader, ... , Colheader, Data, Data , .... ]
    #                         {       cols      }{       size       }
    # Dequantize the per-column percentile headers: uint16 -> float via the
    # global min/range (1.52590218966964e-05 == 1/65535).
    col_headers = numpy.frombuffer(fd.read(cols * 8), dtype=per_col_header, count=cols)
    col_headers = numpy.array(
        [numpy.array([x for x in y]) * globrange * 1.52590218966964e-05 + globmin for y in col_headers],
        dtype=numpy.float32)
    data = numpy.reshape(numpy.frombuffer(fd.read(cols * rows), dtype='uint8', count=cols * rows),
                         newshape=(cols, rows))  # stored as col-major,

    mat = numpy.zeros((cols, rows), dtype='float32')

    p0 = col_headers[:, 0].reshape(-1, 1)
    p25 = col_headers[:, 1].reshape(-1, 1)
    p75 = col_headers[:, 2].reshape(-1, 1)
    p100 = col_headers[:, 3].reshape(-1, 1)

    # Each byte picks one of three linear segments between the percentiles:
    # [0,64] -> [p0,p25], (64,192] -> [p25,p75], (192,255] -> [p75,p100].
    mask_0_64 = (data <= 64)
    mask_193_255 = (data > 192)
    mask_65_192 = (~(mask_0_64 | mask_193_255))

    mat += (p0 + (p25 - p0) / 64. * data) * mask_0_64.astype(numpy.float32)
    mat += (p25 + (p75 - p25) / 128. * (data - 64)) * mask_65_192.astype(numpy.float32)
    mat += (p75 + (p100 - p75) / 63. * (data - 192)) * mask_193_255.astype(numpy.float32)

    return mat.T  # transpose! col-major -> row-major,
def read_compressed_matrix_shape(fd):
    """ Read only the (rows, cols) of a compressed matrix header,
    see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
    methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),
    """
    # 16-byte global header: min value, range, rows, cols
    header_dtype = numpy.dtype([('minvalue', 'float32'), ('range', 'float32'),
                                ('num_rows', 'int32'), ('num_cols', 'int32')])
    header = numpy.frombuffer(fd.read(16), dtype=header_dtype, count=1)[0]
    return header['num_rows'], header['num_cols']
def read_matrix(f, dtype=numpy.float32):
    """Read one Kaldi binary matrix (DM = float64, FM = float32, CM = compressed).

    :param f: binary stream positioned at the '\\0B' binary-mode marker
    :param dtype: dtype of the returned array for the DM/FM payloads
    :return: 2-D numpy array of shape (rows, cols)
    :raises ValueError: on a missing binary header or an unknown format tag
    """
    header = f.read(2).decode('utf-8')
    if header != "\0B":
        raise ValueError("Binary mode header ('\0B') not found when attempting to read a matrix.")
    format = read_string(f)
    if format == "DM":
        nRows = read_integer(f)
        nCols = read_integer(f)
        # 8 bytes per little-endian float64 value
        data = struct.unpack("<%dd" % (nRows * nCols), f.read(nRows * nCols * 8))
        data = numpy.array(data, dtype=dtype)
        return data.reshape(nRows, nCols)
    elif format == "FM":
        nRows = read_integer(f)
        nCols = read_integer(f)
        # 4 bytes per little-endian float32 value
        data = struct.unpack("<%df" % (nRows * nCols), f.read(nRows * nCols * 4))
        data = numpy.array(data, dtype=dtype)
        return data.reshape(nRows, nCols)
    elif format == "CM":
        data = read_compressed_matrix(f, format)
        return data
    else:
        raise ValueError(
            "Unknown matrix format '%s' encountered while reading; currently supported formats are DM (float64) and FM (float32)." % format)
def read_matrix_format(f):
    """Consume the binary header and return the matrix format tag (e.g. 'DM', 'FM', 'CM')."""
    header = f.read(2).decode('utf-8')
    if header != "\0B":
        raise ValueError("Binary mode header ('\0B') not found when attempting to read a matrix.")
    format = read_string(f)
    return format
def read_matrix_shape(f):
    """Return (rows, cols) of the next matrix, seeking past its payload.

    Leaves `f` positioned at the start of the following record so this can be
    called in a loop to index an archive without decoding the data.
    """
    header = f.read(2).decode('utf-8')
    if header != "\0B":
        raise ValueError("Binary mode header ('\0B') not found when attempting to read a matrix.")
    format = read_string(f)
    # for compressed shape
    if format == 'CM':
        return read_compressed_matrix_shape(f)

    # for non compression shape
    nRows = read_integer(f)
    nCols = read_integer(f)
    # skip the payload without reading it into memory
    if format == "DM":
        f.seek(nRows * nCols * 8, os.SEEK_CUR)
    elif format == "FM":
        f.seek(nRows * nCols * 4, os.SEEK_CUR)
    else:
        raise ValueError(
            "Unknown matrix format '%s' encountered while reading; currently supported formats are DM (float64) and FM (float32)." % format)
    return nRows, nCols
def write_string(f, s):
    """Write `s` followed by the single-space terminator; return bytes written."""
    encoded = (s + " ").encode('utf-8')
    return f.write(encoded)
def write_integer(f, a):
    """Write `a` in Kaldi binary form (width byte + little-endian int32);
    return the number of bytes written."""
    payload = struct.pack("<i", a)
    return f.write(chr(len(payload)).encode('utf-8') + payload)
def write_matrix(f, data):
    """Write `data` as a Kaldi binary matrix and return the bytes written.

    float64 arrays are stored as 'DM', float32 as 'FM'.
    :raises ValueError: for any other dtype
    """
    cnt = 0
    cnt += f.write('\0B'.encode('utf-8'))  # Binary data header
    if str(data.dtype) == "float64":
        cnt += write_string(f, "DM")
        cnt += write_integer(f, data.shape[0])
        cnt += write_integer(f, data.shape[1])
        cnt += f.write(struct.pack("<%dd" % data.size, *data.ravel()))
    elif str(data.dtype) == "float32":
        cnt += write_string(f, "FM")
        cnt += write_integer(f, data.shape[0])
        cnt += write_integer(f, data.shape[1])
        cnt += f.write(struct.pack("<%df" % data.size, *data.ravel()))
    else:
        raise ValueError(
            "Unsupported matrix format '%s' for writing; currently supported formats are float64 and float32." % str(
                data.dtype))
    return cnt
def read_matrix_by_offset(arkfile, offset, dtype=numpy.float32):
    """Random-access read: seek to `offset` in `arkfile` and decode one matrix.

    `offset` is the byte position stored in an scp pointer (after the utt id).
    """
    with smart_open(arkfile, "rb") as g:
        g.seek(offset)
        feature = read_matrix(g, dtype)
    return feature
def read_scp_offset(filename, limit=numpy.inf):
utt_ids = []
ark_files = []
offsets = []
with smart_open(filename, "r") as f:
for line in f:
utt_id, pointer = line.strip().split()
p = pointer.rfind(":")
arkfile, offset = pointer[:p], int(pointer[p + 1:])
ark_files.append(arkfile)
offsets.append(offset)
utt_ids.append(utt_id)
if len(utt_ids) == limit: break
return utt_ids, ark_files, offsets | 10,350 | 32.716612 | 140 | py |
allosaurus | allosaurus-master/allosaurus/pm/preprocess.py | # This file includes routines for basic signal processing including framing and computing power spectra.
# Author: James Lyons 2012
import decimal
import numpy as np
import math
import logging
def round_up_power_of_two(x):
return 1 if x == 0 else 2**(x - 1).bit_length()
def round_half_up(number):
return int(decimal.Decimal(number).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP))
def rolling_window(a, window, step=1):
# http://ellisvalentiner.com/post/2017-03-21-np-strides-trick
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)[::step]
def framesig(sig, frame_len, frame_step, dither=1.0, preemph=0.97, remove_dc_offset=True, wintype='hamming',
stride_trick=True):
"""Frame a signal into overlapping frames.
:param sig: the audio signal to frame.
:param frame_len: length of each frame measured in samples.
:param frame_step: number of samples after the start of the previous frame that the next frame should begin.
:param winfunc: the analysis window to apply to each frame. By default no window is applied.
:param stride_trick: use stride trick to compute the rolling window and window multiplication faster
:returns: an array of frames. Size is NUMFRAMES by frame_len.
"""
slen = len(sig)
frame_len = int(round_half_up(frame_len))
frame_step = int(round_half_up(frame_step))
if slen <= frame_len:
numframes = 1
else:
numframes = 1 + ((slen - frame_len) // frame_step)
# check kaldi/src/feat/feature-window.h
# the last frame_len-1 points might be cutoff
padsignal = sig[:(numframes - 1) * frame_step + frame_len]
if wintype is 'povey':
win = np.empty(frame_len)
for i in range(frame_len):
win[i] = (0.5 - 0.5 * np.cos(2 * np.pi / (frame_len - 1) * i)) ** 0.85
else: # the hamming window
win = np.hamming(frame_len)
if stride_trick:
frames = rolling_window(padsignal, window=frame_len, step=frame_step)
else:
indices = np.tile(np.arange(0, frame_len), (numframes, 1)) + np.tile(
np.arange(0, numframes * frame_step, frame_step), (frame_len, 1)).T
indices = np.array(indices, dtype=np.int32)
frames = padsignal[indices]
win = np.tile(win, (numframes, 1))
frames = frames.astype(np.float32)
raw_frames = np.zeros(frames.shape)
for frm in range(frames.shape[0]):
# frames[frm, :] = do_dither(frames[frm, :], dither) # dither
frames[frm, :] = do_remove_dc_offset(frames[frm, :]) # remove dc offset
raw_frames[frm, :] = frames[frm, :]
frames[frm, :] = do_preemphasis(frames[frm, :], preemph) # preemphasize
return frames * win, raw_frames
def deframesig(frames, siglen, frame_len, frame_step, winfunc=lambda x: np.ones((x,))):
"""Does overlap-add procedure to undo the action of framesig.
:param frames: the array of frames.
:param siglen: the length of the desired signal, use 0 if unknown. Output will be truncated to siglen samples.
:param frame_len: length of each frame measured in samples.
:param frame_step: number of samples after the start of the previous frame that the next frame should begin.
:param winfunc: the analysis window to apply to each frame. By default no window is applied.
:returns: a 1-D signal.
"""
frame_len = round_half_up(frame_len)
frame_step = round_half_up(frame_step)
numframes = np.shape(frames)[0]
assert np.shape(frames)[1] == frame_len, '"frames" matrix is wrong size, 2nd dim is not equal to frame_len'
indices = np.tile(np.arange(0, frame_len), (numframes, 1)) + np.tile(
np.arange(0, numframes * frame_step, frame_step), (frame_len, 1)).T
indices = np.array(indices, dtype=np.int32)
padlen = (numframes - 1) * frame_step + frame_len
if siglen <= 0: siglen = padlen
rec_signal = np.zeros((padlen,))
window_correction = np.zeros((padlen,))
win = winfunc(frame_len)
for i in range(0, numframes):
window_correction[indices[i, :]] = window_correction[
indices[i, :]] + win + 1e-15 # add a little bit so it is never zero
rec_signal[indices[i, :]] = rec_signal[indices[i, :]] + frames[i, :]
rec_signal = rec_signal / window_correction
return rec_signal[0:siglen]
def magspec(frames, NFFT):
"""Compute the magnitude spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the magnitude spectrum of the corresponding frame.
"""
if np.shape(frames)[1] > NFFT:
logging.warn(
'frame length (%d) is greater than FFT size (%d), frame will be truncated. Increase NFFT to avoid.',
np.shape(frames)[1], NFFT)
complex_spec = np.fft.rfft(frames, NFFT)
return np.absolute(complex_spec)
def powspec(frames, NFFT):
"""Compute the power spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the power spectrum of the corresponding frame.
"""
return np.square(magspec(frames, NFFT))
def logpowspec(frames, NFFT, norm=1):
"""Compute the log power spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:param norm: If norm=1, the log power spectrum is normalised so that the max value (across all frames) is 0.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the log power spectrum of the corresponding frame.
"""
ps = powspec(frames, NFFT);
ps[ps <= 1e-30] = 1e-30
lps = 10 * np.log10(ps)
if norm:
return lps - np.max(lps)
else:
return lps
def do_dither(signal, dither_value=1.0):
signal += np.random.normal(size=signal.shape) * dither_value
return signal
def do_remove_dc_offset(signal):
signal -= np.mean(signal)
return signal
def do_preemphasis(signal, coeff=0.97):
"""perform preemphasis on the input signal.
:param signal: The signal to filter.
:param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.
:returns: the filtered signal.
"""
return np.append((1 - coeff) * signal[0], signal[1:] - coeff * signal[:-1]) | 6,984 | 41.078313 | 138 | py |
allosaurus | allosaurus-master/allosaurus/pm/feature.py | # calculate filterbank features. Provides e.g. fbank and mfcc features for use in ASR applications
# Author: James Lyons 2012
import numpy
from allosaurus.pm import preprocess
from scipy.fftpack import dct
def mfcc(signal,samplerate=16000,winlen=0.025,winstep=0.01,numcep=13,
nfilt=23,lowfreq=20,highfreq=None,dither=1.0,remove_dc_offset=True,preemph=0.97,
ceplifter=22,useEnergy=True,wintype='povey'):
"""Compute MFCC features from an audio signal.
:param signal: the audio signal from which to compute features. Should be an N*1 array
:param samplerate: the samplerate of the signal we are working with.
:param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
:param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
:param numcep: the number of cepstrum to return, default 13
:param nfilt: the number of filters in the filterbank, default 26.
:param nfft: the FFT size. Default is 512.
:param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
:param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
:param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
:param ceplifter: apply a lifter to final cepstral coefficients. 0 is no lifter. Default is 22.
:param appendEnergy: if this is true, the zeroth cepstral coefficient is replaced with the log of the total frame energy.
:param winfunc: the analysis window to apply to each frame. By default no window is applied. You can use numpy window functions here e.g. winfunc=numpy.hamming
:returns: A numpy array of size (NUMFRAMES by numcep) containing features. Each row holds 1 feature vector.
"""
feat,energy = fbank(signal,samplerate,winlen,winstep,nfilt,lowfreq,highfreq,dither,remove_dc_offset,preemph,wintype)
feat = numpy.log(feat)
feat = dct(feat, type=2, axis=1, norm='ortho')[:,:numcep]
feat = lifter(feat,ceplifter)
if useEnergy: feat[:,0] = numpy.log(energy) # replace first cepstral coefficient with log of frame energy
return feat
def fbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,
nfilt=40,lowfreq=0,highfreq=None,dither=1.0,remove_dc_offset=True, preemph=0.97,
wintype='hamming'):
"""Compute Mel-filterbank energy features from an audio signal.
:param signal: the audio signal from which to compute features. Should be an N*1 array
:param samplerate: the samplerate of the signal we are working with.
:param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
:param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
:param nfilt: the number of filters in the filterbank, default 26.
:param nfft: the FFT size. Default is 512.
:param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
:param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
:param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
:param winfunc: the analysis window to apply to each frame. By default no window is applied. You can use numpy window functions here e.g. winfunc=numpy.hamming
winfunc=lambda x:numpy.ones((x,))
:returns: 2 values. The first is a numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector. The
second return value is the energy in each frame (total energy, unwindowed)
"""
highfreq= highfreq or samplerate/2
if highfreq < 0:
highfreq = samplerate/2 + highfreq
nfft = preprocess.round_up_power_of_two(int(samplerate * winlen))
frames,raw_frames = preprocess.framesig(signal, winlen * samplerate, winstep * samplerate, dither, preemph, remove_dc_offset, wintype)
pspec = preprocess.powspec(frames, nfft) # nearly the same until this part
energy = numpy.sum(raw_frames**2,1) # this stores the raw energy in each frame
energy = numpy.where(energy == 0,numpy.finfo(float).eps,energy) # if energy is zero, we get problems with log
fb = get_filterbanks(nfilt,nfft,samplerate,lowfreq,highfreq)
feat = numpy.dot(pspec,fb.T) # compute the filterbank energies
feat = numpy.where(feat == 0,numpy.finfo(float).eps,feat) # if feat is zero, we get problems with log
return feat,energy
def logfbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,
nfilt=40,lowfreq=64,highfreq=None,dither=1.0,remove_dc_offset=True,preemph=0.97,wintype='hamming'):
"""Compute log Mel-filterbank energy features from an audio signal.
:param signal: the audio signal from which to compute features. Should be an N*1 array
:param samplerate: the samplerate of the signal we are working with.
:param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
:param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
:param nfilt: the number of filters in the filterbank, default 26.
:param nfft: the FFT size. Default is 512.
:param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
:param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
:param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
:returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector.
"""
feat,energy = fbank(signal,samplerate,winlen,winstep,nfilt,lowfreq,highfreq,dither, remove_dc_offset,preemph,wintype)
return numpy.log(feat)
def hz2mel(hz):
"""Convert a value in Hertz to Mels
:param hz: a value in Hz. This can also be a numpy array, conversion proceeds element-wise.
:returns: a value in Mels. If an array was passed in, an identical sized array is returned.
"""
return 1127 * numpy.log(1+hz/700.0)
def mel2hz(mel):
"""Convert a value in Mels to Hertz
:param mel: a value in Mels. This can also be a numpy array, conversion proceeds element-wise.
:returns: a value in Hertz. If an array was passed in, an identical sized array is returned.
"""
return 700 * (numpy.exp(mel/1127.0)-1)
def get_filterbanks(nfilt=26,nfft=512,samplerate=16000,lowfreq=0,highfreq=None):
"""Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond
to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1)
:param nfilt: the number of filters in the filterbank, default 20.
:param nfft: the FFT size. Default is 512.
:param samplerate: the samplerate of the signal we are working with. Affects mel spacing.
:param lowfreq: lowest band edge of mel filters, default 0 Hz
:param highfreq: highest band edge of mel filters, default samplerate/2
:returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter.
"""
highfreq= highfreq or samplerate/2
assert highfreq <= samplerate/2, "highfreq is greater than samplerate/2"
# compute points evenly spaced in mels
lowmel = hz2mel(lowfreq)
highmel = hz2mel(highfreq)
# check kaldi/src/feat/Mel-computations.h
fbank = numpy.zeros([nfilt,nfft//2+1])
mel_freq_delta = (highmel-lowmel)/(nfilt+1)
for j in range(0,nfilt):
leftmel = lowmel+j*mel_freq_delta
centermel = lowmel+(j+1)*mel_freq_delta
rightmel = lowmel+(j+2)*mel_freq_delta
for i in range(0,nfft//2):
mel=hz2mel(i*samplerate/nfft)
if mel>leftmel and mel<rightmel:
if mel<centermel:
fbank[j,i]=(mel-leftmel)/(centermel-leftmel)
else:
fbank[j,i]=(rightmel-mel)/(rightmel-centermel)
return fbank
def lifter(cepstra, L=22):
"""Apply a cepstral lifter the the matrix of cepstra. This has the effect of increasing the
magnitude of the high frequency DCT coeffs.
:param cepstra: the matrix of mel-cepstra, will be numframes * numcep in size.
:param L: the liftering coefficient to use. Default is 22. L <= 0 disables lifter.
"""
if L > 0:
nframes,ncoeff = numpy.shape(cepstra)
n = numpy.arange(ncoeff)
lift = 1 + (L/2.)*numpy.sin(numpy.pi*n/L)
return lift*cepstra
else:
# values of L <= 0, do nothing
return cepstra
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A numpy array of size (NUMFRAMES by number of features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A numpy array of size (NUMFRAMES by number of features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = len(feat)
denominator = 2 * sum([i**2 for i in range(1, N+1)])
delta_feat = numpy.empty_like(feat)
padded = numpy.pad(feat, ((N, N), (0, 0)), mode='edge') # padded version of feat
for t in range(NUMFRAMES):
delta_feat[t] = numpy.dot(numpy.arange(-N, N+1), padded[t : t+2*N+1]) / denominator # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
| 9,424 | 53.796512 | 163 | py |
allosaurus | allosaurus-master/allosaurus/bin/update_phone.py | from pathlib import Path
from allosaurus.lm.inventory import Inventory
from allosaurus.model import get_model_path
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser('Update language inventory')
parser.add_argument('-l', '--lang', type=str, required=True, help='specify which language inventory to update.')
parser.add_argument('-m', '--model', type=str, default='latest', help='specify which model inventory')
parser.add_argument('-i', '--input', type=str, required=True, help='your new inventory file')
args = parser.parse_args()
model_path = get_model_path(args.model)
inventory = Inventory(model_path)
lang = args.lang
# verify lang is not ipa as it is an alias to the entire inventory
assert args.lang != 'ipa', "ipa is not a proper lang to update. use list_lang to find a proper language"
assert lang.lower() in inventory.lang_ids or lang.lower() in inventory.glotto_ids, f'language {args.lang} is not supported. Please verify it is in the language list'
new_unit_file = Path(args.input)
# check existence of the file
assert new_unit_file.exists(), args.input+' does not exist'
# update this new unit
inventory.update_unit(lang, new_unit_file) | 1,250 | 38.09375 | 169 | py |
allosaurus | allosaurus-master/allosaurus/bin/restore_phone.py | from pathlib import Path
from allosaurus.lm.inventory import Inventory
from allosaurus.model import get_model_path
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser('Restore language inventory')
parser.add_argument('-l', '--lang', type=str, required=True, help='specify which language inventory to restore.')
parser.add_argument('-m', '--model', type=str, default='latest', help='specify which model inventory')
args = parser.parse_args()
model_path = get_model_path(args.model)
inventory = Inventory(model_path)
lang = args.lang
# verify lang is not ipa as it is an alias to the entire inventory
assert args.lang != 'ipa', "ipa is not a proper lang to update. use list_lang to find a proper language"
assert lang.lower() in inventory.lang_ids or lang.lower() in inventory.glotto_ids, f'language {args.lang} is not supported. Please verify it is in the language list'
# restore this lang's inventory
inventory.restore_unit(lang) | 1,012 | 37.961538 | 169 | py |
allosaurus | allosaurus-master/allosaurus/bin/prep_feat.py | import argparse
from pathlib import Path
from allosaurus.model import resolve_model_name
from allosaurus.audio import read_audio
from allosaurus.pm.factory import read_pm
from allosaurus.pm.kdict import KaldiWriter
from tqdm import tqdm
def prepare_feature(data_path, model):
model_path = Path(__file__).parent.parent / 'pretrained' / model
# create pm (pm stands for preprocess model: audio -> feature etc..)
pm = read_pm(model_path, None)
# data path should be pointing the absolute path
data_path = data_path.absolute()
# writer for feats
feat_writer = KaldiWriter(data_path / 'feat')
# writer for the shape of each utterance
# format: utt_id shape[0] shape[1]
shape_writer = open(data_path / 'shape', 'w')
for line in tqdm(open(data_path / 'wave', 'r', encoding='utf-8').readlines()):
fields = line.strip().split()
utt_id = fields[0]
audio_path = fields[1]
assert Path(audio_path).exists(), audio_path+" does not exist!"
audio = read_audio(audio_path)
# extract feature
feat = pm.compute(audio)
# write shape
shape_writer.write(f'{utt_id} {feat.shape[0]} {feat.shape[1]}\n')
feat_writer.write(utt_id, feat)
feat_writer.close()
shape_writer.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser('allosaurus tool to extract audio feature for fine-tuning')
parser.add_argument('--path', required=True, type=str, help='path to the directory containing the wave file')
parser.add_argument('--model', type=str, default='latest', help='specify the model you want to fine-tune')
args = parser.parse_args()
data_path = Path(args.path)
wave_path = data_path / 'wave'
assert wave_path.exists(), "the path directory should contain a wave file, please check README.md for details"
# resolve model's name
model_name = resolve_model_name(args.model)
if model_name == "none":
print("Model ", model_name, " does not exist. Please download this model or use an existing model in list_model")
exit(0)
args.model = model_name
# extract feature
prepare_feature(data_path, args.model)
| 2,205 | 29.638889 | 121 | py |
allosaurus | allosaurus-master/allosaurus/bin/prep_token.py | import argparse
from pathlib import Path
from allosaurus.model import resolve_model_name
from allosaurus.lm.inventory import *
from tqdm import tqdm
def prepare_token(data_path, model, lang_id):
model_path = Path(__file__).parent.parent / 'pretrained' / model
#assert model_path.exists(), f"{model} is not a valid model"
inventory = Inventory(model_path)
unit = inventory.get_unit(lang_id)
writer = open(str(data_path / 'token'), 'w', encoding='utf-8')
for line in tqdm(open(data_path / 'text', 'r', encoding='utf-8').readlines()):
fields = line.strip().split()
utt_id = fields[0]
phones = fields[1:]
id_lst = unit.get_ids(phones)
writer.write(utt_id+' '+' '.join(map(str, id_lst))+'\n')
writer.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser('allosaurus tool to assign token id for fine-tuning')
parser.add_argument('--path', required=True, type=str, help='path to the directory containing the text file')
parser.add_argument('--model', type=str, default='latest', help='specify the model you want to fine-tune')
parser.add_argument('--lang', type=str, default='epi', help='specify the ISO language id for your target language')
args = parser.parse_args()
data_path = Path(args.path)
text_path = data_path / 'text'
assert text_path.exists(), "the path directory should contain a text file, please check README.md for details"
# resolve model's name
model_name = resolve_model_name(args.model)
if model_name == "none":
print("Model ", model_name, " does not exist. Please download this model or use an existing model in list_model")
exit(0)
args.model = model_name
# extract token
prepare_token(data_path, args.model, args.lang) | 1,806 | 33.09434 | 121 | py |
allosaurus | allosaurus-master/allosaurus/bin/download_model.py | from pathlib import Path
import tarfile
from urllib.request import urlopen
import io
import argparse
import os
def download_model(model_name=None, alt_model_path=None):
if model_name is None:
model_name = 'latest'
if alt_model_path:
model_dir = alt_model_path
else:
model_dir = (Path(__file__).parent.parent) / 'pretrained'
if not (model_dir / model_name).exists():
try:
url = 'https://github.com/xinjli/allosaurus/releases/download/v1.0/' + model_name + '.tar.gz'
print("downloading model ", model_name)
print("from: ", url)
print("to: ", str(model_dir))
print("please wait...")
resp = urlopen(url)
compressed_files = io.BytesIO(resp.read())
files = tarfile.open(fileobj=compressed_files)
files.extractall(str(model_dir))
except Exception as e:
print("Error: could not download the model", e)
(model_dir / model_name).rmdir()
if __name__ == '__main__':
parser = argparse.ArgumentParser('a utility to download pretrained models')
parser.add_argument('-m', '--model', default='latest', help='specify which model to download. A list of downloadable models are available on Github')
args = parser.parse_args()
download_model(args.model) | 1,350 | 31.166667 | 154 | py |
allosaurus | allosaurus-master/allosaurus/bin/list_lang.py | from pathlib import Path
from allosaurus.model import get_model_path
from allosaurus.lm.inventory import Inventory
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser('List language phone inventory')
parser.add_argument('-l', '--lang', type=str, default='ipa', help='specify which language inventory to use for recognition. default "ipa" is to use all phone inventory')
parser.add_argument('-m', '--model', type=str, default='latest', help='specify which model inventory')
args = parser.parse_args()
model_path = get_model_path(args.model)
inventory = Inventory(model_path)
print("Available Languages")
for lang_id, glotto_id, lang_name in zip(inventory.lang_ids, inventory.glotto_ids, inventory.lang_names):
lang_name = lang_name.encode('ascii', 'ignore')
print('- ISO639-3: ', lang_id, 'Glotto Code', glotto_id, ' name: ', lang_name)
| 926 | 43.142857 | 182 | py |
allosaurus | allosaurus-master/allosaurus/bin/list_model.py | from allosaurus.model import get_all_models
if __name__ == '__main__':
models = get_all_models()
if len(models) == 0:
print("No models are available, you can maually download a model with download command or just run inference to download the latest one automatically")
else:
print("Available Models")
for i, model in enumerate(models):
if i == 0:
print('-', model.name, "(default)")
else:
print('-', model.name) | 508 | 30.8125 | 159 | py |
allosaurus | allosaurus-master/allosaurus/bin/list_phone.py | from pathlib import Path
from allosaurus.lm.inventory import Inventory
from allosaurus.model import get_model_path
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser('List language phone inventory')
parser.add_argument('-l', '--lang', type=str, default='ipa', help='specify which language inventory to use for recognition. default "ipa" is to use all phone inventory')
parser.add_argument('-m', '--model', type=str, default='latest', help='specify which model inventory')
parser.add_argument('-a', '--approximate', type=bool, default=False, help='the phone inventory can still hardly to cover all phones. You can use turn on this flag to map missing phones to other similar phones to recognize. The similarity is measured with phonological features')
args = parser.parse_args()
model_path = get_model_path(args.model)
inventory = Inventory(model_path)
if args.lang == 'ipa':
print(' '.join(list(inventory.unit.id_to_unit.values())[1:]))
else:
lang = args.lang
assert lang.lower() in inventory.lang_ids or lang.lower() in inventory.glotto_ids, f'language {args.lang} is not supported. Please verify it is in the language list'
mask = inventory.get_mask(args.lang.lower(), approximation=args.approximate)
unit = mask.target_unit
print(' '.join(list(unit.id_to_unit.values())[1:]))
if args.approximate:
mask.print_maps() | 1,468 | 46.387097 | 282 | py |
allosaurus | allosaurus-master/allosaurus/bin/__init__.py | 0 | 0 | 0 | py | |
allosaurus | allosaurus-master/allosaurus/bin/remove_model.py | from allosaurus.model import delete_model
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser('an allosaurus util to delete model')
parser.add_argument('-m', '--model', required=True, help='model name to be deleted')
args = parser.parse_args()
delete_model(args.model) | 311 | 30.2 | 88 | py |
allosaurus | allosaurus-master/allosaurus/bin/write_phone.py | from pathlib import Path
from allosaurus.lm.inventory import Inventory
from allosaurus.lm.unit import write_unit
from allosaurus.model import get_model_path
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser('Write out current phone file')
parser.add_argument('-l', '--lang', type=str, required=True, help='specify which language inventory to update.')
parser.add_argument('-m', '--model', type=str, default='latest', help='specify which model inventory')
parser.add_argument('-o', '--output', type=str, required=True, help='write out your current phone file.')
parser.add_argument('-f', '--format', type=str, default='simple', choices=['simple', 'kaldi'], help='select your output format')
args = parser.parse_args()
model_path = get_model_path(args.model)
inventory = Inventory(model_path)
lang = args.lang
unit = inventory.get_unit(lang)
write_unit(unit, args.output, args.format) | 962 | 39.125 | 132 | py |
allosaurus | allosaurus-master/allosaurus/bin/adapt_model.py | import argparse
from pathlib import Path
from allosaurus.model import copy_model
from allosaurus.am.factory import transfer_am
from allosaurus.am.trainer import Trainer
from allosaurus.am.loader import read_loader
if __name__ == '__main__':
parser = argparse.ArgumentParser("fine-tune an existing model to your target dataset")
# required options
parser.add_argument('--pretrained_model', required=True, type=str, help='the pretrained model id which you want to start with' )
parser.add_argument('--new_model', required=True, type=str, help='the new fine-tuned model id, this id show in your model list and will be available later for your inference')
parser.add_argument('--path', required=True, type=str, help='the data path, it should contain train directory and validate directory')
parser.add_argument('--lang', required=True, type=str, help='the language id of your target dataset')
parser.add_argument('--device_id', required=True, type=int, help='gpu cuda_device_id. use -1 if you do not have gpu')
# non required options
parser.add_argument('--batch_frame_size', type=int, default=6000, help='this indicates how many frame in each batch, if you get any memory related errors, please use a lower value for this size')
parser.add_argument('--criterion', type=str, default='ctc', choices=['ctc'], help='criterion, only ctc now')
parser.add_argument('--optimizer', type=str, default='sgd', choices=['sgd'], help='optimizer, only sgd now')
parser.add_argument('--lr', type=float, default=0.01, help='learning rate')
parser.add_argument('--grad_clip', type=float, default=5.0, help='grad clipping')
parser.add_argument('--epoch', type=int, default=10, help='number of epoch to run')
parser.add_argument('--log', type=str, default='none',help='file to store training logs. do not save if none')
parser.add_argument('--verbose', type=bool, default=True, help='print all training logs on stdout')
parser.add_argument('--report_per_batch', type=int, default=10, help='report training stats every N epoch')
train_config = parser.parse_args()
# prepare training and validating loaders
data_path = Path(train_config.path)
train_loader = read_loader(data_path / 'train', train_config)
validate_loader = read_loader(data_path / 'validate', train_config)
# initialize the target model path with the old model
copy_model(train_config.pretrained_model, train_config.new_model)
# setup the target model path and create model
model = transfer_am(train_config)
# setup trainer
trainer = Trainer(model, train_config)
# start training
trainer.train(train_loader, validate_loader)
# close datasets and loaders
train_loader.close()
validate_loader.close() | 2,912 | 56.117647 | 202 | py |
allosaurus | allosaurus-master/allosaurus/lm/inventory.py | import json
from allosaurus.lm.mask import *
class Inventory:
def __init__(self, model_path, inference_config=None):
self.model_path = model_path
self.lang_names = []
self.lang_ids = []
self.glotto_ids = []
self.lang2phonefile = dict()
self.inference_config = inference_config
# load all available inventories
langs = json.load(open(self.model_path / 'inventory' / 'index.json', 'r', encoding='utf-8'))
# load all phones list
self.unit = read_unit(str(self.model_path / 'phone.txt'))
for lang in langs:
lang_name = lang['LanguageName']
iso_id = lang['ISO6393']
glotto_id = lang['GlottoCode']
phone_text = lang['phonelists']
self.lang_names.append(lang_name.lower())
self.lang_ids.append(iso_id.lower())
self.glotto_ids.append(glotto_id)
# register both iso_id and glottocode id
self.lang2phonefile[iso_id.lower()] = phone_text
self.lang2phonefile[glotto_id.lower()] = phone_text
def is_available(self, lang_id):
return lang_id in self.lang2phonefile
def get_unit(self, lang_id):
"""
load a unit specified by the lang_id
Args:
lang_id: ISO id
Returns:
"""
# we can also specify a unit path as lang_id, makes it easier to customize
if lang_id not in self.lang2phonefile:
if not Path(lang_id).exists():
assert lang_id in self.lang2phonefile, "Language "+lang_id+" is not available !"
else:
target_unit = read_unit(str(lang_id))
return target_unit
# search customized file first, if not exist use the default one.
updated_unit_file = self.model_path / 'inventory' / ('updated_'+self.lang2phonefile[lang_id])
if updated_unit_file.exists():
unit_file = updated_unit_file
else:
unit_file = self.model_path / 'inventory' / self.lang2phonefile[lang_id]
target_unit = read_unit(str(unit_file))
return target_unit
def update_unit(self, lang_id, unit_file):
"""
update the existing unit with a new unit file
Args:
lang_id:
unit_file:
Returns:
"""
assert lang_id in self.lang2phonefile, "Language "+lang_id+" is not available !"
# load the new unit file and validate its format
new_unit = read_unit(unit_file)
# the model path it should be stored
updated_unit_file = self.model_path / 'inventory' / ('updated_'+self.lang2phonefile[lang_id])
# save the new file
write_unit(new_unit, updated_unit_file)
def restore_unit(self, lang_id):
"""
restore the original phone units
Args:
lang_id:
Returns:
"""
assert lang_id in self.lang2phonefile, "Language "+lang_id+" is not available !"
# the updated unit file
updated_unit_file = self.model_path / 'inventory' / ('updated_'+self.lang2phonefile[lang_id])
# check whether it has an updated file
assert updated_unit_file.exists(), "language "+lang_id+" does not have any customized inventory."
# delete this file
updated_unit_file.unlink()
def get_mask(self, lang_id=None, approximation=False):
# use its unit as the mask if the recognition target is the entire inventory
if lang_id is None or lang_id == 'ipa':
target_unit = self.unit
else:
target_unit = self.get_unit(lang_id)
return UnitMask(self.unit, target_unit, approximation, self.inference_config) | 3,764 | 28.186047 | 105 | py |
allosaurus | allosaurus-master/allosaurus/lm/articulatory.py | import panphon
import numpy as np
class Articulatory:
def __init__(self):
self.feature_table = panphon.FeatureTable()
def feature(self, phone):
try:
feats = self.feature_table.word_to_vector_list(phone, numeric=True)
except:
if len(phone) == 2:
phone = phone[0]+' '+phone[1]
feats = self.feature_table.word_to_vector_list(phone, numeric=True)
else:
feats = []
if len(feats) == 0:
feats = np.zeros(24)
else:
feats = np.array(feats[0], dtype=np.float32)
return feats
def similarity(self, p1, p2):
"""
similarity between phone 1 and phone 2
:param p1:
:param p2:
:return:
"""
return np.inner(self.feature(p1), self.feature(p2))
def most_similar(self, target_phone, phone_cands):
max_phone = None
max_score = -1000000
target_feature = self.feature(target_phone)
for phone in phone_cands:
score = np.inner(self.feature(phone), target_feature)
if score > max_score:
max_phone = phone
max_score = score
return max_phone | 1,247 | 23 | 83 | py |
allosaurus | allosaurus-master/allosaurus/lm/factory.py | from allosaurus.lm.decoder import PhoneDecoder
import json
from argparse import Namespace
def read_lm(model_path, inference_config):
"""
read language model (phone inventory)
:param pm_config:
:return:
"""
lm_config = Namespace(**json.load(open(str(model_path / 'lm_config.json'))))
assert lm_config.model == 'phone_ipa', 'only phone_ipa model is supported for allosaurus now'
assert lm_config.backend == 'numpy', 'only numpy backend is supported for allosaurus now'
pm_config = Namespace(**json.load(open(str(model_path / 'pm_config.json'))))
# import configs from pm config
inference_config.window_size = pm_config.window_size + (pm_config.feature_window - 1)*pm_config.window_shift
inference_config.window_shift = pm_config.window_shift*pm_config.feature_window
model = PhoneDecoder(model_path, inference_config)
return model | 892 | 36.208333 | 112 | py |
allosaurus | allosaurus-master/allosaurus/lm/decoder.py | from allosaurus.lm.inventory import *
from pathlib import Path
from itertools import groupby
import numpy as np
class PhoneDecoder:
def __init__(self, model_path, inference_config):
"""
This class is an util for decode both phones and words
:param model_path:
"""
# lm model path
self.model_path = Path(model_path)
self.config = inference_config
# create inventory
self.inventory = Inventory(model_path, inference_config)
self.unit = self.inventory.unit
def compute(self, logits, lang_id=None, topk=1, emit=1.0, timestamp=False):
"""
decode phones from logits
:param logits: numpy array of logits
:param emit: blank factor
:return:
"""
# apply mask if lang_id specified, this is to restrict the output phones to the desired phone subset
mask = self.inventory.get_mask(lang_id, approximation=self.config.approximate)
logits = mask.mask_logits(logits)
emit_frame_idx = []
cur_max_arg = -1
# find all emitting frames
for i in range(len(logits)):
logit = logits[i]
logit[0] /= emit
arg_max = np.argmax(logit)
# this is an emitting frame
if arg_max != cur_max_arg and arg_max != 0:
emit_frame_idx.append(i)
cur_max_arg = arg_max
# decode all emitting frames
decoded_seq = []
for idx in emit_frame_idx:
logit = logits[idx]
exp_prob = np.exp(logit - np.max(logit))
probs = exp_prob / exp_prob.sum()
top_phones = logit.argsort()[-topk:][::-1]
top_probs = sorted(probs)[-topk:][::-1]
stamp = f"{self.config.window_shift*idx:.3f} {self.config.window_size:.3f} "
if topk == 1:
phones_str = ' '.join(mask.get_units(top_phones))
if timestamp:
phones_str = stamp + phones_str
decoded_seq.append(phones_str)
else:
phone_prob_lst = [f"{phone} ({prob:.3f})" for phone, prob in zip(mask.get_units(top_phones), top_probs)]
phones_str = ' '.join(phone_prob_lst)
if timestamp:
phones_str = stamp + phones_str
decoded_seq.append(phones_str)
if timestamp:
phones = '\n'.join(decoded_seq)
elif topk == 1:
phones = ' '.join(decoded_seq)
else:
phones = ' | '.join(decoded_seq)
return phones | 2,616 | 27.445652 | 120 | py |
allosaurus | allosaurus-master/allosaurus/lm/__init__.py | 0 | 0 | 0 | py | |
allosaurus | allosaurus-master/allosaurus/lm/mask.py | from .articulatory import *
from .unit import *
from pathlib import Path
def read_prior(prior_path):
prior = {}
for i, line in open(str(prior_path), 'r', encoding='utf-8'):
unit, prob = line.split()
if i == 0:
assert unit == '<blk>', 'first element should be blank'
prior[unit] = np.log(prob)
return prior
class UnitMask:
def __init__(self, domain_unit, target_unit, approximation=False, inference_config=None):
"""
MaskUnit provides interface to mask phones
:param domain_unit: all available units (phones)
:param target_unit: usually a subset of domain_unit
"""
self.inference_config = inference_config
self.domain_unit = domain_unit
self.target_unit = target_unit
# whether or not to use articulatory feature to map unseen units
self.approximation = approximation
# available index in all_unit
self.valid_mask = set()
# invalid index in all_unit
self.invalid_mask = set()
# index mapping from all_unit to target_unit
self.unit_map = dict()
# prior
self.prior = np.zeros(len(self.domain_unit), dtype=np.float32)
if self.inference_config and self.inference_config.prior and Path(self.inference_config.prior).exists():
self.create_prior()
# mask
self.create_mask()
if self.approximation:
self.articulatory = Articulatory()
self.approxmiate_phone()
# create a mask for masking numpy array
self.invalid_index_mask = sorted(list(self.invalid_mask))
def __str__(self):
return '<UnitMask: valid phone: ' + str(len(self.valid_mask)) + ', invalid phone: '+ str(len(self.invalid_mask))+'>'
def __repr__(self):
return self.__str__()
def create_mask(self):
# invalidate all phones first
self.invalid_mask = set(range(1, len(self.domain_unit)))
# <blank> is valid phone
self.valid_mask.add(0)
# <blank> should be mapped to <blank>
self.unit_map[0] = 0
# register all valid phones
for target_idx, target_phone in self.target_unit.id_to_unit.items():
if target_phone in self.domain_unit:
domain_idx = self.domain_unit.get_id(target_phone)
# this domain_idx is available
self.valid_mask.add(domain_idx)
# remove the domain_idx from the invalid_mask
self.invalid_mask -= { domain_idx }
# register this domain idx -> target_idx
self.unit_map[domain_idx] = target_idx
def create_prior(self):
for line in open(str(self.inference_config.prior), 'r', encoding='utf-8'):
phone, logprob = line.strip().split()
if phone in self.domain_unit:
domain_idx = self.domain_unit.get_id(phone)
self.prior[domain_idx] = logprob
def approxmiate_phone(self):
# register all valid phones
for target_idx, target_phone in self.target_unit.id_to_unit.items():
if target_phone not in self.domain_unit:
# find the most similar phone from the invalid set
max_domain_idx = -1
max_domain_score = -10000
for domain_idx in self.invalid_mask:
domain_phone = self.domain_unit.get_unit(domain_idx)
#print("domain ", domain_phone, " target ", target_phone)
score = self.articulatory.similarity(domain_phone, target_phone)
if score >= max_domain_score:
max_domain_score = score
max_domain_idx = domain_idx
assert max_domain_idx not in self.valid_mask
assert max_domain_idx != -1
# map max_domain_idx to target_idx
self.invalid_mask -= { max_domain_idx }
self.valid_mask.add(max_domain_idx)
#print("target phone", target_phone, ' mapped to idx ', max_domain_idx)
self.unit_map[max_domain_idx] = target_idx
def print_maps(self):
for domain_id, target_id in self.unit_map.items():
print(self.domain_unit.get_unit(domain_id)+' --> '+self.target_unit.get_unit(target_id))
def mask_logits(self, logits):
# mask inavailable logits
# apply mask
logits[:,self.invalid_index_mask] = -100000000.0
# apply prior
logits += self.prior
return logits
def get_units(self, ids):
"""
get unit from ids
:param ids: elem_id list
:return: a list of unit
"""
unit_lst = []
for idx in ids:
assert idx in self.unit_map
target_idx = self.unit_map[idx]
unit_lst.append(self.target_unit.get_unit(target_idx))
return unit_lst | 4,991 | 27.363636 | 124 | py |
allosaurus | allosaurus-master/allosaurus/lm/unit.py | import numpy as np
def read_unit(unit_path):
# load unit from units.txt
# units.txt should start from index 1 (because ctc blank is taking the 0 index)
unit_to_id = dict()
unit_to_id['<blk>'] = 0
idx = 0
for line in open(str(unit_path), 'r', encoding='utf-8'):
fields = line.strip().split()
assert len(fields) < 3, " each line should contain at most two field separated by space."
# this is simple format
if len(fields) == 1:
unit = fields[0]
idx += 1
else:
# this is kaldi format
unit = fields[0]
idx = int(fields[1])
assert unit not in unit_to_id, "there are duplicate phones."
unit_to_id[unit] = idx
unit = Unit(unit_to_id)
return unit
def write_unit(unit, unit_path, format='kaldi'):
"""
dump units to file
:param unit:
:param unit_path:
:return:
"""
# unit can be either kaldi format (each line contain two field phone and index) or
# the simple format (each line contains only phone)
assert format in ['kaldi', 'simple']
w = open(str(unit_path), 'w', encoding='utf-8')
for i in range(1, len(unit.id_to_unit)):
u = unit.id_to_unit[i]
if format == 'kaldi':
w.write(u+' '+str(i)+'\n')
else:
w.write(u+'\n')
w.close()
class Unit:
def __init__(self, unit_to_id):
"""
Unit manages bidirectional mapping from unit to id and id to unit
both are dict
:param unit_to_id:
"""
self.unit_to_id = unit_to_id
self.id_to_unit = {}
assert '<blk>' in self.unit_to_id and self.unit_to_id['<blk>'] == 0
for unit, idx in self.unit_to_id.items():
self.id_to_unit[idx] = unit
def __str__(self):
return '<Unit: ' + str(len(self.unit_to_id)) + ' elems>'
def __repr__(self):
return self.__str__()
def __getitem__(self, idx):
return self.id_to_unit[idx]
def __len__(self):
return len(self.id_to_unit)
def __contains__(self, unit):
if unit == ' ':
unit = '<space>'
return unit in self.unit_to_id
def get_id(self, unit):
# handle special units
if unit == ' ':
unit = '<space>'
assert unit in self.unit_to_id, 'unit '+unit+'is not in '+str(self.unit_to_id)
return self.unit_to_id[unit]
def get_ids(self, units):
"""
get index for a word list
:param words:
:return:
"""
return [self.get_id(unit) for unit in units]
def get_unit(self, id):
assert id >= 0 and id in self.id_to_unit
unit = self.id_to_unit[id]
# handle special units
if unit == '<space>':
unit = ' '
return unit
def get_units(self, ids):
"""
get unit from ids
:param ids: elem_id list
:return: a list of unit
"""
return [self.get_unit(id) for id in ids] | 3,050 | 22.469231 | 97 | py |
allosaurus | allosaurus-master/allosaurus/am/reporter.py | from allosaurus.model import get_model_path
class Reporter:
def __init__(self, train_config):
self.train_config = train_config
self.model_path = get_model_path(train_config.new_model)
# whether write into std
self.verbose = train_config.verbose
# log file
self.log_file = None
self.open()
def open(self):
# whether write into log file
self.log_file = None
if self.train_config.log != 'none':
self.log_file = open(self.model_path / 'log.txt', 'w', encoding='utf-8')
def close(self):
if self.log_file:
self.log_file.close()
def write(self, message):
if self.verbose:
print(message)
if self.log_file:
self.log_file.write(message+'\n') | 810 | 22.852941 | 84 | py |
allosaurus | allosaurus-master/allosaurus/am/utils.py | import torch
from collections import OrderedDict
import numpy as np
def torch_load(model, path, device_id, unit_mask=None):
"""Load torch model states.
Args:
path (str): Model path or snapshot file path to be loaded.
model (torch.nn.Module): Torch model.
device_id (int): gpu id (-1 indicates cpu only)
"""
if device_id >= 0:
model_state_dict = torch.load(str(path),map_location=torch.device(f'cuda:{device_id}'))
else:
model_state_dict = torch.load(str(path), map_location=torch.device('cpu'))
new_state_dict = OrderedDict()
for k, v in model_state_dict.items():
# no need for lang specific layer in inference model
if k.startswith('allophone_layer_dict'):
continue
if k.startswith('module.'):
name = k[7:] # remove `module.`
else:
name = k
# remap the phone_layer for fine-tuning
# it will remap phone_layer.weight and phone_layer.bias
if k.startswith('phone_layer'):
if unit_mask is not None:
phone_size = len(unit_mask.target_unit)
if len(v.shape) == 2:
# for weight
hidden_size = v.shape[1]
new_v = v.new(phone_size, hidden_size)
else:
# for bias
assert len(v.shape) == 1, 'phone_layer shape is either 2 or 1'
new_v = v.new(phone_size)
for domain_phone_id, target_phone_id in unit_mask.unit_map.items():
new_v[target_phone_id] = v[domain_phone_id]
v = new_v
new_state_dict[name] = v
if hasattr(model, 'module'):
model.module.load_state_dict(new_state_dict)
else:
model.load_state_dict(new_state_dict)
if device_id >= 0:
model = model.cuda(device_id)
del model_state_dict, new_state_dict
def torch_save(model, path):
"""Save torch model states.
Args:
path (str): Model path to be saved.
model (torch.nn.Module): Torch model.
"""
path = str(path)
if hasattr(model, 'module'):
torch.save(model.module.state_dict(), path)
else:
torch.save(model.state_dict(), path)
def apply_to_tensor(f, sample):
if len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {
key: _apply(value)
for key, value in x.items()
}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return [_apply(x) for x in x]
else:
return x
return _apply(sample)
def apply_to_ndarray(f, sample):
if len(sample) == 0:
return {}
def _apply(x):
if isinstance(x, np.ndarray):
return f(x)
elif isinstance(x, dict):
return {
key: _apply(value)
for key, value in x.items()
}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return [_apply(x) for x in x]
else:
return x
return _apply(sample)
def tensor_to_cuda(sample, device_id=0):
def _move_to_cuda(tensor):
return tensor.to(device_id)
return apply_to_tensor(_move_to_cuda, sample)
def ndarray_to_tensor(sample):
def _move_to_tensor(dnarray):
return torch.from_numpy(dnarray)
return apply_to_ndarray(_move_to_tensor, sample)
def move_to_tensor(sample, device_id=-1):
"""
move numpy array to torch tensor
:param sample:
:param device_id: -1 means cpu, other means gpu device_id
:return:
"""
sample = ndarray_to_tensor(sample)
# move to cuda if device_id provided
if device_id >= 0:
sample = tensor_to_cuda(sample, device_id)
return sample
def move_to_ndarray(sample):
if sample.is_cuda:
sample = sample.cpu()
return sample.data.numpy()
| 4,126 | 24.475309 | 95 | py |
allosaurus | allosaurus-master/allosaurus/am/dataset.py | from allosaurus.pm.kdict import read_matrix
from pathlib import Path
from torch.utils.data import Dataset
import numpy as np
class AllosaurusDataset(Dataset):
def __init__(self, data_path):
self.data_path = Path(data_path)
required_files = ['feat.scp', 'token', 'feat.ark', 'shape']
for required_file in required_files:
assert (self.data_path / required_file).exists(), required_file+" does not exist, please run the preparation before fine-tuning"
# read all tokens
self.utt2token = {}
self._read_token()
# read all features and their shapes
self.utt2offset = {}
self.utt2shape = {}
self.ark = None
self._read_feat()
# extract all valid utt_ids
token_utt_set = set(self.utt2token.keys())
feat_utt_set = set(self.utt2offset.keys())
shape_utt_set = set(self.utt2shape.keys())
self.utt_ids = list(set.intersection(token_utt_set, feat_utt_set, shape_utt_set))
# sort all ids based on their their shape
self.utt_ids.sort(key=lambda utt_id: self.utt2shape[utt_id][0], reverse=True)
def __len__(self):
return len(self.utt_ids)
def __getitem__(self, item):
utt_id = self.utt_ids[item]
token = self.utt2token[utt_id]
offset = self.utt2offset[utt_id]
self.ark.seek(offset)
feature = read_matrix(self.ark, np.float32)
return (feature, token)
def close(self):
if self.ark:
self.ark.close()
def _read_token(self):
"""
load token from file
:param token_path:
:return:
"""
token_reader = open(str(self.data_path / 'token'), 'r', encoding='utf-8')
self.utt2token = {}
for line in token_reader:
fields = line.strip().split()
utt_id = fields[0]
tokens = list(map(int, fields[1:]))
# reject empty token or too long token
if len(tokens) == 0 or len(tokens) > 1000:
continue
self.utt2token[utt_id] = tokens
def _read_feat(self):
"""
load offsets from feat.scp
:return:
"""
#####################################################################
# read feature
#####################################################################
feat_reader = open(str(self.data_path / 'feat.scp'), 'r')
self.utt2offset = {}
for line in feat_reader:
fields = line.strip().split()
assert len(fields) == 2, " feat.scp should only contain two fields"
utt_id = fields[0]
feat = fields[1]
p = feat.rfind(":")
assert p >= 0, " offset pointer not found"
offset = int(feat[p+1:])
self.utt2offset[utt_id] = offset
feat_reader.close()
self.ark = open(self.data_path / 'feat.ark', 'rb')
#####################################################################
# read shape
#####################################################################
shape_reader = open(str(self.data_path / 'shape'), 'r')
for line in shape_reader:
fields = line.strip().split()
utt_id = fields[0]
shape = (int(fields[1]), int(fields[2]))
self.utt2shape[utt_id] = shape
shape_reader.close() | 3,451 | 26.616 | 140 | py |
allosaurus | allosaurus-master/allosaurus/am/factory.py | from allosaurus.am.allosaurus_torch import AllosaurusTorchModel
from allosaurus.am.utils import *
from allosaurus.lm.inventory import Inventory
from allosaurus.lm.unit import write_unit
import json
from argparse import Namespace
from allosaurus.model import get_model_path
def read_am(model_path, inference_config):
"""
load pretrained acoustic model
:param model_path: path to the
:return:
"""
am_config = Namespace(**json.load(open(str(model_path / 'am_config.json'))))
assert am_config.model == 'allosaurus', "This project only support allosaurus model"
model = AllosaurusTorchModel(am_config)
# load weights
torch_load(model, str(model_path / 'model.pt'), inference_config.device_id)
return model
def transfer_am(train_config):
"""
initialize the acoustic model with a pretrained model for fine-tuning
:param model_path: path to the
:return:
"""
pretrained_model_path = get_model_path(train_config.pretrained_model)
am_config = Namespace(**json.load(open(str(pretrained_model_path / 'am_config.json'))))
assert am_config.model == 'allosaurus', "This project only support allosaurus model"
# load inventory
inventory = Inventory(pretrained_model_path)
# get unit_mask which maps the full phone inventory to the target phone inventory
unit_mask = inventory.get_mask(train_config.lang, approximation=True)
# reset the new phone_size
am_config.phone_size = len(unit_mask.target_unit)
model = AllosaurusTorchModel(am_config)
# load the pretrained model and setup the phone_layer with correct weights
torch_load(model, str(pretrained_model_path / 'model.pt'), train_config.device_id, unit_mask)
# update new model
new_model = train_config.new_model
# get its path
model_path = get_model_path(new_model)
# overwrite old am_config
new_am_config_json = vars(am_config)
json.dump(new_am_config_json, open(str(model_path / 'am_config.json'), 'w'), indent=4)
# overwrite old phones
write_unit(unit_mask.target_unit, model_path / 'phone.txt')
# overwrite old model
torch_save(model, model_path / 'model.pt')
return model | 2,194 | 29.486111 | 97 | py |
allosaurus | allosaurus-master/allosaurus/am/__init__.py | 0 | 0 | 0 | py | |
allosaurus | allosaurus-master/allosaurus/am/allosaurus_torch.py | import torch
import torch.nn as nn
class AllosaurusTorchModel(nn.Module):
def __init__(self, config):
super(AllosaurusTorchModel, self).__init__()
self.hidden_size = config.hidden_size
self.layer_size = config.layer_size
self.proj_size = config.proj_size
# decide input feature size
if config.feat_size == -1:
corpus_feat_size_dict = list(config.feat_size_dict.values())[0]
self.feat_size = list(corpus_feat_size_dict.values())[0]
else:
self.feat_size = config.feat_size
assert hasattr(config, 'lang_size_dict'), " config should has the lang_size_dict property"
self.lang_size_dict = config.lang_size_dict
self.lang_output_size = dict()
self.phone_size = config.phone_size
self.config = config
self.blstm_layer = nn.LSTM(self.feat_size, self.hidden_size, num_layers=self.layer_size, bidirectional=True)
self.phone_layer = nn.Linear(self.hidden_size*2, self.phone_size)
self.phone_tensor = None
@staticmethod
def add_args(parser):
parser.add_argument('--feat_size', type=int, default=-1, help='input size in the blstm model. if -1, then it is determined automatically by loader')
parser.add_argument('--hidden_size', type=int, default=320, help='hidden size in the blstm model')
parser.add_argument('--lang_size', type=int, default=-1, help='output size in the blstm model, if -1, then it is determined automatically by loader')
parser.add_argument('--proj_size', type=int, default=0, help='projection')
parser.add_argument('--layer_size', type=int, default=5, help='layer size in the blstm model')
parser.add_argument('--l2', type=float, default=0.0, help='regularization')
parser.add_argument('--loss', type=str, default='ctc', help='ctc/warp_ctc/e2e')
parser.add_argument('--debug_model', type=str, default=False, help='print tensor info for debugging')
def forward(self, input_tensor, input_lengths, return_lstm=False, return_both=False, meta=None):
"""
:param input: an Tensor with shape (B,T,H)
:lengths: a list of length of input_tensor, if None then no padding
:meta: dictionary containing meta information (should contain lang_id in this case
:return_lstm: [list containing the output_embeddings and their respective lengths]
:return_both: tuple containing (a list containing the output_embeddings and their respective lengths and the ouptut of phone layer)
:return:
"""
#if utt_ids:
#print("utt_ids {} \n target_tensor {}".format(' '.join(utt_ids), target_tensor))
#print("input_lengths {}".format(str(input_lengths)))
#print("target_tensor {}".format(target_tensor))
#print("target_lengths {}".format(target_lengths))
# (B,T,H) -> (T,B,H)
input_tensor = input_tensor.transpose(0, 1).float()
# extract lengths
if input_lengths is None:
input_lengths = torch.LongTensor([input_tensor.shape[0]]*input_tensor.shape[1])
# keep the max length for padding
total_length = input_tensor.size(0)
#if self.config.loss == 'warp_ctc':
#target_tensor = torch.cat([target_tensor[idx,:index] for idx, index in enumerate(target_lengths)])
#if lengths.dim() == 2:
# lengths = lengths.squeeze()
# build each layer
# (T,B,H) -> PackSequence
pack_sequence = nn.utils.rnn.pack_padded_sequence(input_tensor, input_lengths.cpu())
# PackSequence -> (PackSequence, States)
self.blstm_layer.flatten_parameters()
hidden_pack_sequence, _ = self.blstm_layer(pack_sequence)
# PackSequence -> (T,B,2H), lengths
output_tensor, _ = nn.utils.rnn.pad_packed_sequence(hidden_pack_sequence, total_length=total_length)
# (T,B,2H) -> (T,B,P)
phone_tensor = self.phone_layer(output_tensor)
#added the return_lstm argument
if return_lstm:
return [output_tensor.cpu(),input_lengths.cpu()]
if return_both:
return [(output_tensor.cpu(),input_lengths.cpu()), phone_tensor.transpose(0,1)]
# return (B,T,H) for gathering
return phone_tensor.transpose(0,1) | 4,359 | 40.52381 | 159 | py |
allosaurus | allosaurus-master/allosaurus/am/criterion.py | import torch
import torch.nn as nn
def read_criterion(train_config):
assert train_config.criterion == 'ctc', 'only ctc criterion is supported now'
return CTCCriterion(train_config)
class CTCCriterion(nn.Module):
def __init__(self, train_config):
super().__init__()
self.train_config = train_config
self.logsoftmax = nn.LogSoftmax(dim=2)
self.criterion = nn.CTCLoss(reduction='sum', zero_infinity=True)
def forward(self,
output_tensor: torch.tensor,
output_lengths: torch.tensor,
target_tensor: torch.tensor,
target_lengths: torch.tensor):
output_tensor = self.logsoftmax(output_tensor).transpose(0,1)
loss = self.criterion(output_tensor, target_tensor, output_lengths, target_lengths)
return loss | 841 | 29.071429 | 91 | py |
allosaurus | allosaurus-master/allosaurus/am/loader.py | from allosaurus.am.dataset import AllosaurusDataset
import numpy as np
def read_loader(data_path, train_config):
"""
create a dataloader for data_path
:param data_path:
:param train_config:
:return:
"""
return AllosaurusLoader(data_path, train_config)
class AllosaurusLoader:
def __init__(self, data_path, train_config):
self.train_config = train_config
self.dataset = AllosaurusDataset(data_path)
self.batch_lst = []
self._prepare_batch()
def __len__(self):
return len(self.batch_lst)
def close(self):
self.dataset.close()
def shuffle(self):
np.random.shuffle(self.batch_lst)
def read_batch(self, batch_idx):
assert batch_idx < len(self.batch_lst), "batch_idx "+str(batch_idx)+" is too large!!"
batch = self.batch_lst[batch_idx]
return self._collate_batch(batch)
def _collate_batch(self, batch):
feat_lst = []
token_lst = []
for idx in batch:
feat, token = self.dataset[idx]
feat_lst.append(feat)
token_lst.append(token)
feat, feat_lengths = self._collate_feat(feat_lst)
token, token_lengths = self._collate_token(token_lst)
return (feat, feat_lengths), (token, token_lengths)
def _collate_feat(self, feat_lst):
batch_size = len(feat_lst)
frame_size = feat_lst[0].shape[0]
feat_size = feat_lst[0].shape[1]
# collate feats
feat_lengths = np.zeros(batch_size, dtype=np.int32)
feat_tensor = np.zeros([batch_size, frame_size, feat_size], dtype=np.float32)
for i, feat in enumerate(feat_lst):
feat_tensor[i,:len(feat)] = feat
feat_lengths[i] = len(feat)
return feat_tensor, feat_lengths
def _collate_token(self, token_lst):
batch_size = len(token_lst)
token_size = max([len(token) for token in token_lst])
token_tensor = np.zeros([batch_size, token_size], dtype=np.int32)
token_lengths = np.zeros(batch_size, dtype=np.int32)
for i, token in enumerate(token_lst):
token_tensor[i, :len(token)] = token
token_lengths[i] = len(token)
return token_tensor, token_lengths
def _prepare_batch(self):
batch = []
batch_frame_size = 0
for i in range(len(self.dataset)):
utt_id = self.dataset.utt_ids[i]
frame_size = self.dataset.utt2shape[utt_id][0]
# batch frame is large enough
if batch_frame_size + frame_size >= self.train_config.batch_frame_size:
# commit current batch to the list
self.batch_lst.append(batch)
# reset frame size
batch_frame_size = 0
# reset batch
batch = []
batch_frame_size += frame_size
batch.append(i)
# commit the last batch if it is a valid batch
if len(batch) > 0 and batch_frame_size < self.train_config.batch_frame_size:
self.batch_lst.append(batch) | 3,115 | 25.40678 | 93 | py |
allosaurus | allosaurus-master/allosaurus/am/trainer.py | from allosaurus.am.utils import move_to_tensor, torch_save
from allosaurus.am.criterion import read_criterion
from allosaurus.am.optimizer import read_optimizer
from allosaurus.am.reporter import Reporter
import editdistance
import numpy as np
import torch
from itertools import groupby
from allosaurus.model import get_model_path
import os
import json
class Trainer:
def __init__(self, model, train_config):
self.model = model
self.train_config = train_config
self.device_id = self.train_config.device_id
# criterion, only ctc currently
self.criterion = read_criterion(train_config)
# optimizer, only sgd currently
self.optimizer = read_optimizer(self.model, train_config)
# reporter to write logs
self.reporter = Reporter(train_config)
# best per
self.best_per = 100.0
# intialize the model
self.model_path = get_model_path(train_config.new_model)
# counter for early stopping
self.num_no_improvement = 0
def sum_edit_distance(self, output_ndarray, output_lengths_ndarray, token_ndarray, token_lengths_ndarray):
"""
compute SUM of ter in this batch
"""
error_cnt_sum = 0.0
for i in range(len(token_lengths_ndarray)):
target_list = token_ndarray[i, :token_lengths_ndarray[i]].tolist()
logit = output_ndarray[i][:output_lengths_ndarray[i]]
raw_token = [x[0] for x in groupby(np.argmax(logit, axis=1))]
decoded_token = list(filter(lambda a: a != 0, raw_token))
error_cnt_sum += editdistance.distance(target_list, decoded_token)
return error_cnt_sum
def step(self, feat_batch, token_batch):
# prepare torch tensors from numpy arrays
feat_tensor, feat_lengths_tensor = move_to_tensor(feat_batch, self.device_id)
token_tensor, token_lengths_tensor = move_to_tensor(token_batch, self.device_id)
#print(feat_tensor)
#print(feat_lengths_tensor)
output_tensor = self.model(feat_tensor, feat_lengths_tensor)
#print(output_tensor)
#print(token_tensor)
#print(token_lengths_tensor)
loss = self.criterion(output_tensor, feat_lengths_tensor, token_tensor, token_lengths_tensor)
#print(loss.item())
# extract numpy format for edit distance computing
output_ndarray = output_tensor.cpu().detach().numpy()
feat_ndarray, feat_lengths_ndarray = feat_batch
token_ndarray, token_lengths_ndarray = token_batch
phone_error_sum = self.sum_edit_distance(output_ndarray, feat_lengths_ndarray, token_ndarray,
token_lengths_ndarray)
phone_count = sum(token_lengths_ndarray)
return loss, phone_error_sum, phone_count
def train(self, train_loader, validate_loader):
self.best_per = 100.0
batch_count = len(train_loader)
for epoch in range(self.train_config.epoch):
# shuffle
train_loader.shuffle()
# set to the training mode
self.model.train()
# reset all stats
all_phone_count = 0.0
all_loss_sum = 0.0
all_phone_error_sum = 0.0
# training loop
for ii in range(batch_count):
self.optimizer.zero_grad()
feat_batch, token_batch = train_loader.read_batch(ii)
# forward step
loss_tensor, phone_error_sum, phone_count = self.step(feat_batch, token_batch)
# backprop and optimize
loss_tensor.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.train_config.grad_clip)
self.optimizer.step()
# update stats
loss_sum = loss_tensor.item()
all_phone_count += phone_count
all_loss_sum += loss_sum
all_phone_error_sum += phone_error_sum
if ii % self.train_config.report_per_batch == 0:
message = f'epoch[batch]: {epoch:02d}[{ii:04d}] | train loss {all_loss_sum/all_phone_count:0.5f} train per {all_phone_error_sum / all_phone_count:0.5f}'
self.reporter.write(message)
# reset all stats
all_phone_count = 0.0
all_loss_sum = 0.0
all_phone_error_sum = 0.0
# evaluate this model
validate_phone_error_rate = self.validate(validate_loader)
self.reporter.write(f"epoch{epoch} | validate per : {validate_phone_error_rate:0.5f}")
if validate_phone_error_rate <= self.best_per:
self.best_per = validate_phone_error_rate
self.num_no_improvement = 0
self.reporter.write("saving model")
model_name = f"model_{validate_phone_error_rate:0.5f}.pt"
# save model
torch_save(self.model, self.model_path / model_name)
# overwrite the best model
torch_save(self.model, self.model_path / 'model.pt')
else:
self.num_no_improvement += 1
if self.num_no_improvement >= 3:
self.reporter.write("no improvements for several epochs, early stopping now")
break
# close reporter stream
self.reporter.close()
def validate(self, validate_loader):
self.model.eval()
batch_count = len(validate_loader)
all_phone_error_sum = 0
all_phone_count = 0
# validation loop
for ii in range(batch_count):
self.optimizer.zero_grad()
feat_batch, token_batch = validate_loader.read_batch(ii)
# one step
loss_tensor, phone_error_sum, phone_count = self.step(feat_batch, token_batch)
# update stats
all_phone_error_sum += phone_error_sum
all_phone_count += phone_count
return all_phone_error_sum/all_phone_count
| 6,149 | 30.538462 | 172 | py |
allosaurus | allosaurus-master/allosaurus/am/optimizer.py | from torch.optim import SGD
def read_optimizer(model, train_config):
    """Build the optimizer requested by *train_config* for *model*.

    Only plain SGD is supported for now; the configured learning rate is
    taken from ``train_config.lr``.
    """
    optimizer_name = train_config.optimizer
    assert optimizer_name == 'sgd', 'only sgd is supported now, others optimizers would be easier to add though'
    return SGD(params=model.parameters(), lr=train_config.lr)
allosaurus | allosaurus-master/test/test_recognition.py | import unittest
from pathlib import Path
from allosaurus.app import read_recognizer
class TestRecognition(unittest.TestCase):
    """Smoke tests: the pretrained recognizer must emit a non-empty transcript
    for the bundled sample audio."""

    def test_latest_nonempty(self):
        # default (latest) model, default phone inventory
        wav_path = Path(__file__).parent.parent / 'sample.wav'
        recognizer = read_recognizer('latest')
        output = recognizer.recognize(wav_path)
        self.assertTrue(len(output) > 0)

    def test_eng_nonempty(self):
        # same sample, restricted to the English inventory
        wav_path = Path(__file__).parent.parent / 'sample.wav'
        recognizer = read_recognizer('latest')
        output = recognizer.recognize(wav_path, 'eng')
        self.assertTrue(len(output) > 0)

    def test_eng_nonempty_external_model(self):
        # model loaded from an explicit external directory
        wav_path = Path(__file__).parent.parent / 'sample.wav'
        external_dir = Path(__file__).parent.parent / 'test_model'
        recognizer = read_recognizer('latest', external_dir)
        output = recognizer.recognize(wav_path, 'eng')
        self.assertTrue(len(output) > 0)
# allow running this test module directly: python test_recognition.py
if __name__ == '__main__':
    unittest.main()
allosaurus | allosaurus-master/test/test_model.py | import unittest
from pathlib import Path
import requests
class TestModel(unittest.TestCase):
    """Check that the released model archive is reachable on GitHub."""

    def test_latest_available(self):
        model_name = "latest"
        url = 'https://github.com/xinjli/allosaurus/releases/download/v1.0/' + model_name + '.tar.gz'
        # HEAD request only; requests.head() does not follow redirects, so
        # GitHub's release-asset redirect legitimately shows up as 302
        req = requests.head(url)
        print(req.status_code)
        # assertIn reports the actual status code on failure, unlike
        # assertTrue over a compound boolean expression
        self.assertIn(req.status_code, (200, 302))
# allow running this test module directly: python test_model.py
if __name__ == '__main__':
    unittest.main()
| 452 | 24.166667 | 101 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/setup.py | from setuptools import setup
# Packaging metadata for the `desed_task` library used by the DCASE 2021
# Task 4 baseline recipes.
setup(
    name="desed_task",
    version="0.1.0",
    description="Sound Event Detection and Separation in Domestic Environments.",
    author="DCASE2021 Task 4 Organizers",
    author_email="cornellsamuele@gmail.com",
    license="MIT",
    packages=["desed_task"],
    python_requires=">=3.8",
    # pinned versions of the evaluation / SED toolkits the recipes rely on
    install_requires=[
        "asteroid==0.4.1",
        "dcase_util==0.2.16",
        "psds_eval==0.3.0",
        "sed_eval==0.2.1",
    ],
)
| 471 | 23.842105 | 81 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/evaluation/evaluation_measures.py | import os
import numpy as np
import pandas as pd
import psds_eval
import sed_eval
from psds_eval import PSDSEval, plot_psd_roc
def get_event_list_current_file(df, fname):
    """
    Get list of events for a given filename

    Args:
        df: pd.DataFrame, the dataframe to search on
        fname: the filename to extract the value from the dataframe
    Returns:
        list of events (dictionaries) for the given filename
    """
    event_file = df[df["filename"] == fname]
    # A single row whose event_label is NaN marks a file without any event:
    # represent it as a bare {"filename": ...} record.
    if len(event_file) == 1 and pd.isna(event_file["event_label"].iloc[0]):
        return [{"filename": fname}]
    # Otherwise every row is a real event; the former single-labelled-row and
    # multi-row branches were duplicates of this same conversion.
    return event_file.to_dict("records")
def psds_results(psds_obj):
    """ Compute and print psds scores for three (alpha_ct, alpha_st) settings.

    Args:
        psds_obj: psds_eval.PSDSEval object with operating points.
    Raises:
        EnvironmentError: if the PSDS computation fails; the original
            PSDSEvalError is chained as the cause.
    """
    try:
        psds_score = psds_obj.psds(alpha_ct=0, alpha_st=0, max_efpr=100)
        print(f"\nPSD-Score (0, 0, 100): {psds_score.value:.5f}")
        psds_score = psds_obj.psds(alpha_ct=1, alpha_st=0, max_efpr=100)
        print(f"\nPSD-Score (1, 0, 100): {psds_score.value:.5f}")
        psds_score = psds_obj.psds(alpha_ct=0, alpha_st=1, max_efpr=100)
        print(f"\nPSD-Score (0, 1, 100): {psds_score.value:.5f}")
    except psds_eval.psds.PSDSEvalError as e:
        print("psds did not work ....")
        # chain the original error so the root cause is not lost
        # (the caught exception was previously discarded)
        raise EnvironmentError from e
def event_based_evaluation_df(
    reference, estimated, t_collar=0.200, percentage_of_length=0.2
):
    """Build sed_eval event-based metrics for *estimated* against *reference*.

    Args:
        reference: pd.DataFrame with "filename", "onset", "offset" and
            "event_label" columns describing the reference events
        estimated: pd.DataFrame with the same columns describing the
            system output to score
        t_collar: float, tolerance (seconds) allowed on onsets and offsets
        percentage_of_length: float in [0, 1], offset tolerance as a fraction
            of the event length

    Returns:
        sed_eval.sound_event.EventBasedMetrics with the accumulated scores
    """
    # union of labels seen on either side (order is arbitrary, as before)
    labels = set(reference.event_label.dropna().unique())
    labels |= set(estimated.event_label.dropna().unique())

    metrics = sed_eval.sound_event.EventBasedMetrics(
        event_label_list=list(labels),
        t_collar=t_collar,
        percentage_of_length=percentage_of_length,
        empty_system_output_handling="zero_score",
    )

    # accumulate scores file by file, driven by the reference file list
    for fname in reference["filename"].unique():
        metrics.evaluate(
            reference_event_list=get_event_list_current_file(reference, fname),
            estimated_event_list=get_event_list_current_file(estimated, fname),
        )

    return metrics
def segment_based_evaluation_df(reference, estimated, time_resolution=1.0):
    """Build sed_eval segment-based metrics for *estimated* against *reference*.

    Args:
        reference: pd.DataFrame with "filename", "onset", "offset" and
            "event_label" columns describing the reference events
        estimated: pd.DataFrame with the same columns describing the
            system output to score
        time_resolution: float, segment length in seconds used by the metric

    Returns:
        sed_eval.sound_event.SegmentBasedMetrics with the accumulated scores
    """
    # union of labels seen on either side (order is arbitrary, as before)
    labels = set(reference.event_label.dropna().unique())
    labels |= set(estimated.event_label.dropna().unique())

    metrics = sed_eval.sound_event.SegmentBasedMetrics(
        event_label_list=list(labels), time_resolution=time_resolution
    )

    # accumulate scores file by file, driven by the reference file list
    for fname in reference["filename"].unique():
        metrics.evaluate(
            reference_event_list=get_event_list_current_file(reference, fname),
            estimated_event_list=get_event_list_current_file(estimated, fname),
        )

    return metrics
def compute_sed_eval_metrics(predictions, groundtruth):
    """Compute the sed_eval event-based and segment-based metrics with the
    default parameters used in the task.

    Args:
        predictions: pd.DataFrame, predictions dataframe
        groundtruth: pd.DataFrame, groundtruth dataframe

    Returns:
        tuple, (sed_eval.sound_event.EventBasedMetrics,
                sed_eval.sound_event.SegmentBasedMetrics)
    """
    event_metrics = event_based_evaluation_df(
        groundtruth, predictions, t_collar=0.200, percentage_of_length=0.2
    )
    segment_metrics = segment_based_evaluation_df(
        groundtruth, predictions, time_resolution=1.0
    )
    return event_metrics, segment_metrics
def compute_per_intersection_macro_f1(
    prediction_dfs,
    ground_truth_file,
    durations_file,
    dtc_threshold=0.5,
    gtc_threshold=0.5,
    cttc_threshold=0.3,
):
    """Compute the intersection-based macro F1, averaged over all thresholds.

    Args:
        prediction_dfs: dict mapping decision threshold -> predictions dataframe
        ground_truth_file: path to the groundtruth tsv file
        durations_file: path to the audio durations tsv file
        dtc_threshold: float, PSDSEval tolerance for groundtruth intersection
            with predictions
        gtc_threshold: float, PSDSEval tolerance for predictions intersection
            with groundtruth
        cttc_threshold: float, PSDSEval tolerance used to count a FP as a
            cross-trigger

    Returns:
        float, the macro F1 averaged over the provided thresholds
    """
    ground_truth = pd.read_csv(ground_truth_file, sep="\t")
    audio_durations = pd.read_csv(durations_file, sep="\t")
    psds = PSDSEval(
        ground_truth=ground_truth,
        metadata=audio_durations,
        dtc_threshold=dtc_threshold,
        gtc_threshold=gtc_threshold,
        cttc_threshold=cttc_threshold,
    )

    scores = []
    for threshold, pred_df in prediction_dfs.items():
        if pred_df.empty:
            macro_f1 = 0
        else:
            macro_f1, _ = psds.compute_macro_f_score(pred_df)
        # PSDSEval may return NaN when no class has any match
        if np.isnan(macro_f1):
            macro_f1 = 0.0
        scores.append(macro_f1)

    return np.mean(scores)
def compute_psds_from_operating_points(
    prediction_dfs,
    ground_truth_file,
    durations_file,
    dtc_threshold=0.5,
    gtc_threshold=0.5,
    cttc_threshold=0.3,
    alpha_ct=0,
    alpha_st=0,
    max_efpr=100,
    save_dir=None,
):
    """Compute the PSDS from a set of operating points.

    Args:
        prediction_dfs: dict mapping decision threshold -> predictions DataFrame
        ground_truth_file: path to the groundtruth tsv file
        durations_file: path to the audio durations tsv file
        dtc_threshold: detection tolerance criterion threshold
        gtc_threshold: groundtruth tolerance criterion threshold
        cttc_threshold: cross-trigger tolerance criterion threshold
        alpha_ct: weight of the cross-trigger penalty
        alpha_st: weight of the across-class instability penalty
        max_efpr: maximum e-FPR used when integrating the PSD-ROC
        save_dir: if not None, directory where per-threshold predictions and
            the PSD-ROC plot are saved

    Returns:
        float, the PSDS value
    """
    gt = pd.read_csv(ground_truth_file, sep="\t")
    durations = pd.read_csv(durations_file, sep="\t")
    # named psds_eval_obj (not "psds_eval") to avoid shadowing the imported
    # psds_eval module used elsewhere in this file
    psds_eval_obj = PSDSEval(
        ground_truth=gt,
        metadata=durations,
        dtc_threshold=dtc_threshold,
        gtc_threshold=gtc_threshold,
        cttc_threshold=cttc_threshold,
    )

    for i, k in enumerate(prediction_dfs.keys()):
        det = prediction_dfs[k]
        # see issue https://github.com/audioanalytic/psds_eval/issues/3
        det["index"] = range(1, len(det) + 1)
        det = det.set_index("index")
        psds_eval_obj.add_operating_point(
            det, info={"name": f"Op {i + 1:02d}", "threshold": k}
        )

    psds_score = psds_eval_obj.psds(alpha_ct=alpha_ct, alpha_st=alpha_st, max_efpr=max_efpr)

    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
        pred_dir = os.path.join(
            save_dir,
            f"predictions_dtc{dtc_threshold}_gtc{gtc_threshold}_cttc{cttc_threshold}",
        )
        os.makedirs(pred_dir, exist_ok=True)
        for k in prediction_dfs.keys():
            prediction_dfs[k].to_csv(
                os.path.join(pred_dir, f"predictions_th_{k:.2f}.tsv"),
                sep="\t",
                index=False,
            )
        plot_psd_roc(
            psds_score,
            filename=os.path.join(save_dir, f"PSDS_ct{alpha_ct}_st{alpha_st}_100.png"),
        )

    return psds_score.value
| 8,788 | 33.876984 | 119 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/train_sed_CRST.py | import argparse
from copy import deepcopy
import numpy as np
import os
import pandas as pd
import random
import torch
import yaml
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from desed_task.dataio import ConcatDatasetBatchSampler
from desed_task.dataio.datasets import StronglyAnnotatedSet, UnlabeledSet, WeakSet
from desed_task.nnet.CRNN import CRNN, RCRNN
from desed_task.utils.encoder import ManyHotEncoder
from desed_task.utils.schedulers import ExponentialWarmup
from local.classes_dict import classes_labels
from local.sed_trainer_CRST import SEDTask4_2021
from local.resample_folder import resample_folder
from local.utils import generate_tsv_wav_durations
def resample_data_generate_durations(config_data, test_only=False, evaluation=False):
    """Resample the dataset folders to the target sample rate and (re)generate
    the duration tsv files.

    Args:
        config_data: dict, the "data" section of the configuration
        test_only: bool, if True only the test (or evaluation) set is processed
        evaluation: bool, if True process the evaluation folder instead of the
            test folder (durations are not regenerated in that case)
    """
    if not test_only:
        dsets = [
            "synth_folder",
            "synth_val_folder",
            "weak_folder",
            "unlabeled_folder",
            "test_folder",
        ]
    elif not evaluation:
        # BUGFIX: this branch used to be `elif test_only:`, which made the
        # final `else` unreachable, so the eval_folder case could never be
        # selected; dispatch on `evaluation` instead.
        dsets = ["test_folder"]
    else:
        dsets = ["eval_folder"]

    for dset in dsets:
        # NOTE(review): `computed` only reflects the LAST folder processed;
        # presumably any recomputed folder should trigger duration
        # regeneration — confirm intended semantics before changing.
        computed = resample_folder(
            config_data[dset + "_44k"], config_data[dset], target_fs=config_data["fs"]
        )

    if not evaluation:
        for base_set in ["synth_val", "test"]:
            if not os.path.exists(config_data[base_set + "_dur"]) or computed:
                generate_tsv_wav_durations(
                    config_data[base_set + "_folder"], config_data[base_set + "_dur"]
                )
def single_run(
    config,
    log_dir,
    gpus,
    checkpoint_resume=None,
    test_state_dict=None,
    fast_dev_run=False,
    evaluation=False
):
    """
    Running sound event detection baseline (CRST variant with two students)
    Args:
        config (dict): the dictionary of configuration params
        log_dir (str): path to log directory
        gpus (int): number of gpus to use
        checkpoint_resume (str, optional): path to checkpoint to resume from. Defaults to "".
        test_state_dict (dict, optional): if not None, no training is involved. This dictionary is the state_dict
            to be loaded to test the model.
        fast_dev_run (bool, optional): whether to use a run with only one batch at train and validation, useful
            for development purposes.
        evaluation (bool, optional): if True, run on an unlabeled evaluation set instead of
            the annotated test set.
    """
    config.update({"log_dir": log_dir})

    ##### data prep test ##########
    # label encoder shared by every dataset below
    encoder = ManyHotEncoder(
        list(classes_labels.keys()),
        audio_len=config["data"]["audio_max_len"],
        frame_len=config["feats"]["n_filters"],
        frame_hop=config["feats"]["hop_length"],
        net_pooling=config["data"]["net_subsample"],
        fs=config["data"]["fs"],
    )

    if not evaluation:
        # annotated test set: labels are available for scoring
        devtest_df = pd.read_csv(config["data"]["test_tsv"], sep="\t")
        devtest_dataset = StronglyAnnotatedSet(
            config["data"]["test_folder"],
            devtest_df,
            encoder,
            return_filename=True,
            pad_to=config["data"]["audio_max_len"],
        )
    else:
        # NOTE(review): evaluation reads from test_folder (not eval_folder)
        # and pads to 11 s instead of audio_max_len — confirm intentional
        devtest_dataset = UnlabeledSet(
            config["data"]["test_folder"],
            encoder,
            pad_to=11,
            return_filename=True
        )

    test_dataset = devtest_dataset

    ##### model definition ############
    # two student networks sharing the same architecture config ("net1")
    sed_student1 = RCRNN(**config["net1"])
    sed_student2 = RCRNN(**config["net1"])

    if test_state_dict is None:
        ##### data prep train valid ##########
        synth_df = pd.read_csv(config["data"]["synth_tsv"], sep="\t")
        synth_set = StronglyAnnotatedSet(
            config["data"]["synth_folder"],
            synth_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        # split the weakly-labeled set into train/valid parts
        weak_df = pd.read_csv(config["data"]["weak_tsv"], sep="\t")
        train_weak_df = weak_df.sample(
            frac=config["training"]["weak_split"], random_state=config["training"]["seed"]
        )
        valid_weak_df = weak_df.drop(train_weak_df.index).reset_index(drop=True)
        train_weak_df = train_weak_df.reset_index(drop=True)
        weak_set = WeakSet(
            config["data"]["weak_folder"],
            train_weak_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        unlabeled_set = UnlabeledSet(
            config["data"]["unlabeled_folder"],
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        synth_df_val = pd.read_csv(config["data"]["synth_val_tsv"], sep="\t")
        synth_val = StronglyAnnotatedSet(
            config["data"]["synth_val_folder"],
            synth_df_val,
            encoder,
            return_filename=True,
            pad_to=config["data"]["audio_max_len"],
        )
        weak_val = WeakSet(
            config["data"]["weak_folder"],
            valid_weak_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
            return_filename=True,
        )
        # batches mix the three training sources with per-source batch sizes
        tot_train_data = [synth_set, weak_set, unlabeled_set]
        train_dataset = torch.utils.data.ConcatDataset(tot_train_data)
        batch_sizes = config["training"]["batch_size"]
        samplers = [torch.utils.data.RandomSampler(x) for x in tot_train_data]
        batch_sampler = ConcatDatasetBatchSampler(samplers, batch_sizes)
        valid_dataset = torch.utils.data.ConcatDataset(
            [synth_val, weak_val]
        )

        ##### training params and optimizers ############
        # steps per epoch limited by the smallest source given its batch size
        epoch_len = min(
            [
                len(tot_train_data[indx])
                // (
                    config["training"]["batch_size"][indx]
                    * config["training"]["accumulate_batches"]
                )
                for indx in range(len(tot_train_data))
            ]
        )
        # one optimizer + warmup scheduler per student
        opt1 = torch.optim.Adam(sed_student1.parameters(), 1e-3, betas=(0.9, 0.999))
        opt2 = torch.optim.Adam(sed_student2.parameters(), 1e-3, betas=(0.9, 0.999))
        exp_steps = config["training"]["n_epochs_warmup"] * epoch_len
        exp_scheduler1 = {
            "scheduler": ExponentialWarmup(opt1, config["opt"]["lr"], exp_steps),
            "interval": "step",
        }
        exp_scheduler2 = {
            "scheduler": ExponentialWarmup(opt2, config["opt"]["lr"], exp_steps),
            "interval": "step",
        }
        logger = TensorBoardLogger(
            os.path.dirname(config["log_dir"]), config["log_dir"].split("/")[-1],
        )
        print(f"experiment dir: {logger.log_dir}")
        callbacks = [
            EarlyStopping(
                monitor="val/obj_metric",
                patience=config["training"]["early_stop_patience"],
                verbose=True,
                mode="max"
            ),
            ModelCheckpoint(logger.log_dir, monitor="val/obj_metric", save_top_k=1, mode="max",
                            save_last=True),
        ]
    else:
        # test-only mode: no training objects are needed
        train_dataset = None
        valid_dataset = None
        batch_sampler = None
        opt1 = None
        opt2 = None
        exp_scheduler1 = None
        exp_scheduler2 = None
        logger = True
        callbacks = None

    # LightningModule wrapping both students and their optimizers/schedulers
    desed_training = SEDTask4_2021(
        config,
        encoder=encoder,
        sed_student=[sed_student1, sed_student2],
        opt=[opt1, opt2],
        train_data=train_dataset,
        valid_data=valid_dataset,
        test_data=test_dataset,
        train_sampler=batch_sampler,
        scheduler=[exp_scheduler1, exp_scheduler2],
        fast_dev_run=fast_dev_run,
        evaluation=evaluation
    )

    # Not using the fast_dev_run of Trainer because creates a DummyLogger so cannot check problems with the Logger
    if fast_dev_run:
        flush_logs_every_n_steps = 1
        log_every_n_steps = 1
        limit_train_batches = 2
        limit_val_batches = 2
        limit_test_batches = 2
        n_epochs = 3
    else:
        flush_logs_every_n_steps = 100
        log_every_n_steps = 40
        limit_train_batches = 1.
        limit_val_batches = 1.
        limit_test_batches = 1.
        n_epochs = config["training"]["n_epochs"]

    trainer = pl.Trainer(
        max_epochs=n_epochs,
        callbacks=callbacks,
        gpus=gpus,
        distributed_backend=config["training"].get("backend"),
        accumulate_grad_batches=config["training"]["accumulate_batches"],
        logger=logger,
        resume_from_checkpoint=checkpoint_resume,
        gradient_clip_val=config["training"]["gradient_clip"],
        check_val_every_n_epoch=config["training"]["validation_interval"],
        num_sanity_val_steps=0,
        log_every_n_steps=log_every_n_steps,
        flush_logs_every_n_steps=flush_logs_every_n_steps,
        limit_train_batches=limit_train_batches,
        limit_val_batches=limit_val_batches,
        limit_test_batches=limit_test_batches,
    )

    if test_state_dict is None:
        # train, then reload the best checkpoint for testing
        trainer.fit(desed_training)
        best_path = trainer.checkpoint_callback.best_model_path
        print(f"best model: {best_path}")
        test_state_dict = torch.load(best_path)["state_dict"]

    desed_training.load_state_dict(test_state_dict)
    trainer.test(desed_training)
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Training a SED system for DESED Task")
    parser.add_argument("--conf_file", default="./confs/sed.yaml",
                        help="The configuration file with all the experiment parameters.")
    parser.add_argument("--log_dir", default="./exp/2021_baseline",
                        help="Directory where to save tensorboard logs, saved models, etc.")
    parser.add_argument("--resume_from_checkpoint", default=None,
                        help="Allow the training to be resumed, take as input a previously saved model (.ckpt).")
    parser.add_argument("--test_from_checkpoint", default=None,
                        help="Test the model specified")
    parser.add_argument("--gpus", default="0", help="The number of GPUs to train on, or the gpu to use, default='0', "
                                                    "so uses one GPU indexed by 0.")
    parser.add_argument("--fast_dev_run", action="store_true", default=False,
                        help="Use this option to make a 'fake' run which is useful for development and debugging. "
                             "It uses very few batches and epochs so it won't give any meaningful result.")
    parser.add_argument("--eval_from_checkpoint", default=None, help="Evaluate the model specified")
    args = parser.parse_args()

    with open(args.conf_file, "r") as f:
        configs = yaml.safe_load(f)

    # --eval_from_checkpoint takes precedence over --test_from_checkpoint
    # and switches the run into evaluation mode
    evaluation = False
    test_from_checkpoint = args.test_from_checkpoint
    if args.eval_from_checkpoint is not None:
        test_from_checkpoint = args.eval_from_checkpoint
        evaluation = True

    test_model_state_dict = None
    if test_from_checkpoint is not None:
        # reuse the hyper-parameters stored in the checkpoint, but keep the
        # "data" section from the current config file
        checkpoint = torch.load(test_from_checkpoint)
        configs_ckpt = checkpoint["hyper_parameters"]
        configs_ckpt["data"] = configs["data"]
        configs = configs_ckpt
        print(
            f"loaded model: {test_from_checkpoint} \n"
            f"at epoch: {checkpoint['epoch']}"
        )
        test_model_state_dict = checkpoint["state_dict"]

    #if evaluation:
    #    configs["training"]["batch_size_val"] = 1

    # seed every RNG for reproducibility (skipped when seed is falsy)
    seed = configs["training"]["seed"]
    if seed:
        torch.random.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        pl.seed_everything(seed)

    test_only = test_from_checkpoint is not None
    # NOTE(review): `evaluation` is not forwarded here — verify against
    # resample_data_generate_durations' signature
    resample_data_generate_durations(configs["data"], test_only)
    single_run(
        configs,
        args.log_dir,
        args.gpus,
        args.resume_from_checkpoint,
        test_model_state_dict,
        args.fast_dev_run,
        evaluation
    )
| 11,870 | 34.121302 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/train_sed_SRST.py | import argparse
from copy import deepcopy
import numpy as np
import os
import pandas as pd
import random
import torch
import yaml
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from desed_task.dataio import ConcatDatasetBatchSampler
from desed_task.dataio.datasets import StronglyAnnotatedSet, UnlabelledSet, WeakSet
from desed_task.nnet.CRNN import CRNN
from desed_task.utils.encoder import ManyHotEncoder
from desed_task.utils.schedulers import ExponentialWarmup
from local.classes_dict import classes_labels
from local.sed_trainer_SRST import SEDTask4_2021
from local.resample_folder import resample_folder
from local.utils import generate_tsv_wav_durations
def resample_data_generate_durations(config_data, test_only=False):
    """Resample the dataset folders to the configured sample rate and refresh
    the duration tsv files when needed.

    Args:
        config_data: dict, the "data" section of the configuration
        test_only: bool, if True only the test set is resampled
    """
    if test_only:
        folder_keys = ["test_folder"]
    else:
        folder_keys = [
            "synth_folder",
            "synth_val_folder",
            "weak_folder",
            "unlabeled_folder",
            "test_folder",
        ]

    for folder_key in folder_keys:
        computed = resample_folder(
            config_data[folder_key + "_44k"],
            config_data[folder_key],
            target_fs=config_data["fs"],
        )

    # regenerate a duration tsv when it is missing or when the last resample
    # pass actually recomputed data
    for base_set in ("synth_val", "test"):
        duration_tsv = config_data[base_set + "_dur"]
        if computed or not os.path.exists(duration_tsv):
            generate_tsv_wav_durations(
                config_data[base_set + "_folder"], duration_tsv
            )
def single_run(
    config,
    log_dir,
    gpus,
    checkpoint_resume=None,
    test_state_dict=None,
    fast_dev_run=False,
):
    """
    Running sound event detection baseline (SRST variant, single student)
    Args:
        config (dict): the dictionary of configuration params
        log_dir (str): path to log directory
        gpus (int): number of gpus to use
        checkpoint_resume (str, optional): path to checkpoint to resume from. Defaults to "".
        test_state_dict (dict, optional): if not None, no training is involved. This dictionary is the state_dict
            to be loaded to test the model.
        fast_dev_run (bool, optional): whether to use a run with only one batch at train and validation, useful
            for development purposes.
    """
    config.update({"log_dir": log_dir})

    ##### data prep test ##########
    # label encoder shared by every dataset below
    encoder = ManyHotEncoder(
        list(classes_labels.keys()),
        audio_len=config["data"]["audio_max_len"],
        frame_len=config["feats"]["n_filters"],
        frame_hop=config["feats"]["hop_length"],
        net_pooling=config["data"]["net_subsample"],
        fs=config["data"]["fs"],
    )
    devtest_df = pd.read_csv(config["data"]["test_tsv"], sep="\t")
    devtest_dataset = StronglyAnnotatedSet(
        config["data"]["test_folder"],
        devtest_df,
        encoder,
        return_filename=True,
        pad_to=config["data"]["audio_max_len"],
    )
    test_dataset = devtest_dataset

    ##### model definition ############
    sed_student = CRNN(**config["net"])

    if test_state_dict is None:
        ##### data prep train valid ##########
        synth_df = pd.read_csv(config["data"]["synth_tsv"], sep="\t")
        synth_set = StronglyAnnotatedSet(
            config["data"]["synth_folder"],
            synth_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        # split the weakly-labeled set into train/valid parts
        weak_df = pd.read_csv(config["data"]["weak_tsv"], sep="\t")
        train_weak_df = weak_df.sample(
            frac=config["training"]["weak_split"], random_state=config["training"]["seed"]
        )
        valid_weak_df = weak_df.drop(train_weak_df.index).reset_index(drop=True)
        train_weak_df = train_weak_df.reset_index(drop=True)
        weak_set = WeakSet(
            config["data"]["weak_folder"],
            train_weak_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        unlabeled_set = UnlabelledSet(
            config["data"]["unlabeled_folder"],
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        synth_df_val = pd.read_csv(config["data"]["synth_val_tsv"], sep="\t")
        synth_val = StronglyAnnotatedSet(
            config["data"]["synth_val_folder"],
            synth_df_val,
            encoder,
            return_filename=True,
            pad_to=config["data"]["audio_max_len"],
        )
        weak_val = WeakSet(
            config["data"]["weak_folder"],
            valid_weak_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
            return_filename=True,
        )
        # batches mix the three training sources with per-source batch sizes
        tot_train_data = [synth_set, weak_set, unlabeled_set]
        train_dataset = torch.utils.data.ConcatDataset(tot_train_data)
        batch_sizes = config["training"]["batch_size"]
        samplers = [torch.utils.data.RandomSampler(x) for x in tot_train_data]
        batch_sampler = ConcatDatasetBatchSampler(samplers, batch_sizes)
        valid_dataset = torch.utils.data.ConcatDataset(
            [synth_val, weak_val]
        )

        ##### training params and optimizers ############
        # steps per epoch limited by the smallest source given its batch size
        epoch_len = min(
            [
                len(tot_train_data[indx])
                // (
                    config["training"]["batch_size"][indx]
                    * config["training"]["accumulate_batches"]
                )
                for indx in range(len(tot_train_data))
            ]
        )
        opt = torch.optim.Adam(sed_student.parameters(), 1e-3, betas=(0.9, 0.999))
        exp_steps = config["training"]["n_epochs_warmup"] * epoch_len
        exp_scheduler = {
            "scheduler": ExponentialWarmup(opt, config["opt"]["lr"], exp_steps),
            "interval": "step",
        }
        logger = TensorBoardLogger(
            os.path.dirname(config["log_dir"]), config["log_dir"].split("/")[-1],
        )
        print(f"experiment dir: {logger.log_dir}")
        callbacks = [
            EarlyStopping(
                monitor="val/obj_metric",
                patience=config["training"]["early_stop_patience"],
                verbose=True,
                mode="max"
            ),
            ModelCheckpoint(logger.log_dir, monitor="val/obj_metric", save_top_k=1, mode="max",
                            save_last=True),
        ]
    else:
        # test-only mode: no training objects are needed
        train_dataset = None
        valid_dataset = None
        batch_sampler = None
        opt = None
        exp_scheduler = None
        logger = True
        callbacks = None

    # LightningModule wrapping the student and its optimizer/scheduler
    desed_training = SEDTask4_2021(
        config,
        encoder=encoder,
        sed_student=sed_student,
        opt=opt,
        train_data=train_dataset,
        valid_data=valid_dataset,
        test_data=test_dataset,
        train_sampler=batch_sampler,
        scheduler=exp_scheduler,
        fast_dev_run=fast_dev_run,
    )

    # Not using the fast_dev_run of Trainer because creates a DummyLogger so cannot check problems with the Logger
    if fast_dev_run:
        flush_logs_every_n_steps = 1
        log_every_n_steps = 1
        limit_train_batches = 2
        limit_val_batches = 2
        limit_test_batches = 2
        n_epochs = 3
    else:
        flush_logs_every_n_steps = 100
        log_every_n_steps = 40
        limit_train_batches = 1.
        limit_val_batches = 1.
        limit_test_batches = 1.
        n_epochs = config["training"]["n_epochs"]

    trainer = pl.Trainer(
        max_epochs=n_epochs,
        callbacks=callbacks,
        gpus=gpus,
        distributed_backend=config["training"].get("backend"),
        accumulate_grad_batches=config["training"]["accumulate_batches"],
        logger=logger,
        resume_from_checkpoint=checkpoint_resume,
        gradient_clip_val=config["training"]["gradient_clip"],
        check_val_every_n_epoch=config["training"]["validation_interval"],
        num_sanity_val_steps=0,
        log_every_n_steps=log_every_n_steps,
        flush_logs_every_n_steps=flush_logs_every_n_steps,
        limit_train_batches=limit_train_batches,
        limit_val_batches=limit_val_batches,
        limit_test_batches=limit_test_batches,
    )

    if test_state_dict is None:
        # train, then reload the best checkpoint for testing
        trainer.fit(desed_training)
        best_path = trainer.checkpoint_callback.best_model_path
        print(f"best model: {best_path}")
        test_state_dict = torch.load(best_path)["state_dict"]

    desed_training.load_state_dict(test_state_dict)
    trainer.test(desed_training)
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Training a SED system for DESED Task")
    parser.add_argument("--conf_file", default="./confs/sed.yaml",
                        help="The configuration file with all the experiment parameters.")
    parser.add_argument("--log_dir", default="./exp/2021_baseline",
                        help="Directory where to save tensorboard logs, saved models, etc.")
    parser.add_argument("--resume_from_checkpoint", default=None,
                        help="Allow the training to be resumed, take as input a previously saved model (.ckpt).")
    parser.add_argument("--test_from_checkpoint", default=None,
                        help="Test the model specified")
    parser.add_argument("--gpus", default="0", help="The number of GPUs to train on, or the gpu to use, default='0', "
                                                    "so uses one GPU indexed by 0.")
    parser.add_argument("--fast_dev_run", action="store_true", default=False,
                        help="Use this option to make a 'fake' run which is useful for development and debugging. "
                             "It uses very few batches and epochs so it won't give any meaningful result.")
    args = parser.parse_args()

    with open(args.conf_file, "r") as f:
        configs = yaml.safe_load(f)

    test_from_checkpoint = args.test_from_checkpoint
    test_model_state_dict = None
    if test_from_checkpoint is not None:
        # reuse the hyper-parameters stored in the checkpoint, but keep the
        # "data" section from the current config file
        checkpoint = torch.load(test_from_checkpoint)
        configs_ckpt = checkpoint["hyper_parameters"]
        configs_ckpt["data"] = configs["data"]
        configs = configs_ckpt
        print(
            f"loaded model: {test_from_checkpoint} \n"
            f"at epoch: {checkpoint['epoch']}"
        )
        test_model_state_dict = checkpoint["state_dict"]

    # seed every RNG for reproducibility (skipped when seed is falsy)
    seed = configs["training"]["seed"]
    if seed:
        torch.random.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        pl.seed_everything(seed)

    test_only = test_from_checkpoint is not None
    resample_data_generate_durations(configs["data"], test_only)
    single_run(
        configs,
        args.log_dir,
        args.gpus,
        args.resume_from_checkpoint,
        test_model_state_dict,
        args.fast_dev_run,
    )
| 10,734 | 34.429043 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/train_sed.py | import argparse
from copy import deepcopy
import numpy as np
import os
import pandas as pd
import random
import torch
import yaml
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from desed_task.dataio import ConcatDatasetBatchSampler
from desed_task.dataio.datasets import StronglyAnnotatedSet, UnlabeledSet, WeakSet
from desed_task.nnet.CRNN import CRNN, RCRNN
from desed_task.utils.encoder import ManyHotEncoder
from desed_task.utils.schedulers import ExponentialWarmup
from local.classes_dict import classes_labels
from local.sed_trainer import SEDTask4_2021
from local.resample_folder import resample_folder
from local.utils import generate_tsv_wav_durations
def resample_data_generate_durations(config_data, test_only=False, evaluation=False):
    """Resample the dataset folders to the target sample rate and (re)generate
    the duration tsv files.

    Args:
        config_data: dict, the "data" section of the configuration
        test_only: bool, if True only the test (or evaluation) set is processed
        evaluation: bool, if True process the evaluation folder instead of the
            test folder (durations are not regenerated in that case)
    """
    if not test_only:
        dsets = [
            "synth_folder",
            "synth_val_folder",
            "weak_folder",
            "unlabeled_folder",
            "test_folder",
        ]
    elif not evaluation:
        # BUGFIX: this branch used to be `elif test_only:`, which made the
        # final `else` unreachable, so the eval_folder case could never be
        # selected; dispatch on `evaluation` instead.
        dsets = ["test_folder"]
    else:
        dsets = ["eval_folder"]

    for dset in dsets:
        # NOTE(review): `computed` only reflects the LAST folder processed;
        # presumably any recomputed folder should trigger duration
        # regeneration — confirm intended semantics before changing.
        computed = resample_folder(
            config_data[dset + "_44k"], config_data[dset], target_fs=config_data["fs"]
        )

    if not evaluation:
        for base_set in ["synth_val", "test"]:
            if not os.path.exists(config_data[base_set + "_dur"]) or computed:
                generate_tsv_wav_durations(
                    config_data[base_set + "_folder"], config_data[base_set + "_dur"]
                )
def single_run(
config,
log_dir,
gpus,
checkpoint_resume=None,
test_state_dict=None,
fast_dev_run=False,
evaluation=False
):
"""
Running sound event detection baselin
Args:
config (dict): the dictionary of configuration params
log_dir (str): path to log directory
gpus (int): number of gpus to use
checkpoint_resume (str, optional): path to checkpoint to resume from. Defaults to "".
test_state_dict (dict, optional): if not None, no training is involved. This dictionary is the state_dict
to be loaded to test the model.
fast_dev_run (bool, optional): whether to use a run with only one batch at train and validation, useful
for development purposes.
"""
config.update({"log_dir": log_dir})
##### data prep test ##########
encoder = ManyHotEncoder(
list(classes_labels.keys()),
audio_len=config["data"]["audio_max_len"],
frame_len=config["feats"]["n_filters"],
frame_hop=config["feats"]["hop_length"],
net_pooling=config["data"]["net_subsample"],
fs=config["data"]["fs"],
)
if not evaluation:
devtest_df = pd.read_csv(config["data"]["test_tsv"], sep="\t")
devtest_dataset = StronglyAnnotatedSet(
config["data"]["test_folder"],
devtest_df,
encoder,
return_filename=True,
pad_to=config["data"]["audio_max_len"],
)
else:
devtest_dataset = UnlabeledSet(
config["data"]["eval_folder"],
encoder,
pad_to=10,
return_filename=True
)
test_dataset = devtest_dataset
##### model definition ############
sed_student = CRNN(**config["net"])
if test_state_dict is None:
##### data prep train valid ##########
synth_df = pd.read_csv(config["data"]["synth_tsv"], sep="\t")
synth_set = StronglyAnnotatedSet(
config["data"]["synth_folder"],
synth_df,
encoder,
pad_to=config["data"]["audio_max_len"],
)
weak_df = pd.read_csv(config["data"]["weak_tsv"], sep="\t")
train_weak_df = weak_df.sample(
frac=config["training"]["weak_split"], random_state=config["training"]["seed"]
)
valid_weak_df = weak_df.drop(train_weak_df.index).reset_index(drop=True)
train_weak_df = train_weak_df.reset_index(drop=True)
weak_set = WeakSet(
config["data"]["weak_folder"],
train_weak_df,
encoder,
pad_to=config["data"]["audio_max_len"],
)
unlabeled_set = UnlabeledSet(
config["data"]["unlabeled_folder"],
encoder,
pad_to=config["data"]["audio_max_len"],
)
synth_df_val = pd.read_csv(config["data"]["synth_val_tsv"], sep="\t")
synth_val = StronglyAnnotatedSet(
config["data"]["synth_val_folder"],
synth_df_val,
encoder,
return_filename=True,
pad_to=config["data"]["audio_max_len"],
)
weak_val = WeakSet(
config["data"]["weak_folder"],
valid_weak_df,
encoder,
pad_to=config["data"]["audio_max_len"],
return_filename=True,
)
tot_train_data = [synth_set, weak_set, unlabeled_set]
train_dataset = torch.utils.data.ConcatDataset(tot_train_data)
batch_sizes = config["training"]["batch_size"]
samplers = [torch.utils.data.RandomSampler(x) for x in tot_train_data]
batch_sampler = ConcatDatasetBatchSampler(samplers, batch_sizes)
valid_dataset = torch.utils.data.ConcatDataset(
[synth_val, weak_val]
)
##### training params and optimizers ############
epoch_len = min(
[
len(tot_train_data[indx])
// (
config["training"]["batch_size"][indx]
* config["training"]["accumulate_batches"]
)
for indx in range(len(tot_train_data))
]
)
# print(epoch_len) => 118
opt = torch.optim.Adam(sed_student.parameters(), 1e-3, betas=(0.9, 0.999))
exp_steps = config["training"]["n_epochs_warmup"] * epoch_len
exp_scheduler = {
"scheduler": ExponentialWarmup(opt, config["opt"]["lr"], exp_steps),
"interval": "step",
}
logger = TensorBoardLogger(
os.path.dirname(config["log_dir"]), config["log_dir"].split("/")[-1],
)
print(f"experiment dir: {logger.log_dir}")
callbacks = [
EarlyStopping(
monitor="val/obj_metric",
patience=config["training"]["early_stop_patience"],
verbose=True,
mode="max"
),
ModelCheckpoint(logger.log_dir, monitor="val/obj_metric", save_top_k=1, mode="max",
save_last=True),
]
else:
train_dataset = None
valid_dataset = None
batch_sampler = None
opt = None
exp_scheduler = None
logger = True
callbacks = None
desed_training = SEDTask4_2021(
config,
encoder=encoder,
sed_student=sed_student,
opt=opt,
train_data=train_dataset,
valid_data=valid_dataset,
test_data=test_dataset,
train_sampler=batch_sampler,
scheduler=exp_scheduler,
fast_dev_run=fast_dev_run,
evaluation=evaluation
)
# Not using the fast_dev_run of Trainer because creates a DummyLogger so cannot check problems with the Logger
if fast_dev_run:
flush_logs_every_n_steps = 1
log_every_n_steps = 1
limit_train_batches = 2
limit_val_batches = 2
limit_test_batches = 2
n_epochs = 3
else:
flush_logs_every_n_steps = 100
log_every_n_steps = 40
limit_train_batches = 1.
limit_val_batches = 1.
limit_test_batches = 1.
n_epochs = config["training"]["n_epochs"]
trainer = pl.Trainer(
max_epochs=n_epochs,
callbacks=callbacks,
gpus=gpus,
distributed_backend=config["training"].get("backend"),
accumulate_grad_batches=config["training"]["accumulate_batches"],
logger=logger,
resume_from_checkpoint=checkpoint_resume,
gradient_clip_val=config["training"]["gradient_clip"],
check_val_every_n_epoch=config["training"]["validation_interval"],
num_sanity_val_steps=0,
log_every_n_steps=log_every_n_steps,
flush_logs_every_n_steps=flush_logs_every_n_steps,
limit_train_batches=limit_train_batches,
limit_val_batches=limit_val_batches,
limit_test_batches=limit_test_batches,
)
if test_state_dict is None:
trainer.fit(desed_training)
best_path = trainer.checkpoint_callback.best_model_path
print(f"best model: {best_path}")
test_state_dict = torch.load(best_path)["state_dict"]
desed_training.load_state_dict(test_state_dict)
trainer.test(desed_training)
if __name__ == "__main__":
    # CLI entry point: parse arguments, load/merge configuration, seed RNGs,
    # prepare the data and launch a single training/test/evaluation run.
    parser = argparse.ArgumentParser("Training a SED system for DESED Task")
    parser.add_argument("--conf_file", default="./confs/sed.yaml",
                        help="The configuration file with all the experiment parameters.")
    parser.add_argument("--log_dir", default="./exp/2021_baseline",
                        help="Directory where to save tensorboard logs, saved models, etc.")
    parser.add_argument("--resume_from_checkpoint", default=None,
                        help="Allow the training to be resumed, take as input a previously saved model (.ckpt).")
    parser.add_argument("--test_from_checkpoint", default=None,
                        help="Test the model specified")
    parser.add_argument("--gpus", default="0", help="The number of GPUs to train on, or the gpu to use, default='0', "
                                                    "so uses one GPU indexed by 0.")
    parser.add_argument("--fast_dev_run", action="store_true", default=False,
                        help="Use this option to make a 'fake' run which is useful for development and debugging. "
                             "It uses very few batches and epochs so it won't give any meaningful result.")
    parser.add_argument("--eval_from_checkpoint", default=None, help="Evaluate the model specified")
    args = parser.parse_args()

    with open(args.conf_file, "r") as f:
        configs = yaml.safe_load(f)

    # --eval_from_checkpoint takes precedence over --test_from_checkpoint and
    # switches the run to evaluation mode (unlabeled eval set, no ground truth).
    evaluation = False
    test_from_checkpoint = args.test_from_checkpoint
    if args.eval_from_checkpoint is not None:
        test_from_checkpoint = args.eval_from_checkpoint
        evaluation = True

    test_model_state_dict = None
    if test_from_checkpoint is not None:
        # Restore the hyper-parameters stored in the checkpoint, but keep the
        # "data" section of the current config (paths may differ per machine).
        checkpoint = torch.load(test_from_checkpoint)
        configs_ckpt = checkpoint["hyper_parameters"]
        configs_ckpt["data"] = configs["data"]
        configs = configs_ckpt
        print(
            f"loaded model: {test_from_checkpoint} \n"
            f"at epoch: {checkpoint['epoch']}"
        )
        test_model_state_dict = checkpoint["state_dict"]
    # if evaluation:
    #     configs["training"]["batch_size_val"] = 1

    # Seed every RNG source for reproducibility when a seed is configured.
    # NOTE(review): `if seed` skips seeding when seed == 0 — confirm intended.
    seed = configs["training"]["seed"]
    if seed:
        torch.random.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        pl.seed_everything(seed)

    # When only testing, data preparation can skip the training subsets.
    test_only = test_from_checkpoint is not None
    resample_data_generate_durations(configs["data"], test_only)
    single_run(
        configs,
        args.log_dir,
        args.gpus,
        args.resume_from_checkpoint,
        test_model_state_dict,
        args.fast_dev_run,
        evaluation
    )
| 11,519 | 34.015198 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/resample_folder.py | import argparse
import glob
import os
from pathlib import Path
import librosa
import torch
import torchaudio
import tqdm
# Command-line interface for the stand-alone resampling tool.
# NOTE(review): despite the description, the glob below is not recursive.
parser = argparse.ArgumentParser("Resample a folder recursively")
parser.add_argument(
    "--in_dir",
    type=str,
    default="/media/sam/bx500/DCASE_DATA/dataset/audio/validation/",
)
parser.add_argument("--out_dir", type=str, default="/tmp/val16k")
# No type= here, so the value arrives as str from the CLI; the __main__ block
# converts it with int() before use.
parser.add_argument("--target_fs", default=16000)
parser.add_argument("--regex", type=str, default="*.wav")
def resample(audio, orig_fs, target_fs):
    """
    Resample the audio given as input at the target_fs sample rate, if the target
    sample rate and the original sample rate are different.

    Args:
        audio (Tensor): audio to resample, shaped (channels, samples)
        orig_fs (int): original sample rate
        target_fs (int): target sample rate

    Returns:
        Tensor: audio resampled (or an identical copy when the rates match)
    """
    out = []
    # Resample channel by channel; librosa operates on 1-D numpy arrays.
    for c in range(audio.shape[0]):
        tmp = audio[c].detach().cpu().numpy()
        if target_fs != orig_fs:
            # Keyword arguments are required since librosa 0.10 (sr args are
            # keyword-only); this form also works on older librosa versions.
            tmp = librosa.resample(tmp, orig_sr=orig_fs, target_sr=target_fs)
        out.append(torch.from_numpy(tmp))
    out = torch.stack(out)
    return out
def resample_folder(in_dir, out_dir, target_fs=16000, regex="*.wav"):
    """
    Resample the audio files contained in the in_dir folder and save them in
    out_dir, mirroring the relative layout of in_dir.

    Args:
        in_dir (str): path to audio directory (audio to be resampled)
        out_dir (str): path to audio resampled directory
        target_fs (int, optional): target sample rate. Defaults to 16000.
        regex (str, optional): glob pattern for files to process. Defaults to "*.wav".
            NOTE(review): the glob is not recursive despite the CLI description.

    Returns:
        bool: True if the resampling was (re)computed, False if it was skipped.
    """
    compute = True
    files = glob.glob(os.path.join(in_dir, regex))
    if os.path.exists(out_dir):
        out_files = glob.glob(os.path.join(out_dir, regex))
        # Skip the work when the output already holds as many files as the input.
        # NOTE(review): a pure count comparison cannot detect renamed or
        # partially-written files — confirm this heuristic is acceptable.
        if len(files) == len(out_files):
            compute = False
    if compute:
        # Fix: set the backend once, BEFORE any load. Previously this was done
        # inside the loop after the first torchaudio.load, so the first file
        # was loaded with whatever the default backend happened to be.
        torchaudio.set_audio_backend("sox_io")
        for f in tqdm.tqdm(files):
            audio, orig_fs = torchaudio.load(f)
            audio = resample(audio, orig_fs, target_fs)
            # Recreate the input file's relative directory under out_dir.
            os.makedirs(
                Path(os.path.join(out_dir, Path(f).relative_to(Path(in_dir)))).parent,
                exist_ok=True,
            )
            torchaudio.save(
                os.path.join(out_dir, Path(f).relative_to(Path(in_dir))),
                audio,
                target_fs,
            )
    return compute
if __name__ == "__main__":
    # CLI entry point: resample every file matching --regex in --in_dir into
    # --out_dir at --target_fs Hz (target_fs arrives as str, hence the int()).
    args = parser.parse_args()
    resample_folder(args.in_dir, args.out_dir, int(args.target_fs), args.regex)
| 2,556 | 29.807229 | 102 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/sed_trainer_SRST.py | import os
import random
from copy import deepcopy
from pathlib import Path
import local.config as cfg
import pandas as pd
import pytorch_lightning as pl
import torch
from torchaudio.transforms import AmplitudeToDB, MelSpectrogram
from desed_task.data_augm import mixup, add_noise
from desed_task.utils.scaler import TorchScaler
import numpy as np
from .utils import (
batched_decode_preds,
log_sedeval_metrics,
JSD,
)
from desed_task.evaluation.evaluation_measures import (
compute_per_intersection_macro_f1,
compute_psds_from_operating_points,
)
class SEDTask4_2021(pl.LightningModule):
    """ PyTorch Lightning module for the SED 2021 baseline (mean-teacher style
    training with pseudo-labels estimated from the teacher's output).

    Args:
        hparams: dict, the dictionary to be used for the current experiment.
        encoder: ManyHotEncoder object, object to encode and decode labels.
        sed_student: torch.Module, the student model to be trained. The teacher model will be
            created as an EMA copy of it.
        opt: torch.optimizer.Optimizer object, the optimizer to be used
        train_data: torch.utils.data.Dataset subclass object, the training data to be used.
        valid_data: torch.utils.data.Dataset subclass object, the validation data to be used.
        test_data: torch.utils.data.Dataset subclass object, the test data to be used.
        train_sampler: torch.utils.data.Sampler subclass object, the sampler to be used in the training dataloader.
        scheduler: asteroid.engine.schedulers.BaseScheduler subclass object, the scheduler to be used. This is
            used to apply ramp-up during training for example.
        fast_dev_run: bool, whether to launch a run with only one batch for each set, this is for development purpose,
            to test the code runs.
    """

    def __init__(
        self,
        hparams,
        encoder,
        sed_student,
        opt=None,
        train_data=None,
        valid_data=None,
        test_data=None,
        train_sampler=None,
        scheduler=None,
        fast_dev_run=False,
    ):
        super(SEDTask4_2021, self).__init__()
        # NOTE(review): direct assignment to self.hparams is deprecated in
        # newer pytorch-lightning (use self.save_hyperparameters) — works on
        # the PL version this recipe targets.
        self.hparams = hparams
        self.encoder = encoder
        self.sed_student = sed_student
        # The teacher starts as an exact copy of the student; it is never
        # trained by backprop, only updated via EMA (see update_ema).
        self.sed_teacher = deepcopy(sed_student)
        self.opt = opt
        self.train_data = train_data
        self.valid_data = valid_data
        self.test_data = test_data
        self.train_sampler = train_sampler
        self.scheduler = scheduler
        self.fast_dev_run = fast_dev_run
        if self.fast_dev_run:
            # keep dataloading single-threaded for quick debug runs
            self.num_workers = 1
        else:
            self.num_workers = self.hparams["training"]["num_workers"]

        # Lookup table of candidate label combinations used to build pseudo
        # strong targets in training_step. cfg.class_label is presumably a
        # (n_combinations, n_classes) multi-hot table — TODO confirm.
        # NOTE(review): hard-coded .cuda() breaks CPU-only runs.
        self.softmax = torch.nn.Softmax(dim=1)
        self.jsd = JSD()
        self.class_label = torch.tensor(cfg.class_label).cuda()

        feat_params = self.hparams["feats"]
        self.mel_spec = MelSpectrogram(
            sample_rate=feat_params["sample_rate"],
            n_fft=feat_params["n_window"],
            win_length=feat_params["n_window"],
            hop_length=feat_params["hop_length"],
            f_min=feat_params["f_min"],
            f_max=feat_params["f_max"],
            n_mels=feat_params["n_mels"],
            window_fn=torch.hamming_window,
            wkwargs={"periodic": False},
            power=1,
        )

        # Teacher parameters are detached so gradients never flow into them.
        for param in self.sed_teacher.parameters():
            param.detach_()

        # instantiating losses
        self.supervised_loss = torch.nn.BCELoss()
        if hparams["training"]["self_sup_loss"] == "mse":
            self.selfsup_loss = torch.nn.MSELoss()
        elif hparams["training"]["self_sup_loss"] == "bce":
            self.selfsup_loss = torch.nn.BCELoss()
        else:
            raise NotImplementedError

        # for weak labels we simply compute f1 score
        self.get_weak_student_f1_seg_macro = pl.metrics.classification.F1(
            len(self.encoder.labels),
            average="macro",
            multilabel=True,
            compute_on_step=False,
        )
        self.get_weak_teacher_f1_seg_macro = pl.metrics.classification.F1(
            len(self.encoder.labels),
            average="macro",
            multilabel=True,
            compute_on_step=False,
        )

        self.scaler = self._init_scaler()

        # buffer for event based scores which we compute using sed-eval
        self.val_buffer_student_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_student_test = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_test = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }

        # Evenly spaced decision thresholds used to trace PSDS ROC-like curves
        # at test time (midpoints of 1/n_test_thresholds-wide bins).
        test_n_thresholds = self.hparams["training"]["n_test_thresholds"]
        test_thresholds = np.arange(
            1 / (test_n_thresholds * 2), 1, 1 / test_n_thresholds
        )
        self.test_psds_buffer_student = {k: pd.DataFrame() for k in test_thresholds}
        self.test_psds_buffer_teacher = {k: pd.DataFrame() for k in test_thresholds}
        self.decoded_student_05_buffer = pd.DataFrame()
        self.decoded_teacher_05_buffer = pd.DataFrame()

    def update_ema(self, alpha, global_step, model, ema_model):
        """ Update teacher model parameters with an exponential moving average
        of the student parameters.

        Args:
            alpha: float, the factor to be used between each updated step.
            global_step: int, the current global step to be used.
            model: torch.Module, student model to use
            ema_model: torch.Module, teacher model to use
        """
        # Use the true average until the exponential average is more correct
        alpha = min(1 - 1 / (global_step + 1), alpha)
        for ema_params, params in zip(ema_model.parameters(), model.parameters()):
            # ema = alpha * ema + (1 - alpha) * student, done in place
            ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)

    def _init_scaler(self):
        """Scaler initialization.

        Either builds a per-instance min-max scaler, or a dataset-level scaler
        fitted on the training set (and cached on disk when a savepath is set).

        Raises:
            NotImplementedError: in case of not Implemented scaler
        Returns:
            TorchScaler: returns the scaler
        """
        if self.hparams["scaler"]["statistic"] == "instance":
            # instance scaling needs no fitting, return immediately
            scaler = TorchScaler("instance", "minmax", self.hparams["scaler"]["dims"])
            return scaler
        elif self.hparams["scaler"]["statistic"] == "dataset":
            # we fit the scaler
            scaler = TorchScaler(
                "dataset",
                self.hparams["scaler"]["normtype"],
                self.hparams["scaler"]["dims"],
            )
        else:
            raise NotImplementedError
        # Reuse a previously fitted scaler from disk when available.
        if self.hparams["scaler"]["savepath"] is not None:
            if os.path.exists(self.hparams["scaler"]["savepath"]):
                scaler = torch.load(self.hparams["scaler"]["savepath"])
                print(
                    "Loaded Scaler from previous checkpoint from {}".format(
                        self.hparams["scaler"]["savepath"]
                    )
                )
                return scaler
        # Fit on the log-mel features of the training set.
        self.train_loader = self.train_dataloader()
        scaler.fit(
            self.train_loader,
            transform_func=lambda x: self.take_log(self.mel_spec(x[0])),
        )
        if self.hparams["scaler"]["savepath"] is not None:
            torch.save(scaler, self.hparams["scaler"]["savepath"])
            print(
                "Saving Scaler from previous checkpoint at {}".format(
                    self.hparams["scaler"]["savepath"]
                )
            )
        return scaler

    def take_log(self, mels):
        """ Apply the log transformation to mel spectrograms.

        Args:
            mels: torch.Tensor, mel spectrograms for which to apply log.

        Returns:
            Tensor: logarithmic mel spectrogram of the mel spectrogram given as input
        """
        amp_to_db = AmplitudeToDB(stype="amplitude")
        amp_to_db.amin = 1e-5  # amin= 1e-5 as in librosa
        return amp_to_db(mels).clamp(min=-50, max=80)  # clamp to reproduce old code

    def training_step(self, batch, batch_indx):
        """ Applying the training for one batch (a step). Used during trainer.fit

        The batch is the concatenation of synthetic (strong), weak and
        unlabeled examples; supervised BCE is applied on the labeled parts and
        a self-supervised consistency loss is applied against pseudo strong
        targets estimated from the teacher's predictions.

        Args:
            batch: torch.Tensor, batch input tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
           torch.Tensor, the loss to take into account.
        """
        audio, labels, padded_indxs = batch
        # per-subset batch sizes as configured (synthetic / weak / unlabeled)
        indx_synth, indx_weak, indx_unlabelled = self.hparams["training"]["batch_size"]
        features = self.mel_spec(audio)
        batch_num = features.shape[0]

        # deriving masks for each dataset — the sampler concatenates the
        # subsets in order: [synthetic | weak | unlabeled]
        strong_mask = torch.zeros(batch_num).to(features).bool()
        weak_mask = torch.zeros(batch_num).to(features).bool()
        strong_mask[:indx_synth] = 1
        weak_mask[indx_synth : indx_weak + indx_synth] = 1

        # deriving weak labels by collapsing the time axis of the strong ones
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()

        # mixup applied with probability 0.5, separately on weak and strong parts
        mixup_type = self.hparams["training"].get("mixup")
        if mixup_type is not None and 0.5 > random.random():
            features[weak_mask], labels_weak = mixup(
                features[weak_mask], labels_weak, mixup_label_type=mixup_type
            )
            features[strong_mask], labels[strong_mask] = mixup(
                features[strong_mask], labels[strong_mask], mixup_label_type=mixup_type
            )

        # sed students forward
        strong_preds_student, weak_preds_student = self.sed_student(
            self.scaler(self.take_log(features))
        )
        # supervised loss on strong labels
        loss_strong = self.supervised_loss(
            strong_preds_student[strong_mask], labels[strong_mask]
        )
        # supervised loss on weakly labelled
        loss_weak = self.supervised_loss(weak_preds_student[weak_mask], labels_weak)
        # total supervised loss
        tot_loss_supervised = loss_strong + loss_weak

        with torch.no_grad():
            # Teacher forward on a noise-perturbed copy of the features.
            ema_features = self.scaler(self.take_log(add_noise(features)))
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher(ema_features)

            # --- Pseudo strong-target estimation -------------------------------
            # For every frame, treat the teacher's per-class probabilities as
            # independent Bernoulli outputs and evaluate the log-likelihood of
            # every label combination with at most 3 simultaneously active
            # classes (K = 0, 1, 2, 3 below). A softmax over those combination
            # scores gives a posterior; the expected multi-hot vector under
            # that posterior (via the class_label lookup table) becomes the
            # pseudo target for the frame.
            # NOTE(review): 156 frames per clip is hard-coded here (and the
            # dead comment below mentions 157) — confirm it matches the
            # network's output resolution for 10-second clips.
            nClass = self.hparams['net']['nclass']
            est_strong_target = torch.zeros(batch_num,156,nClass).cuda()
            for bter in range(batch_num):
                sp = strong_preds_teacher[bter]
                # clamp to keep the logs finite
                sp = torch.clamp(sp, 1.0e-4, 1-1.0e-4)
                # (frames, classes) log-probs of "class active" / "class inactive"
                p_h1 = torch.log(sp.permute((1,0)))
                p_h0 = torch.log(1-sp.permute(1,0))
                # K = 0: log-likelihood of the all-silent combination
                P0 = p_h0.sum(1)
                # K = 1: one active class — swap one p_h0 term for p_h1
                P1 = P0[:,None] + p_h1 - p_h0
                #P = torch.cat([P0.reshape(157,1), P1], 1)
                # K = 2: all unordered pairs of active classes
                P2 = []
                for cter in range(1,nClass):
                    P2.append(P1[:,:-cter]+P1[:,cter:])
                P2 = torch.cat(P2, 1)
                P2 = P2 - P0[:,None]
                #P = torch.cat([P0.reshape(156,1), P1, P2], 1)
                # K = 3: all unordered triples of active classes
                P3 = []
                for cter1 in range(1,nClass):
                    for cter2 in range(1,nClass-cter1):
                        P3.append(P1[:,:-(cter1+cter2)]+P1[:,cter1:-cter2]+P1[:,(cter1+cter2):])
                P3 = torch.cat(P3,1)
                P3 = P3 - 2*P0[:,None]
                # posterior over all combinations (renormalized after sorting)
                P = torch.cat([P0.reshape(156,1), P1, P2, P3], 1)
                P = self.softmax(P)
                prob_v, prob_i = torch.sort(P, dim=1, descending=True)
                norm_p = prob_v.sum(1)
                prob_v = prob_v/norm_p[:,None]
                # expected multi-hot target: probability-weighted sum of the
                # combinations' label vectors
                cl = self.class_label[prob_i.tolist(),:]
                cl = torch.mul(cl, prob_v[:,:,None]).sum(1)
                est_strong_target[bter,:,:] = torch.squeeze(cl[:156,:])
            est_strong_target = est_strong_target.permute((0,2,1))
            est_weak_target = est_strong_target.mean(2)

            # Teacher supervised losses are computed for monitoring only
            # (inside no_grad, so they never contribute gradients).
            loss_strong_teacher = self.supervised_loss(
                strong_preds_teacher[strong_mask], labels[strong_mask]
            )
            loss_weak_teacher = self.supervised_loss(
                weak_preds_teacher[weak_mask], labels_weak
            )

        # we apply consistency between the predictions, use the scheduler for learning rate (to be changed ?)
        weight = (
            self.hparams["training"]["const_max"]
            * self.scheduler["scheduler"]._get_scaling_factor()
        )

        # Reliability weights: down-weight the self-supervised loss when the
        # pseudo targets disagree (per JSD) with the available ground truth.
        strong_reliability = weight*(1-self.jsd(est_strong_target[strong_mask], labels[strong_mask]).mean())
        weak_reliability = weight*(1-self.jsd(est_weak_target[weak_mask], labels_weak).mean())

        # NOTE(review): the hard-coded 24 looks like indx_synth + indx_weak
        # (i.e. the unlabeled part of the batch) — confirm against the
        # configured batch_size triplet; it silently breaks if those change.
        strong_self_sup_loss = self.selfsup_loss(
            strong_preds_student[24:], est_strong_target[24:]
        )
        weak_self_sup_loss = self.selfsup_loss(
            weak_preds_student[weak_mask], est_weak_target[weak_mask]
        )
        tot_self_loss = strong_reliability*strong_self_sup_loss + weak_reliability*weak_self_sup_loss

        tot_loss = tot_loss_supervised + tot_self_loss

        self.log("train/student/loss_strong", loss_strong)
        self.log("train/student/loss_weak", loss_weak)
        self.log("train/teacher/loss_strong", loss_strong_teacher)
        self.log("train/teacher/loss_weak", loss_weak_teacher)
        self.log("train/step", self.scheduler["scheduler"].step_num, prog_bar=True)
        self.log("train/student/tot_loss", tot_loss, prog_bar=True)
        self.log("train/weight", weight)
        # NOTE(review): this key says "tot_supervised" but logs
        # strong_self_sup_loss — likely a copy/paste slip in the metric name.
        self.log("train/student/tot_supervised", strong_self_sup_loss, prog_bar=True)
        self.log("train/student/weak_self_sup_loss", weak_self_sup_loss)
        self.log("train/student/strong_self_sup_loss", strong_self_sup_loss)
        self.log("train/lr", self.opt.param_groups[-1]["lr"], prog_bar=True)

        return {'loss': tot_loss}

    def on_before_zero_grad(self, *args, **kwargs):
        # update EMA teacher after every optimizer step
        self.update_ema(
            self.hparams["training"]["ema_factor"],
            self.scheduler["scheduler"].step_num,
            self.sed_student,
            self.sed_teacher,
        )

    def validation_step(self, batch, batch_indx):
        """ Apply validation to a batch (step). Used during trainer.fit

        Validation mixes the weak validation split and the synthetic
        validation split; filenames are used to tell them apart.

        Args:
            batch: torch.Tensor, input batch tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
        """
        audio, labels, padded_indxs, filenames = batch

        # prediction for student
        logmels = self.scaler(self.take_log(self.mel_spec(audio)))
        strong_preds_student, weak_preds_student = self.sed_student(logmels)
        # prediction for teacher
        strong_preds_teacher, weak_preds_teacher = self.sed_teacher(logmels)

        # we derive masks for each dataset based on folders of filenames
        mask_weak = (
            torch.tensor(
                [
                    str(Path(x).parent)
                    == str(Path(self.hparams["data"]["weak_folder"]))
                    for x in filenames
                ]
            )
            .to(audio)
            .bool()
        )
        mask_synth = (
            torch.tensor(
                [
                    str(Path(x).parent)
                    == str(Path(self.hparams["data"]["synth_val_folder"]))
                    for x in filenames
                ]
            )
            .to(audio)
            .bool()
        )

        if torch.any(mask_weak):
            labels_weak = (torch.sum(labels[mask_weak], -1) >= 1).float()
            loss_weak_student = self.supervised_loss(
                weak_preds_student[mask_weak], labels_weak
            )
            loss_weak_teacher = self.supervised_loss(
                weak_preds_teacher[mask_weak], labels_weak
            )
            self.log("val/weak/student/loss_weak", loss_weak_student)
            self.log("val/weak/teacher/loss_weak", loss_weak_teacher)

            # accumulate f1 score for weak labels
            self.get_weak_student_f1_seg_macro(
                weak_preds_student[mask_weak], labels_weak
            )
            self.get_weak_teacher_f1_seg_macro(
                weak_preds_teacher[mask_weak], labels_weak
            )

        if torch.any(mask_synth):
            loss_strong_student = self.supervised_loss(
                strong_preds_student[mask_synth], labels[mask_synth]
            )
            loss_strong_teacher = self.supervised_loss(
                strong_preds_teacher[mask_synth], labels[mask_synth]
            )

            self.log("val/synth/student/loss_strong", loss_strong_student)
            self.log("val/synth/teacher/loss_strong", loss_strong_teacher)

            # NOTE(review): this comparison uses Path == Path while the masks
            # above compare str(Path) — equivalent here, but confirm.
            filenames_synth = [
                x
                for x in filenames
                if Path(x).parent == Path(self.hparams["data"]["synth_val_folder"])
            ]

            # decode predictions at every configured validation threshold and
            # accumulate them for epoch-end sed_eval / intersection metrics
            decoded_student_strong = batched_decode_preds(
                strong_preds_student[mask_synth],
                filenames_synth,
                self.encoder,
                median_filter=self.hparams["training"]["median_window"],
                thresholds=list(self.val_buffer_student_synth.keys()),
            )

            for th in self.val_buffer_student_synth.keys():
                self.val_buffer_student_synth[th] = self.val_buffer_student_synth[
                    th
                ].append(decoded_student_strong[th], ignore_index=True)

            decoded_teacher_strong = batched_decode_preds(
                strong_preds_teacher[mask_synth],
                filenames_synth,
                self.encoder,
                median_filter=self.hparams["training"]["median_window"],
                thresholds=list(self.val_buffer_teacher_synth.keys()),
            )

            for th in self.val_buffer_teacher_synth.keys():
                self.val_buffer_teacher_synth[th] = self.val_buffer_teacher_synth[
                    th
                ].append(decoded_teacher_strong[th], ignore_index=True)

        return

    def validation_epoch_end(self, outputs):
        """ Function applied at the end of all the validation steps of the epoch.

        Computes weak macro-F1 plus synthetic-set event/intersection F1 and
        combines them into the objective metric used for model selection.

        Args:
            outputs: torch.Tensor, the concatenation of everything returned by validation_step.

        Returns:
            torch.Tensor, the objective metric to be used to choose the best model from for example.
        """

        weak_student_f1_macro = self.get_weak_student_f1_seg_macro.compute()
        weak_teacher_f1_macro = self.get_weak_teacher_f1_seg_macro.compute()

        # synth dataset
        intersection_f1_macro_student = compute_per_intersection_macro_f1(
            self.val_buffer_student_synth,
            self.hparams["data"]["synth_val_tsv"],
            self.hparams["data"]["synth_val_dur"],
        )

        synth_student_event_macro = log_sedeval_metrics(
            self.val_buffer_student_synth[0.5], self.hparams["data"]["synth_val_tsv"],
        )[0]

        intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
            self.val_buffer_teacher_synth,
            self.hparams["data"]["synth_val_tsv"],
            self.hparams["data"]["synth_val_dur"],
        )

        synth_teacher_event_macro = log_sedeval_metrics(
            self.val_buffer_teacher_synth[0.5], self.hparams["data"]["synth_val_tsv"],
        )[0]

        # choose which synthetic-set metric enters the objective
        obj_metric_synth_type = self.hparams["training"].get("obj_metric_synth_type")
        if obj_metric_synth_type is None:
            synth_metric = intersection_f1_macro_student
        elif obj_metric_synth_type == "event":
            synth_metric = synth_student_event_macro
        elif obj_metric_synth_type == "intersection":
            synth_metric = intersection_f1_macro_student
        else:
            raise NotImplementedError(
                f"obj_metric_synth_type: {obj_metric_synth_type} not implemented."
            )

        # objective = weak macro-F1 + chosen synthetic metric (maximized)
        obj_metric = torch.tensor(weak_student_f1_macro.item() + synth_metric)

        self.log("val/obj_metric", obj_metric, prog_bar=True)
        self.log("val/weak/student/macro_F1", weak_student_f1_macro)
        self.log("val/weak/teacher/macro_F1", weak_teacher_f1_macro)
        self.log(
            "val/synth/student/intersection_f1_macro", intersection_f1_macro_student
        )
        self.log(
            "val/synth/teacher/intersection_f1_macro", intersection_f1_macro_teacher
        )
        self.log("val/synth/student/event_f1_macro", synth_student_event_macro)
        self.log("val/synth/teacher/event_f1_macro", synth_teacher_event_macro)

        # free the buffers
        self.val_buffer_student_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }

        self.get_weak_student_f1_seg_macro.reset()
        self.get_weak_teacher_f1_seg_macro.reset()

        return obj_metric

    def on_save_checkpoint(self, checkpoint):
        # persist both networks so either can be restored independently
        checkpoint["sed_student"] = self.sed_student.state_dict()
        checkpoint["sed_teacher"] = self.sed_teacher.state_dict()
        return checkpoint

    def test_step(self, batch, batch_indx):
        """ Apply Test to a batch (step), used only when (trainer.test is called)

        Accumulates decoded predictions for the PSDS operating points and for
        the fixed 0.5 threshold used by the F1 metrics.

        Args:
            batch: torch.Tensor, input batch tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
        """

        audio, labels, padded_indxs, filenames = batch

        # prediction for student
        logmels = self.scaler(self.take_log(self.mel_spec(audio)))
        strong_preds_student, weak_preds_student = self.sed_student(logmels)
        # prediction for teacher
        strong_preds_teacher, weak_preds_teacher = self.sed_teacher(logmels)

        loss_strong_student = self.supervised_loss(strong_preds_student, labels)
        loss_strong_teacher = self.supervised_loss(strong_preds_teacher, labels)

        self.log("test/student/loss_strong", loss_strong_student)
        self.log("test/teacher/loss_strong", loss_strong_teacher)

        # compute psds
        decoded_student_strong = batched_decode_preds(
            strong_preds_student,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_student.keys()),
        )

        for th in self.test_psds_buffer_student.keys():
            self.test_psds_buffer_student[th] = self.test_psds_buffer_student[
                th
            ].append(decoded_student_strong[th], ignore_index=True)

        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_teacher.keys()),
        )

        for th in self.test_psds_buffer_teacher.keys():
            self.test_psds_buffer_teacher[th] = self.test_psds_buffer_teacher[
                th
            ].append(decoded_teacher_strong[th], ignore_index=True)

        # compute f1 score
        decoded_student_strong = batched_decode_preds(
            strong_preds_student,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )

        self.decoded_student_05_buffer = self.decoded_student_05_buffer.append(
            decoded_student_strong[0.5]
        )

        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )

        self.decoded_teacher_05_buffer = self.decoded_teacher_05_buffer.append(
            decoded_teacher_strong[0.5]
        )

    def on_test_epoch_end(self):
        # Compute PSDS (two scenarios) plus event/intersection F1 for both
        # student and teacher, save the per-scenario outputs under
        # <log_dir>/metrics_test, and log everything.
        # pub eval dataset
        try:
            log_dir = self.logger.log_dir
        except Exception as e:
            # fall back to the configured log dir when the logger has none
            log_dir = self.hparams["log_dir"]
        save_dir = os.path.join(log_dir, "metrics_test")

        # scenario 1: strict localization, no cross-trigger penalty
        psds_score_scenario1 = compute_psds_from_operating_points(
            self.test_psds_buffer_student,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            alpha_ct=0,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "student", "scenario1"),
        )

        # scenario 2: lenient localization, cross-triggers penalized
        psds_score_scenario2 = compute_psds_from_operating_points(
            self.test_psds_buffer_student,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "student", "scenario2"),
        )

        psds_score_teacher_scenario1 = compute_psds_from_operating_points(
            self.test_psds_buffer_teacher,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            alpha_ct=0,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "teacher", "scenario1"),
        )

        psds_score_teacher_scenario2 = compute_psds_from_operating_points(
            self.test_psds_buffer_teacher,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "teacher", "scenario2"),
        )

        event_macro_student = log_sedeval_metrics(
            self.decoded_student_05_buffer,
            self.hparams["data"]["test_tsv"],
            os.path.join(save_dir, "student"),
        )[0]

        event_macro_teacher = log_sedeval_metrics(
            self.decoded_teacher_05_buffer,
            self.hparams["data"]["test_tsv"],
            os.path.join(save_dir, "teacher"),
        )[0]

        # synth dataset
        intersection_f1_macro_student = compute_per_intersection_macro_f1(
            {"0.5": self.decoded_student_05_buffer},
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
        )

        # synth dataset
        intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
            {"0.5": self.decoded_teacher_05_buffer},
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
        )

        # hp_metric for the TensorBoard hparams view: best student PSDS
        best_test_result = torch.tensor(max(psds_score_scenario1, psds_score_scenario2))

        results = {
            "hp_metric": best_test_result,
            "test/student/psds_score_scenario1": psds_score_scenario1,
            "test/student/psds_score_scenario2": psds_score_scenario2,
            "test/teacher/psds_score_scenario1": psds_score_teacher_scenario1,
            "test/teacher/psds_score_scenario2": psds_score_teacher_scenario2,
            "test/student/event_f1_macro": event_macro_student,
            "test/student/intersection_f1_macro": intersection_f1_macro_student,
            "test/teacher/event_f1_macro": event_macro_teacher,
            "test/teacher/intersection_f1_macro": intersection_f1_macro_teacher
        }
        if self.logger is not None:
            self.logger.log_metrics(results)
            self.logger.log_hyperparams(self.hparams, results)

        for key in results.keys():
            self.log(key, results[key], prog_bar=True, logger=False)

    def configure_optimizers(self):
        # single optimizer, single (dict-style) scheduler
        return [self.opt], [self.scheduler]

    def train_dataloader(self):
        # batch_sampler yields mixed batches (synthetic / weak / unlabeled)
        # according to the configured per-subset batch sizes
        self.train_loader = torch.utils.data.DataLoader(
            self.train_data,
            batch_sampler=self.train_sampler,
            num_workers=self.num_workers,
        )

        return self.train_loader

    def val_dataloader(self):
        self.val_loader = torch.utils.data.DataLoader(
            self.valid_data,
            batch_size=self.hparams["training"]["batch_size_val"],
            num_workers=self.num_workers,
            shuffle=False,
            drop_last=False,
        )
        return self.val_loader

    def test_dataloader(self):
        self.test_loader = torch.utils.data.DataLoader(
            self.test_data,
            batch_size=self.hparams["training"]["batch_size_val"],
            num_workers=self.num_workers,
            shuffle=False,
            drop_last=False,
        )
        return self.test_loader
| 29,014 | 37.077428 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/utils.py | import os
from pathlib import Path
import pandas as pd
import scipy
from desed_task.evaluation.evaluation_measures import compute_sed_eval_metrics
from torch import nn
import soundfile
import glob
class JSD(nn.Module):
    """Jensen-Shannon-style divergence between two probability tensors.

    Used by the trainer as a disagreement measure when weighting the
    self-supervised loss.

    NOTE(review): ``nn.KLDivLoss`` expects its *input* argument in
    log-probability space, but raw probabilities ``p``/``q`` are passed here,
    so this is not the textbook JSD. The result is also negated (leading
    minus sign). Both look deliberate given how the trainer consumes the
    value (reliability = 1 - jsd), but confirm before reusing elsewhere.
    """
    def __init__(self):
        super(JSD, self).__init__()
        # NOTE(review): hard-coded .cuda() breaks CPU-only runs.
        self.kld = nn.KLDivLoss().cuda()
    def forward(self, p, q):
        # m is the midpoint distribution of p and q
        m = 0.5*(p+q)
        return -0.5*(self.kld(p,m)+self.kld(q,m))
def batched_decode_preds(
    strong_preds, filenames, encoder, thresholds=[0.5], median_filter=7, pad_indx=None,
):
    """ Decode a batch of predictions to dataframes. Each threshold gives a different dataframe and stored in a
    dictionary

    Args:
        strong_preds: torch.Tensor, batch of strong predictions, shaped
            (batch, n_classes, frames).
        filenames: list, the list of filenames of the current batch.
        encoder: ManyHotEncoder object, object used to decode predictions.
        thresholds: list, the list of thresholds to be used for predictions.
        median_filter: int, the number of frames for which to apply median window (smoothing).
        pad_indx: list, the list of indexes which have been used for padding.

    Returns:
        dict of predictions, each keys is a threshold and the value is the DataFrame of predictions.
    """
    # Init a dataframe per threshold
    prediction_dfs = {threshold: pd.DataFrame() for threshold in thresholds}

    for j in range(strong_preds.shape[0]):  # over batches
        c_preds = strong_preds[j]  # hoisted: invariant across thresholds
        if pad_indx is not None:
            true_len = int(c_preds.shape[-1] * pad_indx[j].item())
            # Fix: truncate along the time (last) axis. The previous
            # c_preds[:true_len] sliced the class axis instead, leaving the
            # padded frames in the decoded output.
            c_preds = c_preds[..., :true_len]
        for c_th in thresholds:
            pred = c_preds.transpose(0, 1).detach().cpu().numpy()
            pred = pred > c_th
            # scipy.ndimage.filters was removed in SciPy 1.15; the function
            # lives directly in scipy.ndimage.
            pred = scipy.ndimage.median_filter(pred, (median_filter, 1))
            pred = encoder.decode_strong(pred)
            pred = pd.DataFrame(pred, columns=["event_label", "onset", "offset"])
            pred["filename"] = Path(filenames[j]).stem + ".wav"
            # DataFrame.append was removed in pandas 2.0; concat is equivalent.
            prediction_dfs[c_th] = pd.concat(
                [prediction_dfs[c_th], pred], ignore_index=True
            )

    return prediction_dfs
def convert_to_event_based(weak_dataframe):
    """ Convert a weakly labeled DataFrame ('filename', 'event_labels') to a DataFrame strongly labeled
    ('filename', 'onset', 'offset', 'event_label').

    Each comma-separated label in 'event_labels' becomes one row with a
    placeholder onset of 0 and offset of 1.

    Args:
        weak_dataframe: pd.DataFrame, the dataframe to be converted.

    Returns:
        pd.DataFrame, the dataframe strongly labeled.
    """
    records = []
    for _, row in weak_dataframe.iterrows():
        records.extend(
            {
                "filename": row["filename"],
                "event_label": label,
                "onset": 0,
                "offset": 1,
            }
            for label in row["event_labels"].split(",")
        )
    return pd.DataFrame(records)
def log_sedeval_metrics(predictions, ground_truth, save_dir=None):
    """Compute the sed_eval metrics, optionally dumping the full reports.

    Args:
        predictions: pd.DataFrame, the dataframe of predictions.
        ground_truth: str, path to the tsv file of ground truth.
        save_dir: str or None, folder where the event- and segment-based
            reports are written (skipped when None).

    Returns:
        tuple, event-based macro-F1 and micro-F1, segment-based macro-F1 and
        micro-F1 (in that order).
    """
    if predictions.empty:
        # Nothing predicted: every F1 score is trivially zero.
        return 0.0, 0.0, 0.0, 0.0

    gt = pd.read_csv(ground_truth, sep="\t")
    event_res, segment_res = compute_sed_eval_metrics(predictions, gt)

    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
        for fname, report in (("event_f1.txt", event_res), ("segment_f1.txt", segment_res)):
            with open(os.path.join(save_dir, fname), "w") as f:
                f.write(str(report))

    event_scores = event_res.results()
    segment_scores = segment_res.results()
    return (
        event_scores["class_wise_average"]["f_measure"]["f_measure"],
        event_scores["overall"]["f_measure"]["f_measure"],
        segment_scores["class_wise_average"]["f_measure"]["f_measure"],
        segment_scores["overall"]["f_measure"]["f_measure"],
    )  # return also segment measures
def parse_jams(jams_list, encoder, out_json):
    """Collect background and event source files from Scaper .jams files and
    write them to a single JSON manifest.

    Args:
        jams_list: list, paths of the .jams annotation files to parse.
        encoder: ManyHotEncoder object, used to validate event labels.
        out_json: str, path of the JSON manifest to write.

    Raises:
        IndexError: if jams_list is empty.
        NotImplementedError: if an event label cannot be mapped to the encoder.
    """
    if len(jams_list) == 0:
        raise IndexError("jams list is empty ! Wrong path ?")

    backgrounds = []
    sources = []
    for jamfile in jams_list:
        with open(jamfile, "r") as f:
            jdata = json.load(f)

        annotations = jdata["annotations"]
        isolated_paths = annotations[-1]["sandbox"]["scaper"][
            "isolated_events_audio_path"
        ]
        # every annotated sound must have a matching isolated source file
        assert len(annotations[0]["data"]) == len(isolated_paths)

        for indx, sound in enumerate(annotations[0]["data"]):
            source_name = Path(isolated_paths[indx]).stem
            source_file = os.path.join(
                Path(jamfile).parent,
                Path(jamfile).stem + "_events",
                source_name + ".wav",
            )

            value = sound["value"]
            if value["role"] == "background":
                backgrounds.append(source_file)
                continue

            # it is an event: normalize labels the encoder does not know
            if value["label"] not in encoder.labels:
                if value["label"].startswith("Frying"):
                    value["label"] = "Frying"
                elif value["label"].startswith("Vacuum_cleaner"):
                    value["label"] = "Vacuum_cleaner"
                else:
                    raise NotImplementedError
            sources.append(
                {
                    "filename": source_file,
                    "onset": value["event_time"],
                    "offset": value["event_time"] + value["event_duration"],
                    "event_label": value["label"],
                }
            )

    os.makedirs(Path(out_json).parent, exist_ok=True)
    with open(out_json, "w") as f:
        json.dump({"backgrounds": backgrounds, "sources": sources}, f, indent=4)
def generate_tsv_wav_durations(audio_dir, out_tsv):
    """Build a dataframe of (filename, duration) for every wav in a folder.

    Args:
        audio_dir: str, folder scanned for "*.wav" files (via glob.glob).
        out_tsv: str or None, path of the output tsv (skipped when None).

    Returns:
        pd.DataFrame: the dataframe with "filename" and "duration" columns.
    """
    rows = [
        [os.path.basename(wav_path), soundfile.info(wav_path).duration]
        for wav_path in glob.glob(os.path.join(audio_dir, "*.wav"))
    ]
    meta_df = pd.DataFrame(rows, columns=["filename", "duration"])
    if out_tsv is not None:
        meta_df.to_csv(out_tsv, sep="\t", index=False, float_format="%.1f")
    return meta_df
| 6,982 | 35.369792 | 111 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/utilities.py | import numpy as np
import scipy.signal as sp
import wave, struct
import torch
import torch.nn as nn
from scipy.io import wavfile, loadmat
from torchaudio.functional import lfilter
from torchaudio.transforms import Spectrogram
class LinearSpectrogram(nn.Module):
    """Spectrogram front-end that pools STFT bins into nCh equal-width
    linear frequency bands via a fixed 0/1 pooling matrix (built on GPU)."""

    def __init__(self, nCh=128, n_fft=2048, hop_length=256, win_fn=torch.hamming_window):
        super(LinearSpectrogram, self).__init__()
        self.spec = Spectrogram(n_fft=n_fft, hop_length=hop_length, window_fn=win_fn)
        self.nCh = nCh
        n_bins = n_fft // 2
        bins_per_band = n_bins // nCh
        # Band ch sums the contiguous bins [ch*bins_per_band, (ch+1)*bins_per_band).
        pooling = torch.zeros([n_bins, nCh]).cuda()
        for ch in range(nCh):
            start = ch * bins_per_band
            pooling[start:start + bins_per_band, ch] = 1.0
        self.lfilter = pooling

    def forward(self, wavData):
        spec = self.spec(wavData)
        # Drop the DC bin, then pool frequencies: (B, F, T) -> (B, T, nCh).
        banded = torch.matmul(spec[:, 1:, :].permute(0, 2, 1), self.lfilter)
        # Back to (B, nCh, T).
        return banded.permute(0, 2, 1)
class AuditorySpectrogram(nn.Module):
    # Batched, GPU-based auditory spectrogram: cochlear IIR filterbank ->
    # sigmoid hair-cell transduction -> lateral inhibition -> half-wave
    # rectification -> leaky temporal integration -> frame decimation.
    # Filter coefficients are read from './aud24.mat' (COCHBA matrix).
    def __init__(self, frmRate=16, tc=8, fac=1, shft=0):
        # frmRate: frame length factor; tc: integration time constant;
        # fac: sigmoid nonlinearity factor (fac == -2 skips the hair-cell
        # low-pass, presumably the "linear" mode — TODO confirm);
        # shft: exponent shift applied to the 2**(4+shft) scale factor.
        super(AuditorySpectrogram, self).__init__()
        self.frmRate = frmRate
        self.tc = tc
        self.fac = fac
        self.shft = shft
        self.haircell_tc = 0.5
        cochlear = loadmat('./aud24.mat')
        cochba = torch.from_numpy(cochlear['COCHBA']).cuda()
        L, M = cochba.shape
        self.L = L
        self.M = M
        A = []
        B = []
        # Channels are collected in reverse column order, so A[0]/B[0] is the
        # filter stored in the LAST column of COCHBA. COCHBA row 0 holds the
        # filter order p; rows 1..p+1 hold the coefficients (real part = B,
        # imaginary part = A).
        for ch in range(M-1,-1,-1):
            p = torch.real(cochba[0, ch]).to(torch.long)
            B.append(torch.real(cochba[1:p+2, ch]).to(torch.float))
            A.append(torch.imag(cochba[1:p+2, ch]).to(torch.float))
        self.A = A
        self.B = B
        self.nCh = len(A)
        # Leak factors for temporal integration (alpha) and the hair-cell
        # membrane low-pass (beta).
        alpha = torch.exp(torch.tensor(-1/(tc*2**(4+shft)))).cuda()
        beta = torch.exp(torch.tensor(-1/(self.haircell_tc*2**(4+shft)))).cuda()
        self.alpha = alpha
        self.L_frm = torch.tensor(frmRate*2**(4+shft)).cuda()
        # hair-cell membrane: first-order IIR y[n] = x[n] + beta*y[n-1]
        self.hair_a = torch.tensor([1, -beta]).cuda().to(torch.float)
        self.hair_b = torch.tensor([1, 0]).cuda().to(torch.float)
        # temporal integration: first-order IIR with leak alpha
        self.temp_a = torch.tensor([1, -alpha]).cuda().to(torch.float)
        self.temp_b = torch.tensor([1,0]).cuda().to(torch.float)
    def forward(self, wavData):
        # wavData: (batch, samples). Returns (batch, nCh, frames) after the
        # final permute.
        bs, wavLeng = wavData.size(0), wavData.size(1)
        # Seed the lateral-inhibition reference y2_h with channel 0 (the
        # highest-frequency filter, given the reversed collection order).
        y1 = lfilter(wavData, self.A[0], self.B[0])
        y2 = torch.sigmoid(y1*self.fac)
        # hair cell membrane (low-pass <= 4kHz)
        if not self.fac == -2:
            y2 = lfilter(y2, self.hair_a, self.hair_b)
        y2_h = y2
        y3_h = 0
        #####################################################
        # All other channels
        #####################################################
        audData = []
        for ch in range(self.nCh):
            y1 = lfilter(wavData, self.A[ch], self.B[ch])
            ########################################
            # TRANSDUCTION: hair cells
            ########################################
            # Fluid cillia coupling (preemphasis) (ignored)
            # ionic channels (sigmoid function)
            y2 = torch.sigmoid(y1*self.fac)
            # hair cell membrane (low-pass <= 4 kHz) ---> y2 (ignored for linear)
            if not self.fac == -2:
                y2 = lfilter(y2, self.hair_a, self.hair_b)
            ########################################
            # REDUCTION: lateral inhibitory network
            ########################################
            # masked by higher (frequency) spatial response
            y3 = y2 - y2_h
            y2_h = y2
            # half-wave rectifier ---> y4
            y4 = torch.maximum(torch.tensor(0).cuda(), y3)
            # temporal integration window ---> y5
            if self.alpha: # leaky integration
                y5 = lfilter(y4, self.temp_a, self.temp_b)
                # decimate: keep one sample per frame of length L_frm
                audData.append(y5[:,0:-1:self.L_frm])
            else: # short-term average
                # NOTE(review): this branch references the bare names `L_frm`
                # and `self.N`, neither of which is defined here — it would
                # raise NameError/AttributeError if self.alpha were falsy.
                # It appears to be dead code inherited from wav2aud().
                if L_frm == 1:
                    audData.append(y4)
                else:
                    audData.append(torch.mean(torch.reshape(y4, [self.L_frm, self.N]), 0))
        audData = torch.stack(audData,2)
        return audData.permute(0,2,1)
def audioread(audioPath):
    """Load a wav file and peak-normalize its samples.

    Args:
        audioPath: str, path of the wav file to read.

    Returns:
        tuple: (samples divided by their peak absolute value, sampling rate).
    """
    sample_rate, samples = wavfile.read(audioPath)
    peak = np.amax(abs(samples))
    return samples / peak, sample_rate
def wav2aud(batchWave, frmLeng, tc, fac, shft):
    # Per-waveform auditory spectrogram (same pipeline as AuditorySpectrogram
    # but looping over the batch in Python and over COCHBA columns in their
    # stored order). Returns the concatenated per-item auditory spectrograms.
    nbatch = batchWave.shape[0]
    # define parameters and load cochlear filter
    cochlear = loadmat('./aud24.mat')
    COCHBA = torch.from_numpy(cochlear['COCHBA']).cuda()
    L, M = COCHBA.shape
    haircell_tc= 0.5
    # Leak factors: alpha for temporal integration, beta for the hair-cell
    # membrane low-pass.
    alpha = torch.exp(torch.tensor(-1/(tc*2**(4+shft)))).cuda()
    beta = torch.exp(torch.tensor(-1/(haircell_tc*2**(4+shft)))).cuda()
    L_frm = torch.tensor(frmLeng*2**(4+shft)).cuda()
    batchAud = []
    for bter in range(nbatch):
        wavData = batchWave[bter]
        L_x = len(wavData)
        # Zero-pad the waveform to a whole number N of frames of length L_frm.
        N = torch.ceil(L_x/L_frm).to(torch.long).cuda()
        buff = torch.zeros([N*L_frm]).cuda()
        buff[:L_x] = wavData
        wavData = buff
        # initialize output
        audData = torch.zeros([N, M-1]).cuda()
        #####################################################
        # Last channel (highest frequency)
        #####################################################
        # COCHBA row 0 holds the filter order p; rows 1..p+1 the coefficients
        # (real part = feedforward B, imaginary part = feedback A).
        p = torch.real(COCHBA[0, M-1]).to(torch.long)
        B = torch.real(COCHBA[1:p+2, M-1]).to(torch.float)
        A = torch.imag(COCHBA[1:p+2, M-1]).to(torch.float)
        y1 = lfilter(wavData, A, B)
        y2 = torch.sigmoid(y1*fac)
        # hair cell membrane (low-pass <= 4kHz)
        if not fac == -2:
            b = torch.tensor([1, 0]).cuda().to(torch.float)
            a = torch.tensor([1, -beta]).cuda().to(torch.float)
            y2 = lfilter(y2, a, b)
        # y2_h seeds the lateral-inhibition reference with the highest channel.
        y2_h = y2
        y3_h = 0
        #####################################################
        # All other channels
        #####################################################
        for ch in range(M-2,-1,-1):
            ########################################
            # ANALYSIS: cochlear filterbank
            ########################################
            # (IIR) filter bank convolution ---> y1
            p = torch.real(COCHBA[0, ch]).to(torch.long)
            B = torch.real(COCHBA[1:p+2, ch]).to(torch.float)
            A = torch.imag(COCHBA[1:p+2, ch]).to(torch.float)
            y1 = lfilter(wavData, A, B)
            ########################################
            # TRANSDUCTION: hair cells
            ########################################
            # Fluid cillia coupling (preemphasis) (ignored)
            # ionic channels (sigmoid function)
            y2 = torch.sigmoid(y1*fac)
            # hair cell membrane (low-pass <= 4 kHz) ---> y2 (ignored for linear)
            if not fac == -2:
                b = torch.tensor([1, 0]).cuda().to(torch.float)
                a = torch.tensor([1, -beta]).cuda().to(torch.float)
                y2 = lfilter(y2, a, b)
            ########################################
            # REDUCTION: lateral inhibitory network
            ########################################
            # masked by higher (frequency) spatial response
            y3 = y2 - y2_h
            y2_h = y2
            # half-wave rectifier ---> y4
            y4 = torch.maximum(torch.tensor(0).cuda(), y3)
            # temporal integration window ---> y5
            if alpha: # leaky integration
                b = torch.tensor([1, 0]).cuda().to(torch.float)
                a = torch.tensor([1, -alpha]).cuda().to(torch.float)
                y5 = lfilter(y4, a, b)
                # decimate: one sample per frame of length L_frm
                audData[:, ch] = y5[0:-1:L_frm]
            else: # short-term average
                if L_frm == 1:
                    audData[:, ch] = y4
                else:
                    audData[:, ch] = torch.mean(torch.reshape(y4, [L_frm, N]), 0)
        batchAud.append(audData)
    # NOTE(review): each audData is 2-D ([N, M-1]), so cat(...,0) yields a
    # 2-D tensor and .permute(0,2,1) would raise — confirm the intended
    # stacking (torch.stack would preserve the batch dimension).
    batchAud = torch.cat(batchAud, 0).permute(0,2,1)
    return batchAud
def sigmoid(x, a):
    """Logistic function with slope parameter a: 1 / (1 + exp(-x / a))."""
    return 1.0 / (1.0 + np.exp(-x / a))
def DataNormalization(target, meanV=None, stdV=None):
    """Standardize data column-wise: (x - mean) / std.

    The original implementation duplicated an identical per-row loop in both
    branches; the loop is replaced by a single vectorized broadcast.

    Args:
        target: np.ndarray of shape [nData, nDim], the data to normalize.
        meanV: np.ndarray or None, per-dimension means. When None, mean and
            std are estimated from `target` (sample std, ddof=1) and returned
            so they can be reused on other splits.
        stdV: np.ndarray or None, per-dimension standard deviations; must be
            provided together with `meanV`.

    Returns:
        tuple: (normalized array of shape [nData, nDim], meanV, stdV).
    """
    if meanV is None:
        meanV = np.mean(target, axis=0)
        stdV = np.std(target, axis=0, ddof=1)
    output = (target - meanV) / stdV
    return output, meanV, stdV
def DataRegularization(target):
    """Rescale every sequence to span a range of 2 and have zero mean.

    Each target[d, s] is min-max scaled to [0, 2] and then mean-centered,
    in place.

    Args:
        target: np.ndarray with at least two leading dims [nData, nSeq, ...].

    Returns:
        np.ndarray: the same (modified) array.
    """
    n_data, n_seq = target.shape[0], target.shape[1]
    for d in range(n_data):
        for s in range(n_seq):
            seq = target[d, s]
            hi = np.amax(seq)
            lo = np.amin(seq)
            scaled = 2 * (seq - lo) / (hi - lo)
            target[d, s] = scaled - np.mean(scaled)
    return target
def weights_init(m):
    """ Initialize the weights of some layers of neural networks, here Conv2D, BatchNorm, GRU, Linear
    Based on the work of Xavier Glorot
    Args:
        m: the model to initialize
    """
    name = m.__class__.__name__
    if 'Conv2d' in name:
        nn.init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        m.bias.data.fill_(0)
    elif 'BatchNorm' in name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif 'GRU' in name:
        # Orthogonal init for weight matrices only; bias vectors are skipped.
        for param in m.parameters():
            if param.dim() > 1:
                nn.init.orthogonal_(param.data)
    elif 'Linear' in name:
        m.weight.data.normal_(0, 0.01)
        m.bias.data.zero_()
def calc_error(samples, labels):
    # Regularization loss over a batch of embedding "snapshots".
    # samples: (batch, nSnaps, nDim); labels: (batch, nSnaps, nClass) one-hot.
    # For each cyclic triple of difference vectors (relative to snapshot 0) it
    # penalizes (a) the residual of one vector after projecting it onto the
    # other two, and (b) squared cosine similarities between same-class and
    # other-class directions ("orthogonality" term).
    batch_size, nSnaps, nDim = list(samples.size())
    _, _, nClass = list(labels.size())
    # Reorder to snapshot-major: (nSnaps, batch, ...).
    samples = samples.permute(1,0,2)
    labels = labels.permute(1,0,2).cpu().numpy()
    # cidx: class index of each batch item, taken from snapshot 0's one-hots.
    cidx = np.where(labels[0])[1]
    idx = np.arange(nSnaps)
    idx = np.delete(idx, 0)
    # Difference vectors of every later snapshot w.r.t. snapshot 0.
    v0 = samples[0]
    v1 = samples[idx]
    v = v1 - v0
    nVec, batch_size, nDim = list(v.size())
    error = None
    for iter in range(nVec):
        # Rotate the vector indices so each iteration uses a different
        # (target, basis1, basis2) triple: idx[0] is projected onto idx[1:3].
        idx = np.arange(nVec)
        idx = np.roll(idx, iter)
        v1_norm = torch.norm(v[idx[1]], dim=1)**2
        v2_norm = torch.norm(v[idx[2]], dim=1)**2
        v01_dot = torch.mul(v[idx[0]], v[idx[1]]).sum(1)
        v02_dot = torch.mul(v[idx[0]], v[idx[2]]).sum(1)
        # Projection coefficients of v[idx[0]] on v[idx[1]] and v[idx[2]].
        alpha = torch.div(v01_dot, v1_norm)
        beta = torch.div(v02_dot, v2_norm)
        # Residual after removing both projections; its mean norm is the
        # coplanarity penalty.
        n_vec = v[idx[0]] - torch.mul(alpha[:,None],v[idx[1]]) - torch.mul(beta[:,None],v[idx[2]])
        n_vec_norm = torch.norm(n_vec, dim=1).mean()
        orthogonality = 0
        for cter in range(nClass):
            # Batch items of class cter vs. all other items.
            tidx = np.where(cidx==cter)[0]
            ntidx = np.arange(batch_size)
            ntidx = np.delete(ntidx, tidx)
            # Unit-normalize the target difference vectors.
            vecs = v[idx[0]]
            nvec = torch.norm(vecs, dim=1)
            vecs = torch.div(vecs, nvec[:,None])
            tvec = vecs[tidx]
            ntvec = vecs[ntidx].permute(1,0)
            # Squared cosine similarity between this class and the rest.
            inners = torch.matmul(tvec, ntvec)**2
            orthogonality += inners.mean()
        if error is None:
            error = (n_vec_norm + orthogonality/nClass)
        else:
            error += (n_vec_norm + orthogonality/nClass)
    return error
| 10,213 | 28.865497 | 101 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/config.py | import logging
import math
import os
import pandas as pd
import numpy as np
# Number of sound-event classes.
nClass = 10

# Make class label
# Build the table of all label combinations with up to three active classes.
# tlab is (nClass+1) x nClass: one all-zero row followed by the identity
# (every single-class one-hot).
tlab = np.diag(np.ones(nClass),-1)[:,:-1]
bag = [tlab]
# Two-hot rows: identity plus a super-diagonal at offset `iter`; truncating to
# the first nClass-iter rows keeps exactly the rows where both ones fit, so
# this enumerates every unordered pair of classes.
for iter in range(1,nClass):
    temp = np.diag(np.ones(nClass)) + np.diag(np.ones(nClass),iter)[:nClass,:nClass]
    bag.append(temp[:nClass-iter,:])
# Three-hot rows: same construction with two offsets (iter and iter+jter),
# enumerating every unordered triple of classes.
for iter in range(1,nClass):
    for jter in range(1,nClass-iter):
        temp = np.diag(np.ones(nClass)) + np.diag(np.ones(nClass),iter)[:nClass, :nClass] + np.diag(np.ones(nClass),iter+jter)[:nClass,:nClass]
        bag.append(temp[:nClass-(iter+jter),:])
# class_label: (nComs, nClass) matrix of all 0/1/2/3-hot combinations
# (1 + 10 + C(10,2) + C(10,3) = 176 rows for nClass = 10).
class_label = np.concatenate(bag,0)
nComs = class_label.shape[0]
# Dead code kept for reference: replicated class_label across a fixed batch.
#temp = []
#for iter in range(157):
#    temp.append(np.reshape(class_label,(1,nComs,nClass)))
#class_label_ext = np.concatenate(temp,0)
| 779 | 26.857143 | 143 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/classes_dict.py | """
we store here a dict where we define the encodings for all classes in DESED task.
"""
from collections import OrderedDict
# Class name -> integer index, in the fixed DESED task ordering.
classes_labels = OrderedDict(
    (name, index)
    for index, name in enumerate(
        (
            "Alarm_bell_ringing",
            "Blender",
            "Cat",
            "Dishes",
            "Dog",
            "Electric_shaver_toothbrush",
            "Frying",
            "Running_water",
            "Speech",
            "Vacuum_cleaner",
        )
    )
)
| 425 | 18.363636 | 81 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/sed_trainer.py | import os
import random
from copy import deepcopy
from pathlib import Path
import pandas as pd
import pytorch_lightning as pl
import torch
from torchaudio.transforms import AmplitudeToDB, MelSpectrogram
from desed_task.data_augm import mixup
from desed_task.utils.scaler import TorchScaler
import numpy as np
from .utils import (
batched_decode_preds,
log_sedeval_metrics,
)
from desed_task.evaluation.evaluation_measures import (
compute_per_intersection_macro_f1,
compute_psds_from_operating_points,
)
class SEDTask4_2021(pl.LightningModule):
""" Pytorch lightning module for the SED 2021 baseline
Args:
hparams: dict, the dictionnary to be used for the current experiment/
encoder: ManyHotEncoder object, object to encode and decode labels.
sed_student: torch.Module, the student model to be trained. The teacher model will be
opt: torch.optimizer.Optimizer object, the optimizer to be used
train_data: torch.utils.data.Dataset subclass object, the training data to be used.
valid_data: torch.utils.data.Dataset subclass object, the validation data to be used.
test_data: torch.utils.data.Dataset subclass object, the test data to be used.
train_sampler: torch.utils.data.Sampler subclass object, the sampler to be used in the training dataloader.
scheduler: asteroid.engine.schedulers.BaseScheduler subclass object, the scheduler to be used. This is
used to apply ramp-up during training for example.
fast_dev_run: bool, whether to launch a run with only one batch for each set, this is for development purpose,
to test the code runs.
"""
def __init__(
    self,
    hparams,
    encoder,
    sed_student,
    opt=None,
    train_data=None,
    valid_data=None,
    test_data=None,
    train_sampler=None,
    scheduler=None,
    fast_dev_run=False,
    evaluation=False
):
    """Store experiment components and build features, losses, metrics and
    prediction buffers. See the class docstring for argument semantics;
    `evaluation` switches test-time behavior to prediction-export only."""
    super(SEDTask4_2021, self).__init__()
    self.hparams = hparams
    self.encoder = encoder
    self.sed_student = sed_student
    # The teacher starts as an exact copy of the student and is subsequently
    # updated only through the EMA in update_ema().
    self.sed_teacher = deepcopy(sed_student)
    self.opt = opt
    self.train_data = train_data
    self.valid_data = valid_data
    self.test_data = test_data
    self.train_sampler = train_sampler
    self.scheduler = scheduler
    self.fast_dev_run = fast_dev_run
    self.evaluation = evaluation

    # Single worker in fast-dev mode to keep debugging runs lightweight.
    if self.fast_dev_run:
        self.num_workers = 1
    else:
        self.num_workers = self.hparams["training"]["num_workers"]

    # Mel front-end configured from the "feats" section of hparams.
    feat_params = self.hparams["feats"]
    self.mel_spec = MelSpectrogram(
        sample_rate=feat_params["sample_rate"],
        n_fft=feat_params["n_window"],
        win_length=feat_params["n_window"],
        hop_length=feat_params["hop_length"],
        f_min=feat_params["f_min"],
        f_max=feat_params["f_max"],
        n_mels=feat_params["n_mels"],
        window_fn=torch.hamming_window,
        wkwargs={"periodic": False},
        power=1,
    )

    # The teacher is never trained by backprop: detach all its parameters.
    for param in self.sed_teacher.parameters():
        param.detach_()

    # instantiating losses
    self.supervised_loss = torch.nn.BCELoss()
    if hparams["training"]["self_sup_loss"] == "mse":
        self.selfsup_loss = torch.nn.MSELoss()
    elif hparams["training"]["self_sup_loss"] == "bce":
        self.selfsup_loss = torch.nn.BCELoss()
    else:
        raise NotImplementedError

    # for weak labels we simply compute f1 score
    self.get_weak_student_f1_seg_macro = pl.metrics.classification.F1(
        len(self.encoder.labels),
        average="macro",
        multilabel=True,
        compute_on_step=False,
    )
    self.get_weak_teacher_f1_seg_macro = pl.metrics.classification.F1(
        len(self.encoder.labels),
        average="macro",
        multilabel=True,
        compute_on_step=False,
    )

    self.scaler = self._init_scaler()

    # buffer for event based scores which we compute using sed-eval
    self.val_buffer_student_synth = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }
    self.val_buffer_teacher_synth = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }
    self.val_buffer_student_test = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }
    self.val_buffer_teacher_test = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }

    # Evenly spaced thresholds used to build PSDS operating points at test
    # time (n_test_thresholds points strictly inside (0, 1)).
    test_n_thresholds = self.hparams["training"]["n_test_thresholds"]
    test_thresholds = np.arange(
        1 / (test_n_thresholds * 2), 1, 1 / test_n_thresholds
    )
    self.test_psds_buffer_student = {k: pd.DataFrame() for k in test_thresholds}
    self.test_psds_buffer_teacher = {k: pd.DataFrame() for k in test_thresholds}
    self.decoded_student_05_buffer = pd.DataFrame()
    self.decoded_teacher_05_buffer = pd.DataFrame()
def update_ema(self, alpha, global_step, model, ema_model):
    """Update teacher parameters as an EMA of the student parameters.

    Args:
        alpha: float, smoothing factor of the exponential moving average.
        global_step: int, the current global step.
        model: torch.Module, student model (EMA source).
        ema_model: torch.Module, teacher model (updated in place).
    """
    # Use the true average until the exponential average is more correct.
    effective_alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(effective_alpha).add_(
            param.data, alpha=1 - effective_alpha
        )
def _init_scaler(self):
    """Scaler initialization.
    Instance-statistic scalers need no fitting and are returned immediately;
    dataset-statistic scalers are either loaded from a saved checkpoint or
    fitted on the training dataloader (and optionally saved).
    Raises:
        NotImplementedError: in case of not Implemented scaler
    Returns:
        TorchScaler: returns the scaler
    """
    if self.hparams["scaler"]["statistic"] == "instance":
        # Per-instance min-max scaling: stateless, nothing to fit or load.
        scaler = TorchScaler("instance", "minmax", self.hparams["scaler"]["dims"])
        return scaler
    elif self.hparams["scaler"]["statistic"] == "dataset":
        # we fit the scaler
        scaler = TorchScaler(
            "dataset",
            self.hparams["scaler"]["normtype"],
            self.hparams["scaler"]["dims"],
        )
    else:
        raise NotImplementedError
    # Reuse previously fitted statistics when a checkpoint exists.
    if self.hparams["scaler"]["savepath"] is not None:
        if os.path.exists(self.hparams["scaler"]["savepath"]):
            scaler = torch.load(self.hparams["scaler"]["savepath"])
            print(
                "Loaded Scaler from previous checkpoint from {}".format(
                    self.hparams["scaler"]["savepath"]
                )
            )
            return scaler
    # Fit on log-mel features of the training data.
    self.train_loader = self.train_dataloader()
    scaler.fit(
        self.train_loader,
        transform_func=lambda x: self.take_log(self.mel_spec(x[0])),
    )
    if self.hparams["scaler"]["savepath"] is not None:
        torch.save(scaler, self.hparams["scaler"]["savepath"])
        print(
            "Saving Scaler from previous checkpoint at {}".format(
                self.hparams["scaler"]["savepath"]
            )
        )
    return scaler
def take_log(self, mels):
    """Convert mel spectrograms to a clamped log-amplitude (dB) scale.

    Args:
        mels: torch.Tensor, mel spectrograms for which to apply log.

    Returns:
        Tensor: log mel spectrogram, clamped to the [-50, 80] range.
    """
    to_db = AmplitudeToDB(stype="amplitude")
    to_db.amin = 1e-5  # amin= 1e-5 as in librosa
    clamped = to_db(mels).clamp(min=-50, max=80)  # clamp to reproduce old code
    return clamped
def training_step(self, batch, batch_indx):
    """ Applying the training for one batch (a step). Used during trainer.fit
    Args:
        batch: torch.Tensor, batch input tensor
        batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
    Returns:
        torch.Tensor, the loss to take into account.
    """
    audio, labels, padded_indxs = batch
    indx_synth, indx_weak, indx_unlabelled = self.hparams["training"]["batch_size"]
    features = self.mel_spec(audio)

    batch_num = features.shape[0]
    # deriving masks for each dataset: the batch is laid out as
    # [synthetic (strong) | weak | unlabelled] blocks.
    strong_mask = torch.zeros(batch_num).to(features).bool()
    weak_mask = torch.zeros(batch_num).to(features).bool()
    strong_mask[:indx_synth] = 1
    weak_mask[indx_synth : indx_weak + indx_synth] = 1

    # deriving weak labels: clip-level presence of each class.
    labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()

    # Apply mixup to both weak and strong subsets with probability 0.5.
    mixup_type = self.hparams["training"].get("mixup")
    if mixup_type is not None and 0.5 > random.random():
        features[weak_mask], labels_weak = mixup(
            features[weak_mask], labels_weak, mixup_label_type=mixup_type
        )
        features[strong_mask], labels[strong_mask] = mixup(
            features[strong_mask], labels[strong_mask], mixup_label_type=mixup_type
        )

    # sed student forward
    strong_preds_student, weak_preds_student = self.sed_student(
        self.scaler(self.take_log(features))
    )
    # supervised loss on strong labels
    loss_strong = self.supervised_loss(
        strong_preds_student[strong_mask], labels[strong_mask]
    )
    # supervised loss on weakly labelled
    loss_weak = self.supervised_loss(weak_preds_student[weak_mask], labels_weak)
    # total supervised loss
    tot_loss_supervised = loss_strong + loss_weak

    # Teacher forward pass and losses: monitoring only, no gradients.
    with torch.no_grad():
        ema_features = self.scaler(self.take_log(features))
        strong_preds_teacher, weak_preds_teacher = self.sed_teacher(ema_features)
        loss_strong_teacher = self.supervised_loss(
            strong_preds_teacher[strong_mask], labels[strong_mask]
        )
        loss_weak_teacher = self.supervised_loss(
            weak_preds_teacher[weak_mask], labels_weak
        )

    # we apply consistency between the predictions, use the scheduler for learning rate (to be changed ?)
    weight = (
        self.hparams["training"]["const_max"]
        * self.scheduler["scheduler"]._get_scaling_factor()
    )

    strong_self_sup_loss = self.selfsup_loss(
        strong_preds_student, strong_preds_teacher.detach()
    )
    weak_self_sup_loss = self.selfsup_loss(
        weak_preds_student, weak_preds_teacher.detach()
    )
    tot_self_loss = (strong_self_sup_loss + weak_self_sup_loss) * weight

    tot_loss = tot_loss_supervised + tot_self_loss

    self.log("train/student/loss_strong", loss_strong)
    self.log("train/student/loss_weak", loss_weak)
    self.log("train/teacher/loss_strong", loss_strong_teacher)
    self.log("train/teacher/loss_weak", loss_weak_teacher)
    self.log("train/step", self.scheduler["scheduler"].step_num, prog_bar=True)
    self.log("train/student/tot_self_loss", tot_self_loss, prog_bar=True)
    self.log("train/weight", weight)
    # Fix: this key previously logged strong_self_sup_loss (which is already
    # logged under its own key below) instead of the supervised total.
    self.log("train/student/tot_supervised", tot_loss_supervised, prog_bar=True)
    self.log("train/student/weak_self_sup_loss", weak_self_sup_loss)
    self.log("train/student/strong_self_sup_loss", strong_self_sup_loss)
    self.log("train/lr", self.opt.param_groups[-1]["lr"], prog_bar=True)

    return tot_loss
def on_before_zero_grad(self, *args, **kwargs):
    """Refresh the EMA teacher right after every optimizer step."""
    current_step = self.scheduler["scheduler"].step_num
    self.update_ema(
        self.hparams["training"]["ema_factor"],
        current_step,
        self.sed_student,
        self.sed_teacher,
    )
def validation_step(self, batch, batch_indx):
    """ Apply validation to a batch (step). Used during trainer.fit
    Computes weak losses/F1 on the weak subset and strong losses plus decoded
    event predictions on the synthetic subset; decoded events are accumulated
    in the per-threshold validation buffers.
    Args:
        batch: torch.Tensor, input batch tensor
        batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
    Returns:
    """
    audio, labels, padded_indxs, filenames = batch

    # prediction for student
    logmels = self.scaler(self.take_log(self.mel_spec(audio)))
    strong_preds_student, weak_preds_student = self.sed_student(logmels)
    # prediction for teacher
    strong_preds_teacher, weak_preds_teacher = self.sed_teacher(logmels)

    # we derive masks for each dataset based on folders of filenames
    mask_weak = (
        torch.tensor(
            [
                str(Path(x).parent)
                == str(Path(self.hparams["data"]["weak_folder"]))
                for x in filenames
            ]
        )
        .to(audio)
        .bool()
    )
    mask_synth = (
        torch.tensor(
            [
                str(Path(x).parent)
                == str(Path(self.hparams["data"]["synth_val_folder"]))
                for x in filenames
            ]
        )
        .to(audio)
        .bool()
    )

    # Weakly labelled subset: clip-level BCE and macro-F1 accumulation.
    if torch.any(mask_weak):
        labels_weak = (torch.sum(labels[mask_weak], -1) >= 1).float()

        loss_weak_student = self.supervised_loss(
            weak_preds_student[mask_weak], labels_weak
        )
        loss_weak_teacher = self.supervised_loss(
            weak_preds_teacher[mask_weak], labels_weak
        )
        self.log("val/weak/student/loss_weak", loss_weak_student)
        self.log("val/weak/teacher/loss_weak", loss_weak_teacher)

        # accumulate f1 score for weak labels
        self.get_weak_student_f1_seg_macro(
            weak_preds_student[mask_weak], labels_weak
        )
        self.get_weak_teacher_f1_seg_macro(
            weak_preds_teacher[mask_weak], labels_weak
        )

    # Synthetic subset: frame-level BCE plus decoded events per threshold.
    if torch.any(mask_synth):
        loss_strong_student = self.supervised_loss(
            strong_preds_student[mask_synth], labels[mask_synth]
        )
        loss_strong_teacher = self.supervised_loss(
            strong_preds_teacher[mask_synth], labels[mask_synth]
        )

        self.log("val/synth/student/loss_strong", loss_strong_student)
        self.log("val/synth/teacher/loss_strong", loss_strong_teacher)

        filenames_synth = [
            x
            for x in filenames
            if Path(x).parent == Path(self.hparams["data"]["synth_val_folder"])
        ]

        decoded_student_strong = batched_decode_preds(
            strong_preds_student[mask_synth],
            filenames_synth,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.val_buffer_student_synth.keys()),
        )

        for th in self.val_buffer_student_synth.keys():
            self.val_buffer_student_synth[th] = self.val_buffer_student_synth[
                th
            ].append(decoded_student_strong[th], ignore_index=True)

        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher[mask_synth],
            filenames_synth,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.val_buffer_teacher_synth.keys()),
        )
        for th in self.val_buffer_teacher_synth.keys():
            self.val_buffer_teacher_synth[th] = self.val_buffer_teacher_synth[
                th
            ].append(decoded_teacher_strong[th], ignore_index=True)

    return
def validation_epoch_end(self, outputs):
    """ Fonction applied at the end of all the validation steps of the epoch.
    Aggregates the weak macro-F1 and the synthetic-set metrics accumulated
    during validation_step, logs them, and resets all buffers/metrics.
    Args:
        outputs: torch.Tensor, the concatenation of everything returned by validation_step.
    Returns:
        torch.Tensor, the objective metric to be used to choose the best model from for example.
    """
    weak_student_f1_macro = self.get_weak_student_f1_seg_macro.compute()
    weak_teacher_f1_macro = self.get_weak_teacher_f1_seg_macro.compute()

    # synth dataset
    intersection_f1_macro_student = compute_per_intersection_macro_f1(
        self.val_buffer_student_synth,
        self.hparams["data"]["synth_val_tsv"],
        self.hparams["data"]["synth_val_dur"],
    )

    # log_sedeval_metrics returns a tuple; [0] is the event-based macro-F1.
    synth_student_event_macro = log_sedeval_metrics(
        self.val_buffer_student_synth[0.5], self.hparams["data"]["synth_val_tsv"],
    )[0]

    intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
        self.val_buffer_teacher_synth,
        self.hparams["data"]["synth_val_tsv"],
        self.hparams["data"]["synth_val_dur"],
    )

    synth_teacher_event_macro = log_sedeval_metrics(
        self.val_buffer_teacher_synth[0.5], self.hparams["data"]["synth_val_tsv"],
    )[0]

    # Select which synthetic metric enters the model-selection objective.
    obj_metric_synth_type = self.hparams["training"].get("obj_metric_synth_type")
    if obj_metric_synth_type is None:
        synth_metric = intersection_f1_macro_student
    elif obj_metric_synth_type == "event":
        synth_metric = synth_student_event_macro
    elif obj_metric_synth_type == "intersection":
        synth_metric = intersection_f1_macro_student
    else:
        raise NotImplementedError(
            f"obj_metric_synth_type: {obj_metric_synth_type} not implemented."
        )

    # Objective = weak macro-F1 + chosen synthetic metric (student only).
    obj_metric = torch.tensor(weak_student_f1_macro.item() + synth_metric)

    self.log("val/obj_metric", obj_metric, prog_bar=True)
    self.log("val/weak/student/macro_F1", weak_student_f1_macro)
    self.log("val/weak/teacher/macro_F1", weak_teacher_f1_macro)
    self.log(
        "val/synth/student/intersection_f1_macro", intersection_f1_macro_student
    )
    self.log(
        "val/synth/teacher/intersection_f1_macro", intersection_f1_macro_teacher
    )
    self.log("val/synth/student/event_f1_macro", synth_student_event_macro)
    self.log("val/synth/teacher/event_f1_macro", synth_teacher_event_macro)

    # free the buffers
    self.val_buffer_student_synth = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }
    self.val_buffer_teacher_synth = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }

    self.get_weak_student_f1_seg_macro.reset()
    self.get_weak_teacher_f1_seg_macro.reset()

    return obj_metric
def on_save_checkpoint(self, checkpoint):
    """Store student and teacher weights explicitly in the checkpoint."""
    for key, model in (
        ("sed_student", self.sed_student),
        ("sed_teacher", self.sed_teacher),
    ):
        checkpoint[key] = model.state_dict()
    return checkpoint
def test_step(self, batch, batch_indx):
    """ Apply Test to a batch (step), used only when (trainer.test is called)
    Accumulates decoded predictions at many thresholds (for PSDS) and at 0.5
    (for F1) into the test buffers; strong losses are logged unless running
    in evaluation (prediction-export) mode.
    Args:
        batch: torch.Tensor, input batch tensor
        batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
    Returns:
    """

    audio, labels, padded_indxs, filenames = batch

    # prediction for student
    logmels = self.scaler(self.take_log(self.mel_spec(audio)))
    strong_preds_student, weak_preds_student = self.sed_student(logmels)
    # prediction for teacher
    strong_preds_teacher, weak_preds_teacher = self.sed_teacher(logmels)
    # Disabled debugging code: dumped per-file posteriors as .npy files.
    """
    bsz = len(filenames)
    for bter in range(bsz):
        pred_student = strong_preds_student[bter].cpu().numpy()
        pred_teacher = strong_preds_teacher[bter].cpu().numpy()
        path, filename = os.path.split(filenames[bter])
        np.save('./Posterior/student/{}.npy'.format(filename), pred_student)
        np.save('./Posterior/teacher/{}.npy'.format(filename), pred_teacher)
    """
    # In evaluation mode there are no labels, so losses are skipped.
    if not self.evaluation:
        loss_strong_student = self.supervised_loss(strong_preds_student, labels)
        loss_strong_teacher = self.supervised_loss(strong_preds_teacher, labels)

        self.log("test/student/loss_strong", loss_strong_student)
        self.log("test/teacher/loss_strong", loss_strong_teacher)

    # compute psds: decode at every PSDS operating-point threshold.
    decoded_student_strong = batched_decode_preds(
        strong_preds_student,
        filenames,
        self.encoder,
        median_filter=self.hparams["training"]["median_window"],
        thresholds=list(self.test_psds_buffer_student.keys()),
    )

    for th in self.test_psds_buffer_student.keys():
        self.test_psds_buffer_student[th] = self.test_psds_buffer_student[
            th
        ].append(decoded_student_strong[th], ignore_index=True)

    decoded_teacher_strong = batched_decode_preds(
        strong_preds_teacher,
        filenames,
        self.encoder,
        median_filter=self.hparams["training"]["median_window"],
        thresholds=list(self.test_psds_buffer_teacher.keys()),
    )

    for th in self.test_psds_buffer_teacher.keys():
        self.test_psds_buffer_teacher[th] = self.test_psds_buffer_teacher[
            th
        ].append(decoded_teacher_strong[th], ignore_index=True)

    # compute f1 score: a second decode at the single 0.5 threshold.
    decoded_student_strong = batched_decode_preds(
        strong_preds_student,
        filenames,
        self.encoder,
        median_filter=self.hparams["training"]["median_window"],
        thresholds=[0.5],
    )

    self.decoded_student_05_buffer = self.decoded_student_05_buffer.append(
        decoded_student_strong[0.5]
    )

    decoded_teacher_strong = batched_decode_preds(
        strong_preds_teacher,
        filenames,
        self.encoder,
        median_filter=self.hparams["training"]["median_window"],
        thresholds=[0.5],
    )

    self.decoded_teacher_05_buffer = self.decoded_teacher_05_buffer.append(
        decoded_teacher_strong[0.5]
    )
def on_test_epoch_end(self):
    """Finalize testing: dump predictions (evaluation mode) or compute metrics.

    In evaluation mode (no ground truth available) the buffered student and
    teacher predictions are written to TSV files. Otherwise PSDS scores for
    two scenarios, collar-based event F1 and intersection-based F1 are
    computed for both the student and the teacher model and logged.
    """
    # Resolve the output directory; fall back to the configured log_dir when
    # no logger directory is available (e.g. fast dev run / no logger).
    try:
        log_dir = self.logger.log_dir
    except Exception:
        log_dir = self.hparams["log_dir"]
    save_dir = os.path.join(log_dir, "metrics_test")
    if self.evaluation:
        # Only save the predictions; there is no ground truth to score against.
        save_dir_student = os.path.join(save_dir, "student")
        os.makedirs(save_dir_student, exist_ok=True)
        self.decoded_student_05_buffer.to_csv(
            os.path.join(save_dir_student, "predictions_05_student.tsv"),
            sep="\t",
            index=False,
        )
        for k in self.test_psds_buffer_student.keys():
            self.test_psds_buffer_student[k].to_csv(
                os.path.join(save_dir_student, f"predictions_th_{k:.2f}.tsv"),
                sep="\t",
                index=False,
            )
        print(f"\nPredictions for student saved in: {save_dir_student}")
        save_dir_teacher = os.path.join(save_dir, "teacher")
        os.makedirs(save_dir_teacher, exist_ok=True)
        self.decoded_teacher_05_buffer.to_csv(
            os.path.join(save_dir_teacher, "predictions_05_teacher.tsv"),
            sep="\t",
            index=False,
        )
        # BUGFIX: this loop previously iterated the *student* PSDS buffers,
        # so the teacher directory received student predictions. Use the
        # teacher buffers so teacher outputs are actually saved.
        for k in self.test_psds_buffer_teacher.keys():
            self.test_psds_buffer_teacher[k].to_csv(
                os.path.join(save_dir_teacher, f"predictions_th_{k:.2f}.tsv"),
                sep="\t",
                index=False,
            )
        print(f"\nPredictions for teacher saved in: {save_dir_teacher}")
    else:
        # PSDS scenario 1: strict detection/ground-truth collars (0.7),
        # cross-triggers not penalized.
        psds_score_scenario1 = compute_psds_from_operating_points(
            self.test_psds_buffer_student,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            alpha_ct=0,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "student", "scenario1"),
        )
        # PSDS scenario 2: looser collars (0.1) but cross-triggers penalized.
        psds_score_scenario2 = compute_psds_from_operating_points(
            self.test_psds_buffer_student,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "student", "scenario2"),
        )
        psds_score_teacher_scenario1 = compute_psds_from_operating_points(
            self.test_psds_buffer_teacher,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            alpha_ct=0,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "teacher", "scenario1"),
        )
        psds_score_teacher_scenario2 = compute_psds_from_operating_points(
            self.test_psds_buffer_teacher,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "teacher", "scenario2"),
        )
        # Collar-based (sed_eval) event macro F1 at threshold 0.5.
        event_macro_student = log_sedeval_metrics(
            self.decoded_student_05_buffer,
            self.hparams["data"]["test_tsv"],
            os.path.join(save_dir, "student"),
        )[0]
        event_macro_teacher = log_sedeval_metrics(
            self.decoded_teacher_05_buffer,
            self.hparams["data"]["test_tsv"],
            os.path.join(save_dir, "teacher"),
        )[0]
        # Intersection-based macro F1 at threshold 0.5 (synth/test set).
        intersection_f1_macro_student = compute_per_intersection_macro_f1(
            {"0.5": self.decoded_student_05_buffer},
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
        )
        intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
            {"0.5": self.decoded_teacher_05_buffer},
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
        )
        # hp_metric: the better of the two student PSDS scenarios.
        best_test_result = torch.tensor(
            max(psds_score_scenario1, psds_score_scenario2)
        )
        results = {
            "hp_metric": best_test_result,
            "test/student/psds_score_scenario1": psds_score_scenario1,
            "test/student/psds_score_scenario2": psds_score_scenario2,
            "test/teacher/psds_score_scenario1": psds_score_teacher_scenario1,
            "test/teacher/psds_score_scenario2": psds_score_teacher_scenario2,
            "test/student/event_f1_macro": event_macro_student,
            "test/student/intersection_f1_macro": intersection_f1_macro_student,
            "test/teacher/event_f1_macro": event_macro_teacher,
            "test/teacher/intersection_f1_macro": intersection_f1_macro_teacher,
        }
        if self.logger is not None:
            self.logger.log_metrics(results)
            self.logger.log_hyperparams(self.hparams, results)
        for key in results.keys():
            self.log(key, results[key], prog_bar=True, logger=False)
def configure_optimizers(self):
    """Return the optimizer and scheduler lists expected by Lightning."""
    optimizers = [self.opt]
    schedulers = [self.scheduler]
    return optimizers, schedulers
def train_dataloader(self):
    """Build, cache and return the training DataLoader.

    Batching is fully delegated to ``self.train_sampler`` (a batch sampler),
    so no ``batch_size``/``shuffle`` arguments are passed here.
    """
    loader = torch.utils.data.DataLoader(
        self.train_data,
        num_workers=self.num_workers,
        batch_sampler=self.train_sampler,
    )
    self.train_loader = loader
    return loader
def val_dataloader(self):
    """Build, cache and return the validation DataLoader (sequential order,
    last partial batch kept)."""
    batch_size = self.hparams["training"]["batch_size_val"]
    loader = torch.utils.data.DataLoader(
        self.valid_data,
        batch_size=batch_size,
        num_workers=self.num_workers,
        shuffle=False,
        drop_last=False,
    )
    self.val_loader = loader
    return loader
def test_dataloader(self):
self.test_loader = torch.utils.data.DataLoader(
self.test_data,
batch_size=self.hparams["training"]["batch_size_val"],
num_workers=self.num_workers,
shuffle=False,
drop_last=False,
)
return self.test_loader
| 29,083 | 37.675532 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/sed_trainer_CRST.py | import os
import random
from copy import deepcopy
from pathlib import Path
import local.config as cfg
import pandas as pd
import pytorch_lightning as pl
import torch
from torchaudio.transforms import AmplitudeToDB, MelSpectrogram
from desed_task.data_augm import mixup, frame_shift, add_noise, temporal_reverse
from desed_task.utils.scaler import TorchScaler
import numpy as np
from .utils import (
batched_decode_preds,
log_sedeval_metrics,
JSD,
)
from desed_task.evaluation.evaluation_measures import (
compute_per_intersection_macro_f1,
compute_psds_from_operating_points,
)
class SEDTask4_2021(pl.LightningModule):
""" Pytorch lightning module for the SED 2021 baseline
Args:
hparams: dict, the dictionnary to be used for the current experiment/
encoder: ManyHotEncoder object, object to encode and decode labels.
sed_student: torch.Module, the student model to be trained. The teacher model will be
opt: torch.optimizer.Optimizer object, the optimizer to be used
train_data: torch.utils.data.Dataset subclass object, the training data to be used.
valid_data: torch.utils.data.Dataset subclass object, the validation data to be used.
test_data: torch.utils.data.Dataset subclass object, the test data to be used.
train_sampler: torch.utils.data.Sampler subclass object, the sampler to be used in the training dataloader.
scheduler: asteroid.engine.schedulers.BaseScheduler subclass object, the scheduler to be used. This is
used to apply ramp-up during training for example.
fast_dev_run: bool, whether to launch a run with only one batch for each set, this is for development purpose,
to test the code runs.
"""
def __init__(
    self,
    hparams,
    encoder,
    sed_student,
    opt=None,
    train_data=None,
    valid_data=None,
    test_data=None,
    train_sampler=None,
    scheduler=None,
    fast_dev_run=False,
    evaluation=False
):
    """Set up the dual student/teacher CRST trainer.

    Args:
        hparams: dict, experiment configuration.
        encoder: ManyHotEncoder, encodes/decodes labels.
        sed_student: sequence of two student models; each teacher is a
            deepcopy of its paired student.
        opt: sequence of two optimizers (one per student).
        train_data/valid_data/test_data: torch Dataset objects.
        train_sampler: batch sampler for the training dataloader.
        scheduler: sequence of two scheduler dicts (one per optimizer).
        fast_dev_run: bool, development mode (single worker).
        evaluation: bool, True when running on unlabelled evaluation data.
    """
    super(SEDTask4_2021, self).__init__()
    self.hparams = hparams
    # manual optimization: two optimizers are stepped by hand in training_step
    self.automatic_optimization = False
    self.encoder = encoder
    # Two student/teacher pairs; teachers start as copies of their students
    # and are later updated by EMA only.
    self.sed_student1 = sed_student[0]
    self.sed_teacher1 = deepcopy(sed_student[0])
    self.sed_student2 = sed_student[1]
    self.sed_teacher2 = deepcopy(sed_student[1])
    self.opt1 = opt[0]
    self.opt2 = opt[1]
    self.train_data = train_data
    self.valid_data = valid_data
    self.test_data = test_data
    self.train_sampler = train_sampler
    self.scheduler1 = scheduler[0]
    self.scheduler2 = scheduler[1]
    self.fast_dev_run = fast_dev_run
    self.evaluation = evaluation
    if self.fast_dev_run:
        self.num_workers = 1
    else:
        self.num_workers = self.hparams["training"]["num_workers"]
    # add class_label: table of all label combinations used to build soft
    # pseudo labels in training_step (softmax over combination axis, dim=2)
    self.softmax = torch.nn.Softmax(dim=2)
    self.jsd = JSD()
    # NOTE(review): hard-codes GPU placement; breaks CPU-only runs.
    self.class_label = torch.tensor(cfg.class_label).cuda()
    feat_params = self.hparams["feats"]
    #self.lin_spec = LinearSpectrogram(nCh=128, n_fft=2048, hop_length=256, win_fn = torch.hamming_window)
    self.mel_spec = MelSpectrogram(
        sample_rate=feat_params["sample_rate"],
        n_fft=feat_params["n_window"],
        win_length=feat_params["n_window"],
        hop_length=feat_params["hop_length"],
        f_min=feat_params["f_min"],
        f_max=feat_params["f_max"],
        n_mels=feat_params["n_mels"],
        window_fn=torch.hamming_window,
        wkwargs={"periodic": False},
        power=1,
    )
    # Teachers receive no gradients; they are updated via EMA only.
    for param in self.sed_teacher1.parameters():
        param.detach_()
    for param in self.sed_teacher2.parameters():
        param.detach_()
    # instantiating losses
    self.supervised_loss = torch.nn.BCELoss()
    if hparams["training"]["self_sup_loss"] == "mse":
        self.selfsup_loss = torch.nn.MSELoss()
    elif hparams["training"]["self_sup_loss"] == "bce":
        self.selfsup_loss = torch.nn.BCELoss()
    else:
        raise NotImplementedError
    # for weak labels we simply compute f1 score
    self.get_weak_student_f1_seg_macro = pl.metrics.classification.F1(
        len(self.encoder.labels),
        average="macro",
        multilabel=True,
        compute_on_step=False,
    )
    self.get_weak_teacher_f1_seg_macro = pl.metrics.classification.F1(
        len(self.encoder.labels),
        average="macro",
        multilabel=True,
        compute_on_step=False,
    )
    self.scaler = self._init_scaler()
    # buffers for event based scores which we compute using sed-eval,
    # keyed by decision threshold; filled during validation
    self.val_buffer_student_synth = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }
    self.val_buffer_teacher_synth = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }
    self.val_buffer_student_test = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }
    self.val_buffer_teacher_test = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }
    # evenly spaced operating points in (0, 1) for PSDS computation
    test_n_thresholds = self.hparams["training"]["n_test_thresholds"]
    test_thresholds = np.arange(
        1 / (test_n_thresholds * 2), 1, 1 / test_n_thresholds
    )
    # per-model and ensembled (averaged) prediction buffers used in test
    self.test_psds_buffer_student1 = {k: pd.DataFrame() for k in test_thresholds}
    self.test_psds_buffer_teacher1 = {k: pd.DataFrame() for k in test_thresholds}
    self.test_psds_buffer_student2 = {k: pd.DataFrame() for k in test_thresholds}
    self.test_psds_buffer_teacher2 = {k: pd.DataFrame() for k in test_thresholds}
    self.test_psds_buffer_student = {k: pd.DataFrame() for k in test_thresholds}
    self.test_psds_buffer_teacher = {k: pd.DataFrame() for k in test_thresholds}
    self.decoded_student1_05_buffer = pd.DataFrame()
    self.decoded_teacher1_05_buffer = pd.DataFrame()
    self.decoded_student2_05_buffer = pd.DataFrame()
    self.decoded_teacher2_05_buffer = pd.DataFrame()
    self.decoded_student_05_buffer = pd.DataFrame()
    self.decoded_teacher_05_buffer = pd.DataFrame()
def update_ema(self, alpha, global_step, model, ema_model):
    """Update the teacher as an exponential moving average of the student.

    Args:
        alpha: float, EMA smoothing factor.
        global_step: int, current global step; caps the effective alpha so
            the teacher behaves as a true running average early in training.
        model: torch.Module, student model providing the new parameters.
        ema_model: torch.Module, teacher model updated in place.
    """
    # Use the plain average until the exponential average is more accurate.
    effective_alpha = min(alpha, 1 - 1 / (global_step + 1))
    pairs = zip(ema_model.parameters(), model.parameters())
    for teacher_param, student_param in pairs:
        teacher_param.data.mul_(effective_alpha).add_(
            student_param.data, alpha=1 - effective_alpha
        )
def _init_scaler(self):
    """Scaler initialization.

    Builds a TorchScaler according to ``hparams["scaler"]``. For the
    "dataset" statistic the scaler is fitted on the whole training set
    (log-mel features) unless a previously saved scaler can be loaded
    from ``hparams["scaler"]["savepath"]``.

    Raises:
        NotImplementedError: for an unknown scaler statistic.
    Returns:
        TorchScaler: the (possibly fitted) scaler.
    """
    if self.hparams["scaler"]["statistic"] == "instance":
        # per-instance min/max scaling needs no fitting
        scaler = TorchScaler("instance", "minmax", self.hparams["scaler"]["dims"])
        return scaler
    elif self.hparams["scaler"]["statistic"] == "dataset":
        # we fit the scaler on dataset statistics below
        scaler = TorchScaler(
            "dataset",
            self.hparams["scaler"]["normtype"],
            self.hparams["scaler"]["dims"],
        )
    else:
        raise NotImplementedError
    # Reuse a previously fitted scaler when a checkpoint exists; this skips
    # the (expensive) pass over the training data.
    if self.hparams["scaler"]["savepath"] is not None:
        if os.path.exists(self.hparams["scaler"]["savepath"]):
            scaler = torch.load(self.hparams["scaler"]["savepath"])
            print(
                "Loaded Scaler from previous checkpoint from {}".format(
                    self.hparams["scaler"]["savepath"]
                )
            )
            return scaler
    # Fit on log-mel features of the training set (x[0] is the audio batch).
    self.train_loader = self.train_dataloader()
    scaler.fit(
        self.train_loader,
        transform_func=lambda x: self.take_log(self.mel_spec(x[0])),
    )
    if self.hparams["scaler"]["savepath"] is not None:
        torch.save(scaler, self.hparams["scaler"]["savepath"])
        print(
            "Saving Scaler from previous checkpoint at {}".format(
                self.hparams["scaler"]["savepath"]
            )
        )
        return scaler
def take_log(self, mels):
    """Convert amplitude mel spectrograms to the log (dB) scale.

    Args:
        mels: torch.Tensor, amplitude mel spectrograms.

    Returns:
        Tensor: dB-scaled spectrogram clamped to [-50, 80].
    """
    to_db = AmplitudeToDB(stype="amplitude")
    to_db.amin = 1e-5  # amin=1e-5 as in librosa
    db_mels = to_db(mels)
    # clamp to reproduce old code
    return db_mels.clamp(min=-50, max=80)
def training_step(self, batch, batch_indx, optimizer_idx):
    """ Applying the training for one batch (a step). Used during trainer.fit

    Each student is trained on its own view of the batch (original vs
    frame-shifted) with supervised losses on the labelled portion, plus a
    self-supervised loss against soft pseudo labels derived from the *other*
    pair's teacher, weighted by a JSD-based reliability estimate.

    Args:
        batch: tuple (audio, labels, padded_indxs), batch input.
        batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        optimizer_idx: int, optimizer index (unused: optimization is manual).
    Returns:
        dict with the two total losses ('tot_loss1', 'tot_loss2').
    """
    audio, labels, padded_indxs = batch
    # batch layout: [synthetic(strong) | weak | unlabelled]
    indx_synth, indx_weak, indx_unlabelled = self.hparams["training"]["batch_size"]
    features = self.mel_spec(audio)
    batch_num = features.shape[0]
    # deriving masks for each dataset
    strong_mask = torch.zeros(batch_num).to(features).bool()
    weak_mask = torch.zeros(batch_num).to(features).bool()
    strong_mask[:indx_synth] = 1
    weak_mask[indx_synth : indx_weak + indx_synth] = 1
    # deriving weak (clip-level) labels from the strong ones
    labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()
    # mixup augmentation, applied with probability 0.5
    mixup_type = self.hparams["training"].get("mixup")
    if mixup_type is not None and 0.5 > random.random():
        features[weak_mask], labels_weak = mixup(
            features[weak_mask], labels_weak, mixup_label_type=mixup_type
        )
        features[strong_mask], labels[strong_mask] = mixup(
            features[strong_mask], labels[strong_mask], mixup_label_type=mixup_type
        )
    # perturbation: build a second, frame-shifted view of the batch
    ori_features = self.scaler(self.take_log(features))
    ema_features = ori_features.clone().detach()
    ema_labels = labels.clone().detach()
    ema_features, ema_labels = frame_shift(ema_features, ema_labels)
    ema_labels_weak = (torch.sum(ema_labels[weak_mask], -1) > 0).float()
    # sed students forward: student1 sees the original view, student2 the
    # shifted view
    strong_preds_student1, weak_preds_student1 = self.sed_student1(ori_features)
    strong_preds_student2, weak_preds_student2 = self.sed_student2(ema_features)
    # supervised loss on strong labels
    loss_strong1 = self.supervised_loss(
        strong_preds_student1[strong_mask], labels[strong_mask]
    )
    loss_strong2 = self.supervised_loss(
        strong_preds_student2[strong_mask], ema_labels[strong_mask]
    )
    # supervised loss on weakly labelled
    loss_weak1 = self.supervised_loss(weak_preds_student1[weak_mask], labels_weak)
    loss_weak2 = self.supervised_loss(weak_preds_student2[weak_mask], ema_labels_weak)
    # total supervised loss
    tot_loss_supervised1 = loss_strong1 + loss_weak1
    tot_loss_supervised2 = loss_strong2 + loss_weak2
    with torch.no_grad():
        # teachers are fed the view opposite to their paired student
        strong_preds_teacher1, weak_preds_teacher1 = self.sed_teacher1(ema_features)
        strong_preds_teacher2, weak_preds_teacher2 = self.sed_teacher2(ori_features)
        nClass = self.hparams['net']['nclass']
        # per-frame log-probabilities of each class being active (h1) or
        # inactive (h0); clamped away from 0/1 for numerical stability
        sp1 = torch.clamp(strong_preds_teacher1, 1.0e-4, 1-1.0e-4)
        p1_h1 = torch.log(sp1.permute(0,2,1))
        p1_h0 = torch.log(1-sp1.permute(0,2,1))
        sp2 = torch.clamp(strong_preds_teacher2, 1.0e-4, 1-1.0e-4)
        p2_h1 = torch.log(sp2.permute(0,2,1))
        p2_h0 = torch.log(1-sp2.permute(0,2,1))
        # stack both teachers along the frame axis -> (batch, 2*frames, class)
        p_h0 = torch.cat((p1_h0, p2_h0), 1)
        p_h1 = torch.cat((p1_h1, p2_h1), 1)
        # joint log-likelihood of label combinations with K active classes
        # K = 0
        P0 = p_h0.sum(2)
        # K = 1
        P1 = P0[:,:,None] + p_h1 - p_h0
        #P = torch.cat([P0.reshape(157,1), P1], 1)
        # K = 2
        P2 = []
        for cter in range(1,nClass):
            P2.append(P1[:,:,:-cter]+P1[:,:,cter:])
        P2 = torch.cat(P2, 2)
        P2 = P2 - P0[:,:,None]
        #P = torch.cat([P0.reshape(156*2,1), P1, P2], 1)
        # K: up to 3
        P3 = []
        for cter1 in range(1,nClass):
            for cter2 in range(1,nClass-cter1):
                P3.append(P1[:,:,:-(cter1+cter2)]+P1[:,:,cter1:-cter2]+P1[:,:,(cter1+cter2):])
        P3 = torch.cat(P3,2)
        P3 = P3 - 2*P0[:,:,None]
        # NOTE(review): 156 looks like the fixed number of model output
        # frames per clip - confirm against the CRNN configuration.
        P = torch.cat([P0.reshape(batch_num,156*2,1), P1, P2, P3], 2)
        # normalize over the combination axis to get a distribution
        P = self.softmax(P)
        prob_v, prob_i = torch.sort(P, dim=2, descending=True)
        # 5 best potential labels
        # NOTE(review): despite the comment above, [:,:,:] keeps *all*
        # hypotheses before renormalizing - confirm intended behaviour.
        norm_p = prob_v[:,:,:].sum(2)
        prob_v = prob_v[:,:,:]/norm_p[:,:,None]
        cl = self.class_label[prob_i[:,:,:].tolist(),:]
        # picking up the best label: probability-weighted sum over hypotheses
        cl = torch.mul(cl, prob_v[:,:,:,None]).sum(2)
        # first half of the stacked frame axis came from teacher1 (ema view),
        # the second half from teacher2 (ori view)
        est_strong_target1 = torch.squeeze(cl[:,:156,:]).float()
        est_strong_target2 = torch.squeeze(cl[:,156:,:]).float()
        est_strong_target1 = est_strong_target1.permute((0,2,1)) # for ema_feature
        est_strong_target2 = est_strong_target2.permute((0,2,1)) # for ori_feature
        # clip-level pseudo labels as the frame average
        est_weak_target1 = est_strong_target1.mean(2)
        est_weak_target2 = est_strong_target2.mean(2)
        # teacher supervised losses (monitoring only; no gradients here)
        loss_strong_teacher1 = self.supervised_loss(
            strong_preds_teacher1[strong_mask], ema_labels[strong_mask]
        )
        loss_strong_teacher2 = self.supervised_loss(
            strong_preds_teacher2[strong_mask], labels[strong_mask]
        )
        loss_weak_teacher1 = self.supervised_loss(
            weak_preds_teacher1[weak_mask], ema_labels_weak
        )
        loss_weak_teacher2 = self.supervised_loss(
            weak_preds_teacher2[weak_mask], labels_weak
        )
    # we apply consistency between the predictions, use the scheduler for learning rate (to be changed ?)
    weight1 = (
        self.hparams["training"]["const_max"]
        * self.scheduler1["scheduler"]._get_scaling_factor()
    )
    weight2 = (
        self.hparams["training"]["const_max"]
        * self.scheduler2["scheduler"]._get_scaling_factor()
    )
    # reliability of each teacher's pseudo labels: ramp-up weight scaled by
    # (1 - JSD) between pseudo labels and available ground truth
    strong_reliability1 = weight1*(1-self.jsd(est_strong_target1[strong_mask], ema_labels[strong_mask]))
    strong_reliability2 = weight2*(1-self.jsd(est_strong_target2[strong_mask], labels[strong_mask]))
    weak_reliability1 = weight1*(1-self.jsd(est_weak_target1[weak_mask], ema_labels_weak))
    weak_reliability2 = weight2*(1-self.jsd(est_weak_target2[weak_mask], labels_weak))
    # self-supervised losses: each student learns from the OTHER pair's
    # pseudo labels (cross-referencing).
    # NOTE(review): the hard-coded 24 presumably marks the start of the
    # unlabelled portion (indx_synth + indx_weak) - confirm against the
    # batch_size configuration.
    strong_self_sup_loss1 = self.selfsup_loss(
        strong_preds_student1[24:], est_strong_target2[24:] # for ori_feature
    )
    strong_self_sup_loss2 = self.selfsup_loss(
        strong_preds_student2[24:], est_strong_target1[24:] # for ema_feature
    )
    weak_self_sup_loss1 = self.selfsup_loss(
        weak_preds_student1[weak_mask], est_weak_target2[weak_mask]
    )
    weak_self_sup_loss2 = self.selfsup_loss(
        weak_preds_student2[weak_mask], est_weak_target1[weak_mask]
    )
    tot_self_loss1 = strong_reliability2*strong_self_sup_loss1 + weak_reliability2*weak_self_sup_loss1
    tot_self_loss2 = strong_reliability1*strong_self_sup_loss2 + weak_reliability1*weak_self_sup_loss2
    tot_loss1 = tot_loss_supervised1 + tot_self_loss1
    tot_loss2 = tot_loss_supervised2 + tot_self_loss2
    #self.log("train/student/loss_strong1", loss_strong1)
    #self.log("train/student/loss_weak1", loss_weak1)
    #self.log("train/student/loss_strong2", loss_strong2)
    #self.log("train/student/loss_weak2", loss_weak2)
    #self.log("train/teacher/loss_strong1", loss_strong_teacher1)
    #self.log("train/teacher/loss_weak1", loss_weak_teacher1)
    #self.log("train/teacher/loss_strong2", loss_strong_teacher2)
    #self.log("train/teacher/loss_weak2", loss_weak_teacher2)
    self.log("train/step1", self.scheduler1["scheduler"].step_num, prog_bar=True)
    self.log("train/step2", self.scheduler2["scheduler"].step_num, prog_bar=True)
    self.log("train/student/tot_loss1", tot_loss1, prog_bar=True)
    self.log("train/student/tot_loss2", tot_loss2, prog_bar=True)
    self.log("train/strong_reliability1", strong_reliability1, prog_bar=True)
    self.log("train/strong_reliability2", strong_reliability2, prog_bar=True)
    #self.log("train/student/tot_self_loss1", tot_self_loss1, prog_bar=True)
    #self.log("train/student/weak_self_sup_loss1", weak_self_sup_loss1)
    #self.log("train/student/strong_self_sup_loss1", strong_self_sup_loss1)
    #self.log("train/student/tot_self_loss2", tot_self_loss2, prog_bar=True)
    #self.log("train/student/weak_self_sup_loss2", weak_self_sup_loss2)
    #self.log("train/student/strong_self_sup_loss2", strong_self_sup_loss2)
    self.log("train/lr1", self.opt1.param_groups[-1]["lr"], prog_bar=True)
    self.log("train/lr2", self.opt2.param_groups[-1]["lr"], prog_bar=True)
    # update EMA teachers from their paired students
    self.update_ema(
        self.hparams["training"]["ema_factor"],
        self.scheduler1["scheduler"].step_num,
        self.sed_student1,
        self.sed_teacher1,
    )
    self.update_ema(
        self.hparams["training"]["ema_factor"],
        self.scheduler2["scheduler"].step_num,
        self.sed_student2,
        self.sed_teacher2,
    )
    # training Model I (manual optimization)
    self.opt1.zero_grad()
    self.manual_backward(tot_loss1, self.opt1)
    self.opt1.step()
    # training Model II
    self.opt2.zero_grad()
    self.manual_backward(tot_loss2, self.opt2)
    self.opt2.step()
    return {'tot_loss1': tot_loss1, 'tot_loss2': tot_loss2}
def validation_step(self, batch, batch_indx):
    """ Apply validation to a batch (step). Used during trainer.fit

    Student and teacher predictions from both model pairs are averaged
    (simple ensemble); losses/F1 are logged for the weak subset and
    decoded strong predictions are accumulated for the synthetic subset.

    Args:
        batch: tuple (audio, labels, padded_indxs, filenames), input batch.
        batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
    Returns:
        None
    """
    audio, labels, padded_indxs, filenames = batch
    features = self.mel_spec(audio)
    #features2 = self.lin_spec(audio)
    #features = torch.cat([features1, features2], 1)
    logmels = self.scaler(self.take_log(features))
    # prediction for student: average of the two students' outputs
    strong_preds_student1, weak_preds_student1 = self.sed_student1(logmels)
    strong_preds_student2, weak_preds_student2 = self.sed_student2(logmels)
    strong_preds_student = (strong_preds_student1 + strong_preds_student2)/2
    weak_preds_student = (weak_preds_student1 + weak_preds_student2)/2
    # prediction for teacher: average of the two teachers' outputs
    strong_preds_teacher1, weak_preds_teacher1 = self.sed_teacher1(logmels)
    strong_preds_teacher2, weak_preds_teacher2 = self.sed_teacher2(logmels)
    strong_preds_teacher = (strong_preds_teacher1 + strong_preds_teacher2)/2
    weak_preds_teacher = (weak_preds_teacher1 + weak_preds_teacher2)/2
    # we derive masks for each dataset based on folders of filenames
    mask_weak = (
        torch.tensor(
            [
                str(Path(x).parent)
                == str(Path(self.hparams["data"]["weak_folder"]))
                for x in filenames
            ]
        )
        .to(audio)
        .bool()
    )
    mask_synth = (
        torch.tensor(
            [
                str(Path(x).parent)
                == str(Path(self.hparams["data"]["synth_val_folder"]))
                for x in filenames
            ]
        )
        .to(audio)
        .bool()
    )
    if torch.any(mask_weak):
        # clip-level targets derived from strong labels
        labels_weak = (torch.sum(labels[mask_weak], -1) >= 1).float()
        loss_weak_student = self.supervised_loss(
            weak_preds_student[mask_weak], labels_weak
        )
        loss_weak_teacher = self.supervised_loss(
            weak_preds_teacher[mask_weak], labels_weak
        )
        self.log("val/weak/student/loss_weak", loss_weak_student)
        self.log("val/weak/teacher/loss_weak", loss_weak_teacher)
        # accumulate f1 score for weak labels
        self.get_weak_student_f1_seg_macro(
            weak_preds_student[mask_weak], labels_weak
        )
        self.get_weak_teacher_f1_seg_macro(
            weak_preds_teacher[mask_weak], labels_weak
        )
    if torch.any(mask_synth):
        loss_strong_student = self.supervised_loss(
            strong_preds_student[mask_synth], labels[mask_synth]
        )
        loss_strong_teacher = self.supervised_loss(
            strong_preds_teacher[mask_synth], labels[mask_synth]
        )
        self.log("val/synth/student/loss_strong", loss_strong_student)
        self.log("val/synth/teacher/loss_strong", loss_strong_teacher)
        filenames_synth = [
            x
            for x in filenames
            if Path(x).parent == Path(self.hparams["data"]["synth_val_folder"])
        ]
        # decode strong predictions at each validation threshold and append
        # to the per-threshold buffers consumed in validation_epoch_end
        decoded_student_strong = batched_decode_preds(
            strong_preds_student[mask_synth],
            filenames_synth,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.val_buffer_student_synth.keys()),
        )
        for th in self.val_buffer_student_synth.keys():
            self.val_buffer_student_synth[th] = self.val_buffer_student_synth[
                th
            ].append(decoded_student_strong[th], ignore_index=True)
        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher[mask_synth],
            filenames_synth,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.val_buffer_teacher_synth.keys()),
        )
        for th in self.val_buffer_teacher_synth.keys():
            self.val_buffer_teacher_synth[th] = self.val_buffer_teacher_synth[
                th
            ].append(decoded_teacher_strong[th], ignore_index=True)
    return
def validation_epoch_end(self, outputs):
    """ Fonction applied at the end of all the validation steps of the epoch.

    Computes the weak macro F1 and synthetic-set metrics from the buffers
    accumulated in validation_step, logs them, resets the buffers, and
    returns the model-selection objective (weak F1 + synthetic metric).

    Args:
        outputs: torch.Tensor, the concatenation of everything returned by validation_step.
    Returns:
        torch.Tensor, the objective metric to be used to choose the best model from for example.
    """
    weak_student_f1_macro = self.get_weak_student_f1_seg_macro.compute()
    weak_teacher_f1_macro = self.get_weak_teacher_f1_seg_macro.compute()
    # synth dataset: intersection-based macro F1 over all thresholds
    intersection_f1_macro_student = compute_per_intersection_macro_f1(
        self.val_buffer_student_synth,
        self.hparams["data"]["synth_val_tsv"],
        self.hparams["data"]["synth_val_dur"],
    )
    # collar-based event macro F1 at threshold 0.5
    synth_student_event_macro = log_sedeval_metrics(
        self.val_buffer_student_synth[0.5], self.hparams["data"]["synth_val_tsv"],
    )[0]
    intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
        self.val_buffer_teacher_synth,
        self.hparams["data"]["synth_val_tsv"],
        self.hparams["data"]["synth_val_dur"],
    )
    synth_teacher_event_macro = log_sedeval_metrics(
        self.val_buffer_teacher_synth[0.5], self.hparams["data"]["synth_val_tsv"],
    )[0]
    # choose which synthetic metric enters the objective (default:
    # intersection-based F1)
    obj_metric_synth_type = self.hparams["training"].get("obj_metric_synth_type")
    if obj_metric_synth_type is None:
        synth_metric = intersection_f1_macro_student
    elif obj_metric_synth_type == "event":
        synth_metric = synth_student_event_macro
    elif obj_metric_synth_type == "intersection":
        synth_metric = intersection_f1_macro_student
    else:
        raise NotImplementedError(
            f"obj_metric_synth_type: {obj_metric_synth_type} not implemented."
        )
    # model-selection objective: weak F1 + synthetic metric
    obj_metric = torch.tensor(weak_student_f1_macro.item() + synth_metric)
    self.log("val/obj_metric", obj_metric, prog_bar=True)
    self.log("val/weak/student/macro_F1", weak_student_f1_macro)
    self.log("val/weak/teacher/macro_F1", weak_teacher_f1_macro)
    self.log(
        "val/synth/student/intersection_f1_macro", intersection_f1_macro_student
    )
    self.log(
        "val/synth/teacher/intersection_f1_macro", intersection_f1_macro_teacher
    )
    self.log("val/synth/student/event_f1_macro", synth_student_event_macro)
    self.log("val/synth/teacher/event_f1_macro", synth_teacher_event_macro)
    # free the buffers for the next epoch
    self.val_buffer_student_synth = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }
    self.val_buffer_teacher_synth = {
        k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
    }
    self.get_weak_student_f1_seg_macro.reset()
    self.get_weak_teacher_f1_seg_macro.reset()
    return obj_metric
def on_save_checkpoint(self, checkpoint):
    """Add the state dicts of all four SED models to the checkpoint dict."""
    tracked_models = {
        "sed_student1": self.sed_student1,
        "sed_teacher1": self.sed_teacher1,
        "sed_student2": self.sed_student2,
        "sed_teacher2": self.sed_teacher2,
    }
    for key, model in tracked_models.items():
        checkpoint[key] = model.state_dict()
    return checkpoint
def test_step(self, batch, batch_indx):
    """ Apply Test to a batch (step), used only when (trainer.test is called)

    Ensembles (averages) the two students and the two teachers, dumps the
    per-clip frame posteriors to ./Posterior/, and (when ground truth is
    available) accumulates decoded predictions for PSDS and F1 computation.

    Args:
        batch: tuple (audio, labels, padded_indxs, filenames), input batch.
        batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
    Returns:
        None
    """
    audio, labels, padded_indxs, filenames = batch
    features = self.mel_spec(audio)
    #features2 = self.lin_spec(audio)
    #features = torch.cat([features1, features2], 1)
    # prediction for student: ensemble average of both students
    logmels = self.scaler(self.take_log(features))
    strong_preds_student1, weak_preds_student1 = self.sed_student1(logmels)
    strong_preds_student2, weak_preds_student2 = self.sed_student2(logmels)
    strong_preds_student = (strong_preds_student1 + strong_preds_student2)/2
    weak_preds_student = (weak_preds_student1 + weak_preds_student2)/2
    # prediction for teacher: ensemble average of both teachers
    strong_preds_teacher1, weak_preds_teacher1 = self.sed_teacher1(logmels)
    strong_preds_teacher2, weak_preds_teacher2 = self.sed_teacher2(logmels)
    strong_preds_teacher = (strong_preds_teacher1 + strong_preds_teacher2)/2
    weak_preds_teacher = (weak_preds_teacher1 + weak_preds_teacher2)/2
    # dump raw frame posteriors per clip
    # NOTE(review): requires ./Posterior/student and ./Posterior/teacher to
    # exist; np.save does not create directories - confirm setup.
    bsz = len(filenames)
    for bter in range(bsz):
        path, filename = os.path.split(filenames[bter])
        pred_student = strong_preds_student[bter].cpu().numpy()
        pred_teacher = strong_preds_teacher[bter].cpu().numpy()
        np.save('./Posterior/student/{}.npy'.format(filename), pred_student)
        np.save('./Posterior/teacher/{}.npy'.format(filename), pred_teacher)
    if not self.evaluation:
        # losses on the individual and ensembled models
        loss_strong_student1 = self.supervised_loss(strong_preds_student1, labels)
        loss_strong_student2 = self.supervised_loss(strong_preds_student2, labels)
        loss_strong_student = self.supervised_loss(strong_preds_student, labels)
        loss_strong_teacher1 = self.supervised_loss(strong_preds_teacher1, labels)
        loss_strong_teacher2 = self.supervised_loss(strong_preds_teacher2, labels)
        loss_strong_teacher = self.supervised_loss(strong_preds_teacher, labels)
        # self.log("test/student1/loss_strong", loss_strong_student1)
        # self.log("test/student2/loss_strong", loss_strong_student2)
        self.log("test/student/loss_strong", loss_strong_student)
        # self.log("test/teacher1/loss_strong", loss_strong_teacher1)
        # self.log("test/teacher2/loss_strong", loss_strong_teacher2)
        self.log("test/teacher/loss_strong", loss_strong_teacher)
        # compute psds: decode at every operating point and accumulate in
        # the per-threshold buffers (individual models + ensembles)
        decoded_student1_strong = batched_decode_preds(
            strong_preds_student1,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_student1.keys()),
        )
        for th in self.test_psds_buffer_student1.keys():
            self.test_psds_buffer_student1[th] = self.test_psds_buffer_student1[
                th
            ].append(decoded_student1_strong[th], ignore_index=True)
        decoded_student2_strong = batched_decode_preds(
            strong_preds_student2,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_student2.keys()),
        )
        for th in self.test_psds_buffer_student2.keys():
            self.test_psds_buffer_student2[th] = self.test_psds_buffer_student2[
                th
            ].append(decoded_student2_strong[th], ignore_index=True)
        decoded_student_strong = batched_decode_preds(
            strong_preds_student,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_student.keys()),
        )
        for th in self.test_psds_buffer_student.keys():
            self.test_psds_buffer_student[th] = self.test_psds_buffer_student[
                th
            ].append(decoded_student_strong[th], ignore_index=True)
        decoded_teacher1_strong = batched_decode_preds(
            strong_preds_teacher1,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_teacher1.keys()),
        )
        for th in self.test_psds_buffer_teacher1.keys():
            self.test_psds_buffer_teacher1[th] = self.test_psds_buffer_teacher1[
                th
            ].append(decoded_teacher1_strong[th], ignore_index=True)
        decoded_teacher2_strong = batched_decode_preds(
            strong_preds_teacher2,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_teacher2.keys()),
        )
        for th in self.test_psds_buffer_teacher2.keys():
            self.test_psds_buffer_teacher2[th] = self.test_psds_buffer_teacher2[
                th
            ].append(decoded_teacher2_strong[th], ignore_index=True)
        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_teacher.keys()),
        )
        for th in self.test_psds_buffer_teacher.keys():
            self.test_psds_buffer_teacher[th] = self.test_psds_buffer_teacher[
                th
            ].append(decoded_teacher_strong[th], ignore_index=True)
        # compute f1 score: decode once more at the fixed 0.5 threshold
        decoded_student1_strong = batched_decode_preds(
            strong_preds_student1,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_student1_05_buffer = self.decoded_student1_05_buffer.append(
            decoded_student1_strong[0.5]
        )
        decoded_student2_strong = batched_decode_preds(
            strong_preds_student2,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_student2_05_buffer = self.decoded_student2_05_buffer.append(
            decoded_student2_strong[0.5]
        )
        decoded_student_strong = batched_decode_preds(
            strong_preds_student,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_student_05_buffer = self.decoded_student_05_buffer.append(
            decoded_student_strong[0.5]
        )
        decoded_teacher1_strong = batched_decode_preds(
            strong_preds_teacher1,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_teacher1_05_buffer = self.decoded_teacher1_05_buffer.append(
            decoded_teacher1_strong[0.5]
        )
        decoded_teacher2_strong = batched_decode_preds(
            strong_preds_teacher2,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_teacher2_05_buffer = self.decoded_teacher2_05_buffer.append(
            decoded_teacher2_strong[0.5]
        )
        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_teacher_05_buffer = self.decoded_teacher_05_buffer.append(
            decoded_teacher_strong[0.5]
        )
def on_test_epoch_end(self):
# pub eval dataset
try:
log_dir = self.logger.log_dir
except Exception as e:
log_dir = self.hparams["log_dir"]
save_dir = os.path.join(log_dir, "metrics_test")
if self.evaluation:
# only save the predictions
save_dir_student = os.path.join(save_dir, "student")
os.makedirs(save_dir_student, exist_ok=True)
self.decoded_student_05_buffer.to_csv(
os.path.join(save_dir_student, f"predictions_05_student.tsv"),
sep="\t",
index=False
)
for k in self.test_psds_buffer_student.keys():
self.test_psds_buffer_student[k].to_csv(
os.path.join(save_dir_student, f"predictions_th_{k:.2f}.tsv"),
sep="\t",
index=False,
)
print(f"\nPredictions for student saved in: {save_dir_student}")
save_dir_teacher = os.path.join(save_dir, "teacher")
os.makedirs(save_dir_teacher, exist_ok=True)
self.decoded_teacher_05_buffer.to_csv(
os.path.join(save_dir_teacher, f"predictions_05_teacher.tsv"),
sep="\t",
index=False
)
for k in self.test_psds_buffer_student.keys():
self.test_psds_buffer_student[k].to_csv(
os.path.join(save_dir_teacher, f"predictions_th_{k:.2f}.tsv"),
sep="\t",
index=False,
)
print(f"\nPredictions for teacher saved in: {save_dir_teacher}")
else:
# calculate the metrics
psds_score_student1_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_student1,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "student1", "scenario1"),
)
psds_score_student1_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_student1,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "student1", "scenario2"),
)
psds_score_student2_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_student2,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "student2", "scenario1"),
)
psds_score_student2_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_student2,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "student2", "scenario2"),
)
psds_score_student_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_student,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "student", "scenario1"),
)
psds_score_student_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_student,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "student", "scenario2"),
)
psds_score_teacher1_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher1,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher1", "scenario1"),
)
psds_score_teacher1_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher1,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher1", "scenario2"),
)
psds_score_teacher2_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher2,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher2", "scenario1"),
)
psds_score_teacher2_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher2,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher2", "scenario2"),
)
psds_score_teacher_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher", "scenario1"),
)
psds_score_teacher_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher", "scenario2"),
)
event_macro_student1 = log_sedeval_metrics(
self.decoded_student1_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "student1"),
)[0]
event_macro_student2 = log_sedeval_metrics(
self.decoded_student2_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "student2"),
)[0]
event_macro_student = log_sedeval_metrics(
self.decoded_student_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "student"),
)[0]
event_macro_teacher1 = log_sedeval_metrics(
self.decoded_teacher1_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "teacher1"),
)[0]
event_macro_teacher2 = log_sedeval_metrics(
self.decoded_teacher2_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "teacher2"),
)[0]
event_macro_teacher = log_sedeval_metrics(
self.decoded_teacher_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "teacher"),
)[0]
# synth dataset
intersection_f1_macro_student1 = compute_per_intersection_macro_f1(
{"0.5": self.decoded_student1_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_teacher1 = compute_per_intersection_macro_f1(
{"0.5": self.decoded_teacher1_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_student2 = compute_per_intersection_macro_f1(
{"0.5": self.decoded_student2_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_teacher2 = compute_per_intersection_macro_f1(
{"0.5": self.decoded_teacher2_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_student = compute_per_intersection_macro_f1(
{"0.5": self.decoded_student_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
{"0.5": self.decoded_teacher_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
best_test_result1 = torch.tensor(max(psds_score_student1_scenario1, psds_score_student1_scenario2))
best_test_result2 = torch.tensor(max(psds_score_student2_scenario1, psds_score_student2_scenario2))
best_test_result = torch.tensor(max(psds_score_student_scenario1, psds_score_student_scenario2))
results = {
"hp_metric": best_test_result,
"test/student/psds_score_scenario1": psds_score_student_scenario1,
"test/student/psds_score_scenario2": psds_score_student_scenario2,
"test/teacher/psds_score_scenario1": psds_score_teacher_scenario1,
"test/teacher/psds_score_scenario2": psds_score_teacher_scenario2,
"test/student/event_f1_macro": event_macro_student,
"test/student/intersection_f1_macro": intersection_f1_macro_student,
"test/teacher/event_f1_macro": event_macro_teacher,
"test/teacher/intersection_f1_macro": intersection_f1_macro_teacher,
#"hp_metric_I": best_test_result1,
#"test/student1/psds_score_scenario1": psds_score_student1_scenario1,
#"test/student1/psds_score_scenario2": psds_score_student1_scenario2,
#"test/teacher1/psds_score_scenario1": psds_score_teacher1_scenario1,
#"test/teacher1/psds_score_scenario2": psds_score_teacher1_scenario2,
#"test/student1/event_f1_macro": event_macro_student1,
#"test/student1/intersection_f1_macro": intersection_f1_macro_student1,
#"test/teacher1/event_f1_macro": event_macro_teacher1,
#"test/teacher1/intersection_f1_macro": intersection_f1_macro_teacher1,
#"hp_metric_II": best_test_result2,
#"test/student2/psds_score_scenario1": psds_score_student2_scenario1,
#"test/student2/psds_score_scenario2": psds_score_student2_scenario2,
#"test/teacher2/psds_score_scenario1": psds_score_teacher2_scenario1,
#"test/teacher2/psds_score_scenario2": psds_score_teacher2_scenario2,
#"test/student2/event_f1_macro": event_macro_student2,
#"test/student2/intersection_f1_macro": intersection_f1_macro_student2,
#"test/teacher2/event_f1_macro": event_macro_teacher2,
#"test/teacher2/intersection_f1_macro": intersection_f1_macro_teacher2,
}
if self.logger is not None:
self.logger.log_metrics(results)
self.logger.log_hyperparams(self.hparams, results)
for key in results.keys():
self.log(key, results[key], prog_bar=True, logger=False)
def configure_optimizers(self):
return [self.opt1, self.opt2], [self.scheduler1, self.scheduler2]
def train_dataloader(self):
self.train_loader = torch.utils.data.DataLoader(
self.train_data,
batch_sampler=self.train_sampler,
num_workers=self.num_workers,
)
return self.train_loader
def val_dataloader(self):
self.val_loader = torch.utils.data.DataLoader(
self.valid_data,
batch_size=self.hparams["training"]["batch_size_val"],
num_workers=self.num_workers,
shuffle=False,
drop_last=False,
)
return self.val_loader
def test_dataloader(self):
self.test_loader = torch.utils.data.DataLoader(
self.test_data,
batch_size=self.hparams["training"]["batch_size_val"],
num_workers=self.num_workers,
shuffle=False,
drop_last=False,
)
return self.test_loader
| 48,990 | 39.757903 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/nnet/CNN.py | import torch.nn as nn
import torch
import math
import torch.nn.functional as F
class GLU(nn.Module):
    """Gated linear unit over the channel dimension.

    Output is ``linear(x) * sigmoid(x)`` where the linear layer acts on the
    channel axis of a (batch, channels, time, freq) tensor.
    """

    def __init__(self, input_num):
        super(GLU, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear = nn.Linear(input_num, input_num)

    def forward(self, x):
        # nn.Linear operates on the last dim, so move channels last and back.
        projected = self.linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return projected * self.sigmoid(x)
class ContextGating(nn.Module):
    """Context gating: scale the input by ``sigmoid(linear(x))``.

    The linear layer acts on the channel axis of a
    (batch, channels, time, freq) tensor.
    """

    def __init__(self, input_num):
        super(ContextGating, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear = nn.Linear(input_num, input_num)

    def forward(self, x):
        # nn.Linear operates on the last dim, so move channels last and back.
        gate = self.linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return x * self.sigmoid(gate)
class BasicConv(nn.Module):
    """Conv2d with optional BatchNorm and ReLU, applied in that order."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(
            in_planes, out_planes, kernel_size=kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
        )
        if bn:
            self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)
        else:
            self.bn = None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        out = self.conv(x)
        # Optional post-conv layers; None means the layer was disabled.
        for layer in (self.bn, self.relu):
            if layer is not None:
                out = layer(out)
        return out
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class ChannelGate(nn.Module):
    """Channel attention gate (CBAM channel branch).

    Pools the input spatially with each requested pooling type, runs the
    pooled vectors through a shared bottleneck MLP, sums the results and
    uses their sigmoid as a per-channel scaling of the input.

    Fix: replaced deprecated ``F.sigmoid`` with ``torch.sigmoid``.
    """

    def __init__(self, gate_channels, reduction_ratio=4, pool_types=['avg', 'max']):
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        # Shared bottleneck MLP: C -> C // r -> C.
        self.mlp = nn.Sequential(
            Flatten(),
            nn.Linear(gate_channels, gate_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(gate_channels // reduction_ratio, gate_channels)
        )
        self.pool_types = pool_types

    def forward(self, x):
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                # Global average pool over the full spatial extent.
                avg_pool = F.avg_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(avg_pool)
            elif pool_type == 'max':
                max_pool = F.max_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(max_pool)
            elif pool_type == 'lp':
                lp_pool = F.lp_pool2d(x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(lp_pool)
            elif pool_type == 'lse':
                # LSE pool only (uses the module-level logsumexp_2d helper).
                lse_pool = logsumexp_2d(x)
                channel_att_raw = self.mlp(lse_pool)
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum += channel_att_raw
        # Broadcast the per-channel gate back over the spatial dims.
        scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale
def logsumexp_2d(tensor):
    """Numerically stable log-sum-exp over the spatial dims.

    Args:
        tensor: (batch, channels, H, W) input.
    Returns:
        (batch, channels, 1) tensor of log-sum-exp values.

    Idiom fix: the original hand-rolled max-shift formulation is exactly
    what ``torch.logsumexp`` computes; use the built-in.
    """
    flat = tensor.view(tensor.size(0), tensor.size(1), -1)
    return torch.logsumexp(flat, dim=2, keepdim=True)
class ChannelPool(nn.Module):
    """Stack channel-wise max and mean maps into a 2-channel tensor."""

    def forward(self, x):
        max_map = torch.max(x, 1)[0]
        mean_map = torch.mean(x, 1)
        return torch.cat((max_map.unsqueeze(1), mean_map.unsqueeze(1)), dim=1)
class SpatialGate(nn.Module):
    """Spatial attention gate (CBAM spatial branch).

    Compresses channels to a 2-map (max/mean) tensor, convolves it down to a
    single attention map and scales the input by its sigmoid.

    Fix: replaced deprecated ``F.sigmoid`` with ``torch.sigmoid``.
    """

    def __init__(self):
        super(SpatialGate, self).__init__()
        kernel_size = 7
        self.compress = ChannelPool()
        # "same" padding for the 7x7 conv; no ReLU so the gate can go negative
        # before the sigmoid.
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size - 1) // 2, relu=False)

    def forward(self, x):
        x_compress = self.compress(x)
        x_out = self.spatial(x_compress)
        scale = torch.sigmoid(x_out)
        return x * scale
class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel gate, then optional spatial gate."""

    def __init__(self, gate_channels, reduction_ratio=4, pool_types=['avg', 'max'], no_spatial=False):
        super(CBAM, self).__init__()
        self.no_spatial = no_spatial
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.SpatialGate = SpatialGate() if not no_spatial else None

    def forward(self, x):
        attended = self.ChannelGate(x)
        # SpatialGate is None exactly when no_spatial was requested.
        if self.SpatialGate is not None:
            attended = self.SpatialGate(attended)
        return attended
class ResidualConv(nn.Module):
    """Two conv-BN-ReLU layers plus a 3x3 conv skip connection.

    NOTE(review): the stride/dilation/groups/bn/bias arguments are accepted
    but not forwarded to the inner convolutions — confirm intended.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, bn=True, bias=False):
        super(ResidualConv, self).__init__()
        self.conv1 = BasicConv(in_planes, out_planes, kernel_size, padding=1)
        self.conv2 = BasicConv(out_planes, out_planes, kernel_size, padding=1)
        self.skip = nn.Conv2d(in_planes, out_planes, [3, 3], [1, 1], [1, 1])

    def forward(self, x):
        main = self.conv2(self.conv1(x))
        return main + self.skip(x)
class ResidualCNN(nn.Module):
    # Feature extractor: two GLU-gated "stem" conv blocks followed by five
    # residual blocks with CBAM attention. Module names (conv0, cbam2, ...)
    # are part of checkpoints — do not reorder.
    def __init__(
        self,
        n_in_channel,
        activation="Relu",
        conv_dropout=0,
        kernel_size=[3, 3, 3],
        padding=[1, 1, 1],
        stride=[1, 1, 1],
        nb_filters=[64, 64, 64],
        pooling=[(1, 4), (1, 4), (1, 4)],
        normalization="batch",
        **transformer_kwargs
    ):
        """
        Initialization of the residual CNN network.
        Args:
            n_in_channel: int, number of input channels
            activation: str, activation function (unused here; GLU is hard-wired)
            conv_dropout: float, dropout (unused here)
            kernel_size: per-layer kernel sizes (needs at least 7 entries)
            padding: per-layer padding (needs at least 7 entries)
            stride: list, per-layer stride (needs at least 7 entries)
            nb_filters: number of filters per layer (needs at least 7 entries)
            pooling: list of tuples, time and frequency pooling per layer
            normalization: choose between "batch" for BatchNormalization and "layer" for LayerNormalization.
        NOTE(review): the defaults only provide 3 entries per list, but the
        body indexes up to [6] — callers must pass 7-element configs. The
        mutable list defaults are shared across calls; they are never
        mutated here, but confirm before relying on them.
        """
        super(ResidualCNN, self).__init__()
        self.nb_filters = nb_filters
        cnn = nn.Sequential()
        # stem block 0: conv -> batchnorm -> GLU gate -> average pooling
        cnn.add_module('conv0', nn.Conv2d(n_in_channel, nb_filters[0], kernel_size=kernel_size[0], stride=stride[0], padding=padding[0]))
        cnn.add_module('batchnorm0', nn.BatchNorm2d(nb_filters[0], eps=0.001, momentum=0.99))
        cnn.add_module('glu0', GLU(nb_filters[0]))
        cnn.add_module('avgpool0', nn.AvgPool2d(pooling[0]))
        # stem block 1: same structure as block 0
        cnn.add_module('conv1', nn.Conv2d(nb_filters[0], nb_filters[1], kernel_size=kernel_size[1], stride=stride[1], padding=padding[1]))
        cnn.add_module('batchnorm1', nn.BatchNorm2d(nb_filters[1], eps=0.001, momentum=0.99))
        cnn.add_module('glu1', GLU(nb_filters[1]))
        cnn.add_module('avgpool1', nn.AvgPool2d(pooling[1]))
        # Residual block 0: residual conv -> CBAM attention -> pooling
        cnn.add_module('conv2', ResidualConv(nb_filters[1], nb_filters[2], kernel_size=kernel_size[2], stride=stride[2], padding=padding[2]))
        cnn.add_module('cbam2', CBAM(nb_filters[2]))
        cnn.add_module('avgpool2', nn.AvgPool2d(pooling[2]))
        # Residual block 1
        cnn.add_module('conv3', ResidualConv(nb_filters[2], nb_filters[3], kernel_size=kernel_size[3], stride=stride[3], padding=padding[3]))
        cnn.add_module('cbam3', CBAM(nb_filters[3]))
        cnn.add_module('avgpool3', nn.AvgPool2d(pooling[3]))
        # Residual block 2
        cnn.add_module('conv4', ResidualConv(nb_filters[3], nb_filters[4], kernel_size=kernel_size[4], stride=stride[4], padding=padding[4]))
        cnn.add_module('cbam4', CBAM(nb_filters[4]))
        cnn.add_module('avgpool4', nn.AvgPool2d(pooling[4]))
        # Residual block 3
        cnn.add_module('conv5', ResidualConv(nb_filters[4], nb_filters[5], kernel_size=kernel_size[5], stride=stride[5], padding=padding[5]))
        cnn.add_module('cbam5', CBAM(nb_filters[5]))
        cnn.add_module('avgpool5', nn.AvgPool2d(pooling[5]))
        # Residual block 4
        cnn.add_module('conv6', ResidualConv(nb_filters[5], nb_filters[6], kernel_size=kernel_size[6], stride=stride[6], padding=padding[6]))
        cnn.add_module('cbam6', CBAM(nb_filters[6]))
        cnn.add_module('avgpool6', nn.AvgPool2d(pooling[6]))
        # Residual block 5 (disabled)
        #cnn.add_module('conv7', ResidualConv(nb_filters[6], nb_filters[7], kernel_size=kernel_size[7], stride=stride[7], padding=padding[7]))
        #cnn.add_module('cbam7', CBAM(nb_filters[7]))
        #cnn.add_module('avgpool7', nn.AvgPool2d(pooling[7]))
        # register the assembled stack
        self.cnn = cnn
    def forward(self, x):
        """
        Forward step of the CNN module
        Args:
            x (Tensor): input batch of size (batch_size, n_channels, n_frames, n_freq)
        Returns:
            Tensor: batch embedded
        """
        # conv features
        x = self.cnn(x)
        return x
class CNN(nn.Module):
    # Plain CNN feature extractor: a configurable stack of
    # conv -> normalization -> activation -> (dropout) -> average pooling
    # blocks. Module names ("conv0", "batchnorm0", ...) are part of
    # checkpoints — do not reorder.
    def __init__(
        self,
        n_in_channel,
        activation="Relu",
        conv_dropout=0,
        kernel_size=[3, 3, 3],
        padding=[1, 1, 1],
        stride=[1, 1, 1],
        nb_filters=[64, 64, 64],
        pooling=[(1, 4), (1, 4), (1, 4)],
        normalization="batch",
        **transformer_kwargs
    ):
        """
        Initialization of the CNN network.
        Args:
            n_in_channel: int, number of input channels
            activation: str, activation function; one of
                "relu", "leakyrelu", "glu", "cg" (case-insensitive)
            conv_dropout: float, dropout applied after each activation
            kernel_size: per-layer kernel sizes (same length as nb_filters)
            padding: per-layer padding
            stride: list, per-layer stride
            nb_filters: number of filters per layer; its length sets the depth
            pooling: list of tuples, time and frequency pooling per layer
            normalization: choose between "batch" for BatchNormalization and "layer" for LayerNormalization.
        NOTE(review): the list defaults are mutable and shared across calls;
        they are only read here, but confirm before relying on them.
        """
        super(CNN, self).__init__()
        self.nb_filters = nb_filters
        cnn = nn.Sequential()
        # Appends one conv block (conv/norm/activation/dropout) to `cnn`.
        def conv(i, normalization="batch", dropout=None, activ="relu"):
            nIn = n_in_channel if i == 0 else nb_filters[i - 1]
            nOut = nb_filters[i]
            cnn.add_module(
                "conv{0}".format(i),
                nn.Conv2d(nIn, nOut, kernel_size[i], stride[i], padding[i]),
            )
            if normalization == "batch":
                cnn.add_module(
                    "batchnorm{0}".format(i),
                    nn.BatchNorm2d(nOut, eps=0.001, momentum=0.99),
                )
            elif normalization == "layer":
                # GroupNorm with a single group == layer norm over channels
                cnn.add_module("layernorm{0}".format(i), nn.GroupNorm(1, nOut))
            if activ.lower() == "leakyrelu":
                cnn.add_module("relu{0}".format(i), nn.LeakyReLU(0.2))
            elif activ.lower() == "relu":
                cnn.add_module("relu{0}".format(i), nn.ReLU())
            elif activ.lower() == "glu":
                cnn.add_module("glu{0}".format(i), GLU(nOut))
            elif activ.lower() == "cg":
                cnn.add_module("cg{0}".format(i), ContextGating(nOut))
            if dropout is not None:
                cnn.add_module("dropout{0}".format(i), nn.Dropout(dropout))
        # 128x862x64
        for i in range(len(nb_filters)):
            conv(i, normalization=normalization, dropout=conv_dropout, activ=activation)
            cnn.add_module(
                "pooling{0}".format(i), nn.AvgPool2d(pooling[i])
            )  # bs x tframe x mels
        self.cnn = cnn
    def forward(self, x):
        """
        Forward step of the CNN module
        Args:
            x (Tensor): input batch of size (batch_size, n_channels, n_frames, n_freq)
        Returns:
            Tensor: batch embedded
        """
        # conv features
        x = self.cnn(x)
        return x
| 11,753 | 35.616822 | 154 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/nnet/CRNN.py | import warnings
import torch.nn as nn
import torch
from .RNN import BidirectionalGRU
from .CNN import CNN, ResidualCNN
class RCRNN(nn.Module):
    """Residual CRNN for sound event detection.

    A ResidualCNN feature extractor followed by a bidirectional GRU and a
    frame-wise sigmoid classifier; optional attention pooling produces the
    clip-level (weak) prediction.

    Fix: the ``NotImplementedError`` for unsupported ``rnn_type`` values was
    created but never raised, leaving ``self.rnn`` undefined and failing
    later with an unrelated ``AttributeError``.
    """

    def __init__(
        self,
        n_in_channel=1,
        nclass=10,
        attention=True,
        activation="glu",
        dropout=0.5,
        train_cnn=True,
        rnn_type="BGRU",
        n_RNN_cell=128,
        n_layers_RNN=2,
        dropout_recurrent=0,
        cnn_integration=False,
        freeze_bn=False,
        **kwargs,
    ):
        """
        Initialization of the RCRNN model.
        Args:
            n_in_channel: int, number of input channels
            nclass: int, number of classes
            attention: bool, adding attention layer or not
            activation: str, activation function
            dropout: float, dropout
            train_cnn: bool, training cnn layers
            rnn_type: str, rnn type (only "BGRU" is supported)
            n_RNN_cell: int, RNN nodes
            n_layers_RNN: int, number of RNN layers
            dropout_recurrent: float, recurrent layers dropout
            cnn_integration: bool, integration of cnn
            freeze_bn: bool, freeze BatchNorm statistics/weights during train()
            **kwargs: keyword arguments for ResidualCNN.
        """
        super(RCRNN, self).__init__()
        self.n_in_channel = n_in_channel
        self.attention = attention
        self.cnn_integration = cnn_integration
        self.freeze_bn = freeze_bn
        n_in_cnn = n_in_channel
        if cnn_integration:
            # Channels are folded into the batch dim in forward(); the CNN
            # itself sees mono input.
            n_in_cnn = 1
        self.cnn = ResidualCNN(
            n_in_channel=n_in_cnn, activation=activation, conv_dropout=dropout, **kwargs
        )
        self.train_cnn = train_cnn
        if not train_cnn:
            for param in self.cnn.parameters():
                param.requires_grad = False
        if rnn_type == "BGRU":
            nb_in = self.cnn.nb_filters[-1]
            if self.cnn_integration:
                # self.fc = nn.Linear(nb_in * n_in_channel, nb_in)
                nb_in = nb_in * n_in_channel
            # NOTE(review): num_layers is hard-coded to 2; the n_layers_RNN
            # argument is ignored here — confirm intended.
            self.rnn = BidirectionalGRU(
                n_in=nb_in,
                n_hidden=n_RNN_cell,
                dropout=dropout_recurrent,
                num_layers=2,
            )
        else:
            # Fail fast instead of leaving self.rnn undefined.
            raise NotImplementedError("Only BGRU supported for CRNN for now")
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.dense = nn.Linear(n_RNN_cell * 2, nclass)
        self.sigmoid = nn.Sigmoid()
        if self.attention:
            self.dense_softmax = nn.Linear(n_RNN_cell * 2, nclass)
            self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, pad_mask=None):
        """Return (strong, weak) predictions.

        Args:
            x: (batch, n_frames, n_freq) or (batch, channels, n_freq, n_frames)
            pad_mask: optional boolean mask applied to the attention logits.
        Returns:
            strong: (batch, nclass, frames) frame-level probabilities
            weak: (batch, nclass) clip-level probabilities
        """
        if len(x.shape) < 4:
            # (bs, frames, freq) -> (bs, 1, frames, freq)
            x = x.transpose(1, 2).unsqueeze(1)
        else:
            x = x.permute(0, 1, 3, 2)
        # input size : (batch_size, n_channels, n_frames, n_freq)
        if self.cnn_integration:
            bs_in, nc_in = x.size(0), x.size(1)
            x = x.view(bs_in * nc_in, 1, *x.shape[2:])
        # conv features
        x = self.cnn(x)
        bs, chan, frames, freq = x.size()
        if self.cnn_integration:
            x = x.reshape(bs_in, chan * nc_in, frames, freq)
        if freq != 1:
            warnings.warn(
                f"Output shape is: {(bs, frames, chan * freq)}, from {freq} staying freq"
            )
            x = x.permute(0, 2, 1, 3)
            x = x.contiguous().view(bs, frames, chan * freq)
        else:
            x = x.squeeze(-1)
            x = x.permute(0, 2, 1)  # [bs, frames, chan]
        # rnn features
        x = self.rnn(x)
        x = self.dropout(x)
        strong = self.dense(x)  # [bs, frames, nclass]
        strong = self.sigmoid(strong)
        if self.attention:
            sof = self.dense_softmax(x)  # [bs, frames, nclass]
            if not pad_mask is None:
                sof = sof.masked_fill(pad_mask.transpose(1, 2), -1e30)  # mask attention
            sof = self.softmax(sof)
            sof = torch.clamp(sof, min=1e-7, max=1)
            weak = (strong * sof).sum(1) / sof.sum(1)  # [bs, nclass]
        else:
            weak = strong.mean(1)
        return strong.transpose(1, 2), weak

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(RCRNN, self).train(mode)
        if self.freeze_bn:
            print("Freezing Mean/Var of BatchNorm2D.")
            print("Freezing Weight/Bias of BatchNorm2D.")
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    # eval() stops running-stat updates; disabling grads
                    # freezes the affine parameters.
                    m.eval()
                    m.weight.requires_grad = False
                    m.bias.requires_grad = False
class CRNN(nn.Module):
    """CRNN baseline for sound event detection.

    A CNN feature extractor followed by a bidirectional GRU and a frame-wise
    sigmoid classifier; optional attention pooling produces the clip-level
    (weak) prediction.

    Fix: the ``NotImplementedError`` for unsupported ``rnn_type`` values was
    created but never raised, leaving ``self.rnn`` undefined and failing
    later with an unrelated ``AttributeError``.
    """

    def __init__(
        self,
        n_in_channel=1,
        nclass=10,
        attention=True,
        activation="glu",
        dropout=0.5,
        train_cnn=True,
        rnn_type="BGRU",
        n_RNN_cell=128,
        n_layers_RNN=2,
        dropout_recurrent=0,
        cnn_integration=False,
        freeze_bn=False,
        **kwargs,
    ):
        """
        Initialization of the CRNN model.
        Args:
            n_in_channel: int, number of input channels
            nclass: int, number of classes
            attention: bool, adding attention layer or not
            activation: str, activation function
            dropout: float, dropout
            train_cnn: bool, training cnn layers
            rnn_type: str, rnn type (only "BGRU" is supported)
            n_RNN_cell: int, RNN nodes
            n_layers_RNN: int, number of RNN layers
            dropout_recurrent: float, recurrent layers dropout
            cnn_integration: bool, integration of cnn
            freeze_bn: bool, freeze BatchNorm statistics/weights during train()
            **kwargs: keyword arguments for CNN.
        """
        super(CRNN, self).__init__()
        self.n_in_channel = n_in_channel
        self.attention = attention
        self.cnn_integration = cnn_integration
        self.freeze_bn = freeze_bn
        n_in_cnn = n_in_channel
        if cnn_integration:
            # Channels are folded into the batch dim in forward(); the CNN
            # itself sees mono input.
            n_in_cnn = 1
        self.cnn = CNN(
            n_in_channel=n_in_cnn, activation=activation, conv_dropout=dropout, **kwargs
        )
        self.train_cnn = train_cnn
        if not train_cnn:
            for param in self.cnn.parameters():
                param.requires_grad = False
        if rnn_type == "BGRU":
            nb_in = self.cnn.nb_filters[-1]
            if self.cnn_integration:
                # self.fc = nn.Linear(nb_in * n_in_channel, nb_in)
                nb_in = nb_in * n_in_channel
            self.rnn = BidirectionalGRU(
                n_in=nb_in,
                n_hidden=n_RNN_cell,
                dropout=dropout_recurrent,
                num_layers=n_layers_RNN,
            )
        else:
            # Fail fast instead of leaving self.rnn undefined.
            raise NotImplementedError("Only BGRU supported for CRNN for now")
        self.dropout = nn.Dropout(dropout)
        self.dense = nn.Linear(n_RNN_cell * 2, nclass)
        self.sigmoid = nn.Sigmoid()
        if self.attention:
            self.dense_softmax = nn.Linear(n_RNN_cell * 2, nclass)
            self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, pad_mask=None):
        """Return (strong, weak) predictions.

        Args:
            x: (batch, n_frames, n_freq) features
            pad_mask: optional boolean mask applied to the attention logits.
        Returns:
            strong: (batch, nclass, frames) frame-level probabilities
            weak: (batch, nclass) clip-level probabilities
        """
        x = x.transpose(1, 2).unsqueeze(1)
        # input size : (batch_size, n_channels, n_frames, n_freq)
        if self.cnn_integration:
            bs_in, nc_in = x.size(0), x.size(1)
            x = x.view(bs_in * nc_in, 1, *x.shape[2:])
        # conv features
        x = self.cnn(x)
        bs, chan, frames, freq = x.size()
        if self.cnn_integration:
            x = x.reshape(bs_in, chan * nc_in, frames, freq)
        if freq != 1:
            warnings.warn(
                f"Output shape is: {(bs, frames, chan * freq)}, from {freq} staying freq"
            )
            x = x.permute(0, 2, 1, 3)
            x = x.contiguous().view(bs, frames, chan * freq)
        else:
            x = x.squeeze(-1)
            x = x.permute(0, 2, 1)  # [bs, frames, chan]
        # rnn features
        x = self.rnn(x)
        x = self.dropout(x)
        strong = self.dense(x)  # [bs, frames, nclass]
        strong = self.sigmoid(strong)
        if self.attention:
            sof = self.dense_softmax(x)  # [bs, frames, nclass]
            if not pad_mask is None:
                sof = sof.masked_fill(pad_mask.transpose(1, 2), -1e30)  # mask attention
            sof = self.softmax(sof)
            sof = torch.clamp(sof, min=1e-7, max=1)
            weak = (strong * sof).sum(1) / sof.sum(1)  # [bs, nclass]
        else:
            weak = strong.mean(1)
        return strong.transpose(1, 2), weak

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(CRNN, self).train(mode)
        if self.freeze_bn:
            print("Freezing Mean/Var of BatchNorm2D.")
            print("Freezing Weight/Bias of BatchNorm2D.")
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    # eval() stops running-stat updates; disabling grads
                    # freezes the affine parameters.
                    m.eval()
                    m.weight.requires_grad = False
                    m.bias.requires_grad = False
| 9,567 | 31.767123 | 89 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/nnet/RNN.py | import warnings
import torch
from torch import nn as nn
class BidirectionalGRU(nn.Module):
    """Batch-first bidirectional GRU returning the full output sequence.

    Output feature size is ``2 * n_hidden`` (forward + backward states).
    """

    def __init__(self, n_in, n_hidden, dropout=0, num_layers=1):
        """
        Args:
            n_in: int, input feature size
            n_hidden: int, hidden size per direction
            dropout: float, inter-layer dropout (only active when num_layers > 1)
            num_layers: int, number of stacked GRU layers
        """
        super(BidirectionalGRU, self).__init__()
        self.rnn = nn.GRU(
            input_size=n_in,
            hidden_size=n_hidden,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout,
            bidirectional=True,
        )

    def forward(self, input_feat):
        output, _hidden = self.rnn(input_feat)
        return output
class BidirectionalLSTM(nn.Module):
    """Batch-first BiLSTM followed by a per-timestep linear projection.

    Bug fix: the projection expected ``nHidden * 2`` input features, but a
    bidirectional LSTM with ``nHidden // 2`` units per direction outputs
    ``2 * (nHidden // 2)`` features, so the original forward pass always
    failed with a shape mismatch.
    """

    def __init__(self, nIn, nHidden, nOut, dropout=0, num_layers=1):
        """
        Args:
            nIn: int, input feature size
            nHidden: int, total hidden size (split across the two directions)
            nOut: int, output feature size
            dropout: float, inter-layer dropout (only active when num_layers > 1)
            num_layers: int, number of stacked LSTM layers
        """
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(
            nIn,
            nHidden // 2,
            bidirectional=True,
            batch_first=True,
            dropout=dropout,
            num_layers=num_layers,
        )
        # Actual BiLSTM output width; handles odd nHidden correctly.
        self.embedding = nn.Linear(2 * (nHidden // 2), nOut)

    def forward(self, input_feat):
        recurrent, _ = self.rnn(input_feat)
        b, T, h = recurrent.size()
        # Flatten time into the batch dim so the Linear runs once.
        t_rec = recurrent.contiguous().view(b * T, h)
        output = self.embedding(t_rec)  # [b * T, nOut]
        return output.view(b, T, -1)
| 1,488 | 26.072727 | 68 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/utils/scaler.py | import tqdm
import torch
class TorchScaler(torch.nn.Module):
    """
    This torch module implements scaling for input tensors, both instance based
    and dataset-wide statistic based.

    Args:
        statistic: str, (default='dataset'), represent how to compute the statistic for normalisation.
            Choice in {'dataset', 'instance'}.
            'dataset' needs to be 'fit()' with a dataloader of the dataset.
            'instance' apply the normalisation at an instance-level, so compute the statitics on the instance
            specified, it can be a clip or a batch.
        normtype: str, (default='standard') the type of normalisation to use.
            Choice in {'standard', 'mean', 'minmax'}. 'standard' applies a classic normalisation with mean and standard
            deviation. 'mean' substract the mean to the data. 'minmax' substract the minimum of the data and divide by
            the difference between max and min.
    """

    def __init__(self, statistic="dataset", normtype="standard", dims=(1, 2), eps=1e-8):
        # dims: axes over which statistics are computed (default (1, 2),
        # i.e. time and frequency of a (batch, time, freq) tensor).
        # eps: numerical stability constant added to denominators.
        super(TorchScaler, self).__init__()
        assert statistic in ["dataset", "instance"]
        assert normtype in ["standard", "mean", "minmax"]
        if statistic == "dataset" and normtype == "minmax":
            raise NotImplementedError(
                "statistic==dataset and normtype==minmax is not currently implemented."
            )
        self.statistic = statistic
        self.normtype = normtype
        self.dims = dims
        self.eps = eps
    def load_state_dict(self, state_dict, strict=True):
        # Instance-level scaling has no fitted buffers, so loading a state
        # dict is deliberately a no-op in that mode.
        if self.statistic == "dataset":
            super(TorchScaler, self).load_state_dict(state_dict, strict)
    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        # Same rationale as load_state_dict: skip buffer loading entirely
        # when statistics are computed per instance.
        if self.statistic == "dataset":
            super(TorchScaler, self)._load_from_state_dict(
                state_dict,
                prefix,
                local_metadata,
                strict,
                missing_keys,
                unexpected_keys,
                error_msgs,
            )
    def fit(self, dataloader, transform_func=lambda x: x[0]):
        """
        Scaler fitting

        Computes dataset-wide running means of the features and their squares
        (averaged over batches), then registers them as buffers so they are
        saved/restored with checkpoints.

        Args:
            dataloader (DataLoader): training data DataLoader
            transform_func (lambda function, optional): Transforms applied to the data.
                Defaults to lambdax:x[0].
        """
        indx = 0
        for batch in tqdm.tqdm(dataloader):
            feats = transform_func(batch)
            if indx == 0:
                mean = torch.mean(feats, self.dims, keepdim=True).mean(0).unsqueeze(0)
                mean_squared = (
                    torch.mean(feats ** 2, self.dims, keepdim=True).mean(0).unsqueeze(0)
                )
            else:
                mean += torch.mean(feats, self.dims, keepdim=True).mean(0).unsqueeze(0)
                mean_squared += (
                    torch.mean(feats ** 2, self.dims, keepdim=True).mean(0).unsqueeze(0)
                )
            indx += 1
        mean /= indx
        mean_squared /= indx
        # Registered as buffers (not parameters): persisted in state_dict,
        # excluded from gradient updates.
        self.register_buffer("mean", mean)
        self.register_buffer("mean_squared", mean_squared)
    def forward(self, tensor):
        # Apply the configured normalisation; dataset mode requires a prior
        # fit() (buffers "mean" and "mean_squared" must exist).
        if self.statistic == "dataset":
            assert hasattr(self, "mean") and hasattr(
                self, "mean_squared"
            ), "TorchScaler should be fit before used if statistics=dataset"
            assert tensor.ndim == self.mean.ndim, "Pre-computed statistics "
            if self.normtype == "mean":
                return tensor - self.mean
            elif self.normtype == "standard":
                # Var(X) = E[X^2] - E[X]^2
                std = torch.sqrt(self.mean_squared - self.mean ** 2)
                return (tensor - self.mean) / (std + self.eps)
            else:
                raise NotImplementedError
        else:
            if self.normtype == "mean":
                return tensor - torch.mean(tensor, self.dims, keepdim=True)
            elif self.normtype == "standard":
                return (tensor - torch.mean(tensor, self.dims, keepdim=True)) / (
                    torch.std(tensor, self.dims, keepdim=True) + self.eps
                )
            elif self.normtype == "minmax":
                return (tensor - torch.amin(tensor, dim=self.dims, keepdim=True)) / (
                    torch.amax(tensor, dim=self.dims, keepdim=True)
                    - torch.amin(tensor, dim=self.dims, keepdim=True)
                    + self.eps
                )
| 4,606 | 38.042373 | 119 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/utils/schedulers.py | from asteroid.engine.schedulers import *
import numpy as np
class ExponentialWarmup(BaseScheduler):
    """ Scheduler to apply ramp-up during training to the learning rate.

    The learning rate follows ``max_lr * exp(exponent * (1 - t)^2)`` where
    ``t`` is training progress clipped to [0, 1].

    Args:
        optimizer: torch.optimizer.Optimizer, the optimizer from which to rampup the value from
        max_lr: float, the maximum learning to use at the end of ramp-up.
        rampup_length: int, the length of the rampup (number of steps).
        exponent: float, the exponent to be used.
    """

    def __init__(self, optimizer, max_lr, rampup_length, exponent=-5.0):
        super().__init__(optimizer)
        self.rampup_len = rampup_length
        self.max_lr = max_lr
        self.step_num = 1
        self.exponent = exponent

    def _get_scaling_factor(self):
        # A zero-length rampup means no warm-up at all.
        if self.rampup_len == 0:
            return 1.0
        progress = np.clip(self.step_num, 0.0, self.rampup_len) / self.rampup_len
        phase = 1.0 - progress
        return float(np.exp(self.exponent * phase ** 2))

    def _get_lr(self):
        return self._get_scaling_factor() * self.max_lr
| 1,094 | 32.181818 | 95 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/utils/encoder.py | import numpy as np
import pandas as pd
from dcase_util.data import DecisionEncoder
class ManyHotEncoder:
    """Encode event labels into many-hot numpy arrays (and decode them back).

    Adapted after DecisionEncoder.find_contiguous_regions method in
    https://github.com/DCASE-REPO/dcase_util/blob/master/dcase_util/data/decisions.py

    Weak (clip-level) labels become a 1-D indicator vector; strong (timed)
    labels become a (n_frames, n_classes) matrix with 1 where a class is
    active. Several 1s may appear on the same frame (multi-label problem).

    Args:
        labels: list, the classes which will be encoded.
        audio_len: float, clip length in seconds.
        frame_len: int, analysis frame length in samples (kept for bookkeeping).
        frame_hop: int, hop size in samples.
        net_pooling: int, temporal pooling factor of the network.
        fs: int, sampling rate in Hz.
    """

    def __init__(
        self, labels, audio_len, frame_len, frame_hop, net_pooling=1, fs=16000
    ):
        # Exact ndarray check (subclasses intentionally not converted).
        if type(labels) is np.ndarray:
            labels = labels.tolist()
        self.labels = labels
        self.audio_len = audio_len
        self.frame_len = frame_len
        self.frame_hop = frame_hop
        self.fs = fs
        self.net_pooling = net_pooling
        total_samples = self.audio_len * self.fs
        # Number of output frames after the network's temporal pooling.
        self.n_frames = int(int(total_samples / self.frame_hop) / self.net_pooling)

    def encode_weak(self, labels):
        """Encode clip-level (weak) labels into an indicator vector.

        Args:
            labels: list/Series/comma-separated string of class names, or the
                special string "empty" (yields a vector of -1).

        Returns:
            numpy.array with 1 at each present class and 0 elsewhere.
        """
        # The "empty" placeholder marks absent targets with -1 everywhere.
        if type(labels) is str:
            if labels == "empty":
                return np.zeros(len(self.labels)) - 1
            labels = labels.split(",")
        if type(labels) is pd.DataFrame:
            if labels.empty:
                labels = []
            elif "event_label" in labels.columns:
                labels = labels["event_label"]
        encoded = np.zeros(len(self.labels))
        for lab in labels:
            if not pd.isna(lab):
                encoded[self.labels.index(lab)] = 1
        return encoded

    def _time_to_frame(self, time):
        # Seconds -> (possibly fractional) pooled-frame index, clipped to [0, n_frames].
        frame = (time * self.fs) / self.frame_hop
        return np.clip(frame / self.net_pooling, a_min=0, a_max=self.n_frames)

    def _frame_to_time(self, frame):
        # Pooled-frame index -> seconds, clipped to [0, audio_len].
        seconds = frame * self.net_pooling / (self.fs / self.frame_hop)
        return np.clip(seconds, a_min=0, a_max=self.audio_len)

    def encode_strong_df(self, label_df):
        """Encode timed (strong) labels for one file.

        Args:
            label_df: DataFrame with onset/offset/event_label columns, a Series
                with those fields, a list of label strings (marked over all
                frames), or a list of [label, onset, offset] triplets. Onsets
                and offsets are in seconds. The string "empty" yields -1.

        Returns:
            numpy.array of shape (n_frames, n_classes), 1 where active.
        """
        assert any(
            [x is not None for x in [self.audio_len, self.frame_len, self.frame_hop]]
        )
        n_rows = self.n_frames
        if type(label_df) is str:
            if label_df == "empty":
                return np.zeros((n_rows, len(self.labels))) - 1
        grid = np.zeros((n_rows, len(self.labels)))
        if type(label_df) is pd.DataFrame:
            if {"onset", "offset", "event_label"}.issubset(label_df.columns):
                for _, row in label_df.iterrows():
                    if pd.isna(row["event_label"]):
                        continue
                    cls = self.labels.index(row["event_label"])
                    start = int(self._time_to_frame(row["onset"]))
                    stop = int(np.ceil(self._time_to_frame(row["offset"])))
                    # Offset frame excluded (hypothesis of overlapping frames).
                    grid[start:stop, cls] = 1
        elif type(label_df) in [
            pd.Series,
            list,
            np.ndarray,
        ]:  # list of list or list of strings
            if type(label_df) is pd.Series:
                if {"onset", "offset", "event_label"}.issubset(
                    label_df.index
                ):  # a single labelled event
                    if not pd.isna(label_df["event_label"]):
                        cls = self.labels.index(label_df["event_label"])
                        start = int(self._time_to_frame(label_df["onset"]))
                        stop = int(np.ceil(self._time_to_frame(label_df["offset"])))
                        grid[start:stop, cls] = 1
                    return grid

            for event_label in label_df:
                if type(event_label) is str:
                    # Weak label string: mark the class on every frame.
                    if event_label != "":
                        grid[:, self.labels.index(event_label)] = 1
                elif len(event_label) == 3:
                    # [label, onset, offset] triplet.
                    if event_label[0] != "":
                        cls = self.labels.index(event_label[0])
                        start = int(self._time_to_frame(event_label[1]))
                        stop = int(np.ceil(self._time_to_frame(event_label[2])))
                        grid[start:stop, cls] = 1
                else:
                    raise NotImplementedError(
                        "cannot encode strong, type mismatch: {}".format(
                            type(event_label)
                        )
                    )
        else:
            raise NotImplementedError(
                "To encode_strong, type is pandas.Dataframe with onset, offset and event_label"
                "columns, or it is a list or pandas Series of event labels, "
                "type given: {}".format(type(label_df))
            )
        return grid

    def decode_weak(self, labels):
        """Decode an indicator vector back to a list of class names."""
        return [self.labels[idx] for idx, flag in enumerate(labels) if flag == 1]

    def decode_strong(self, labels):
        """Decode a (n_frames, n_classes) matrix to [[label, onset, offset], ...] in seconds."""
        events = []
        for idx, class_track in enumerate(labels.T):
            # Contiguous active regions of this class as (start, stop) frame pairs.
            change_indices = DecisionEncoder().find_contiguous_regions(class_track)
            for start, stop in change_indices:
                events.append(
                    [
                        self.labels[idx],
                        self._frame_to_time(start),
                        self._frame_to_time(stop),
                    ]
                )
        return events

    def state_dict(self):
        """Serializable constructor arguments (inverse of ``load_state_dict``)."""
        return {
            "labels": self.labels,
            "audio_len": self.audio_len,
            "frame_len": self.frame_len,
            "frame_hop": self.frame_hop,
            "net_pooling": self.net_pooling,
            "fs": self.fs,
        }

    @classmethod
    def load_state_dict(cls, state_dict):
        """Rebuild an encoder from the output of ``state_dict()``."""
        return cls(
            state_dict["labels"],
            state_dict["audio_len"],
            state_dict["frame_len"],
            state_dict["frame_hop"],
            state_dict["net_pooling"],
            state_dict["fs"],
        )
| 8,185 | 37.252336 | 111 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/utils/torch_utils.py | import torch
import numpy as np
def nantensor(*args, **kwargs):
    """Create a tensor (same signature as ``torch.ones``) filled entirely with NaN."""
    out = torch.ones(*args, **kwargs)
    return out * np.nan
def nanmean(v, *args, inplace=False, **kwargs):
    """Mean of ``v`` ignoring NaN entries.

    Args:
        v: torch.Tensor, input values (may contain NaN).
        inplace: bool, if False (default) work on a clone; if True, NaNs in
            ``v`` are zeroed in place.
        *args, **kwargs: forwarded to ``Tensor.sum`` (e.g. a reduction dim).
    """
    if not inplace:
        v = v.clone()
    nan_mask = torch.isnan(v)
    # Zero out NaNs so they contribute nothing to the numerator.
    v[nan_mask] = 0
    valid_count = (~nan_mask).float().sum(*args, **kwargs)
    return v.sum(*args, **kwargs) / valid_count
| 327 | 20.866667 | 74 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/utils/__init__.py | from .encoder import ManyHotEncoder
from .schedulers import ExponentialWarmup
| 78 | 25.333333 | 41 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/desed_task/data_augm.py | import numpy as np
import torch
import random
def frame_shift(mels, labels, net_pooling=4):
    """Randomly roll each mel spectrogram along time, keeping labels aligned.

    The shift (in mel frames) is drawn from a Gaussian (std 90) per example;
    labels are rolled by the shift divided by the network pooling factor.
    """
    batch = mels.shape[0]
    rolled_mels, rolled_labels = [], []
    for b in range(batch):
        offset = int(random.gauss(0, 90))
        rolled_mels.append(torch.roll(mels[b], offset, dims=-1))
        # Convert the mel-frame shift to label frames (floor toward -inf).
        label_offset = -abs(offset) // net_pooling if offset < 0 else offset // net_pooling
        rolled_labels.append(torch.roll(labels[b], label_offset, dims=-1))
    return torch.stack(rolled_mels), torch.stack(rolled_labels)
def frame_shift2(mels, labels, net_pooling=4):
    """Time-shift 2-channel mel spectrograms, one independent shift per channel.

    Channel 0's shift also drives the label shift (divided by ``net_pooling``);
    channel 1 is shifted independently and its labels are left as channel 0's.
    """
    batch = mels.shape[0]
    ch0, rolled_labels = [], []
    for b in range(batch):
        offset = int(random.gauss(0, 90))
        ch0.append(torch.roll(mels[b, 0], offset, dims=-1))
        label_offset = -abs(offset) // net_pooling if offset < 0 else offset // net_pooling
        rolled_labels.append(torch.roll(labels[b], label_offset, dims=-1))
    ch0 = torch.stack(ch0)

    ch1 = []
    for b in range(batch):
        offset = int(random.gauss(0, 90))
        ch1.append(torch.roll(mels[b, 1], offset, dims=-1))
    ch1 = torch.stack(ch1)

    # (bsz, bands, frames, 2) -> (bsz, 2, bands, frames)
    shifted = torch.stack([ch0, ch1], 3).permute(0, 3, 1, 2)
    return shifted, torch.stack(rolled_labels)
def temporal_reverse(mels, labels, net_pooling=4):
    """Reverse each mel spectrogram and its label grid along the time axis.

    ``net_pooling`` is accepted for signature parity with the other
    augmentations but is not used here.
    """
    flipped_mels = [torch.fliplr(m) for m in mels]
    flipped_labels = [torch.fliplr(l) for l in labels]
    return torch.stack(flipped_mels), torch.stack(flipped_labels)
def mixup(data, target=None, alpha=0.2, beta=0.2, mixup_label_type="soft"):
    """Mixup data augmentation by blending each example with a permuted one.

    Args:
        data: input tensor, must be a batch so data can be permuted and mixed.
        target: tensor of the target to be mixed; if None, only data is returned.
        alpha: float, first parameter of the np.random.beta distribution.
        beta: float, second parameter of the np.random.beta distribution.
        mixup_label_type: str, one of {'soft', 'hard'}.

    Returns:
        torch.Tensor of mixed data, and mixed labels if ``target`` was given.
    """
    with torch.no_grad():
        n = data.size(0)
        lam = np.random.beta(alpha, beta)
        perm = torch.randperm(n)
        mixed_data = lam * data + (1 - lam) * data[perm, :]
        if target is None:
            return mixed_data
        if mixup_label_type == "soft":
            # Convex combination of the two label vectors, clamped to [0, 1].
            mixed_target = torch.clamp(
                lam * target + (1 - lam) * target[perm, :], min=0, max=1
            )
        elif mixup_label_type == "hard":
            # Union of the two label sets.
            mixed_target = torch.clamp(target + target[perm, :], min=0, max=1)
        else:
            raise NotImplementedError(
                f"mixup_label_type: {mixup_label_type} not implemented. choice in "
                f"{'soft', 'hard'}"
            )
        return mixed_data, mixed_target
def add_noise(mels, snrs=(6, 30), dims=(1, 2)):
    """Add white noise to mel spectrograms at a (possibly random) SNR.

    Args:
        mels: torch.Tensor, batch of mel spectrograms.
        snrs: int or tuple; if a tuple/list, a per-example SNR (dB) is drawn
            uniformly between its two values, otherwise the scalar is used.
        dims: tuple, dimensions over which the signal std is computed
            (default (1, 2) for a batch of 2-D spectrograms).

    Returns:
        torch.Tensor of mels with noise applied.
    """
    if isinstance(snrs, (list, tuple)):
        # One SNR per batch element, broadcastable against (bsz, bands, frames).
        snr = (snrs[0] - snrs[1]) * torch.rand(
            (mels.shape[0],), device=mels.device
        ).reshape(-1, 1, 1) + snrs[1]
    else:
        snr = snrs

    linear_snr = 10 ** (snr / 20)  # dB -> linear amplitude ratio
    noise_std = torch.std(mels, dim=dims, keepdim=True) / linear_snr
    return mels + torch.randn(mels.shape, device=mels.device) * noise_std
| 3,931 | 35.073394 | 112 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/desed_task/dataio/sampler.py | from torch.utils.data import Sampler
import numpy as np
class ConcatDatasetBatchSampler(Sampler):
    """Batch sampler for a standard Pytorch ConcatDataset.

    From SpeechBrain dataio, see https://github.com/speechbrain/.
    Retrieves elements from the concatenated datasets and places them in the
    same batch with proportions given by ``batch_sizes``: e.g. (8, 16) yields
    batches of 24 where the first 8 indices come from the first dataset and
    the last 16 from the second. More than two datasets are supported.

    Note
    ----
    Batches are drawn until the dataset with the smallest length is exhausted,
    so the number of examples per epoch is dictated by the smallest dataset.

    Arguments
    ---------
    samplers : list or tuple
        One Pytorch sampler per concatenated dataset.
    batch_sizes : list or tuple
        Per-dataset batch sizes, in the same order as ``samplers``.
    epoch : int
        The epoch to start at.
    """

    def __init__(self, samplers, batch_sizes: (tuple, list), epoch=0) -> None:
        if not isinstance(samplers, (list, tuple)):
            raise ValueError(
                "samplers should be a list or tuple of Pytorch Samplers, "
                # Bug fix: this message previously formatted ``batch_sizes``,
                # hiding the actual offending argument.
                "but got samplers={}".format(samplers)
            )

        if not isinstance(batch_sizes, (list, tuple)):
            raise ValueError(
                "batch_sizes should be a list or tuple of integers, "
                "but got batch_sizes={}".format(batch_sizes)
            )

        if not len(batch_sizes) == len(samplers):
            # Bug fix: grammar of the original message ("should be have").
            raise ValueError("batch_sizes and samplers should have the same length")

        self.batch_sizes = batch_sizes
        self.samplers = samplers
        # Global index offset of each dataset inside the ConcatDataset.
        self.offsets = [0] + np.cumsum([len(x) for x in self.samplers]).tolist()[:-1]

        self.epoch = epoch
        self.set_epoch(self.epoch)

    def _iter_one_dataset(self, c_batch_size, c_sampler, c_offset):
        # Yield batches of global indices drawn from a single dataset's sampler.
        batch = []
        for idx in c_sampler:
            batch.append(c_offset + idx)
            if len(batch) == c_batch_size:
                yield batch

    def set_epoch(self, epoch):
        """Propagate the epoch to child samplers that support it (reshuffling)."""
        if hasattr(self.samplers[0], "epoch"):
            for s in self.samplers:
                s.set_epoch(epoch)

    def __iter__(self):
        iterators = [iter(i) for i in self.samplers]
        tot_batch = []
        for b_num in range(len(self)):
            for samp_idx in range(len(self.samplers)):
                c_batch = []
                while len(c_batch) < self.batch_sizes[samp_idx]:
                    c_batch.append(self.offsets[samp_idx] + next(iterators[samp_idx]))
                tot_batch.extend(c_batch)
            yield tot_batch
            tot_batch = []

    def __len__(self):
        # Number of complete joint batches: min over datasets of len // batch_size.
        min_len = float("inf")
        for idx, sampler in enumerate(self.samplers):
            c_len = (len(sampler)) // self.batch_sizes[idx]
            min_len = min(c_len, min_len)
        return min_len
| 3,147 | 33.217391 | 107 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/desed_task/dataio/datasets.py | from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import torchaudio
import torch
import glob
def to_mono(mixture, random_ch=False):
    """Downmix a multi-channel waveform to mono.

    Args:
        mixture: torch.Tensor, (channels, samples) or already-mono (samples,).
        random_ch: bool, if True pick one channel at random instead of
            averaging all channels.

    Returns:
        torch.Tensor, 1-D mono waveform.
    """
    if mixture.ndim > 1:  # multi channel
        if not random_ch:
            mixture = torch.mean(mixture, 0)
        else:  # randomly select one channel
            # Bug fix: np.random.randint has an EXCLUSIVE upper bound, so the
            # original `randint(0, shape[0] - 1)` could never pick the last
            # channel (and always returned 0 for stereo input).
            indx = np.random.randint(0, mixture.shape[0])
            mixture = mixture[indx]
    return mixture
def pad_audio(audio, target_len):
    """Right-pad a waveform with zeros up to ``target_len`` samples.

    Args:
        audio: torch.Tensor, waveform whose last dimension is time.
        target_len: int, desired number of samples.

    Returns:
        tuple (audio, [pad_ratio]) where pad_ratio is
        ``target_len / original_len`` (1.0 when no padding was needed).
    """
    orig_len = audio.shape[-1]
    if orig_len < target_len:
        audio = torch.nn.functional.pad(
            audio, (0, target_len - orig_len), mode="constant"
        )
        # Bug fix: the ratio must use the ORIGINAL length. The previous code
        # divided by len(audio) after padding, which always produced 1.0.
        padded_indx = [target_len / orig_len] if orig_len else [1.0]
    else:
        padded_indx = [1.0]
    return audio, padded_indx
def read_audio(file, multisrc, random_channel, pad_to):
    """Load an audio file, optionally downmix to mono and pad to a fixed length.

    Args:
        file: str, path to the audio file (loaded with torchaudio).
        multisrc: bool, if True keep all channels, otherwise downmix to mono.
        random_channel: bool, forwarded to ``to_mono`` for random channel pick.
        pad_to: int or None, target length in samples (None: no padding).

    Returns:
        tuple (float waveform tensor, padded_indx list).
    """
    waveform, _fs = torchaudio.load(file)

    if not multisrc:
        waveform = to_mono(waveform, random_channel)

    if pad_to is None:
        padded_indx = [1.0]
    else:
        waveform, padded_indx = pad_audio(waveform, pad_to)

    return waveform.float(), padded_indx
class StronglyAnnotatedSet(Dataset):
    """Dataset of audio clips with strong (timed) event annotations.

    Args:
        audio_folder: str, directory containing the wav files.
        tsv_entries: pandas.DataFrame with filename/onset/offset/event_label rows.
        encoder: ManyHotEncoder-like object used to encode the labels.
        pad_to: int, clip length in seconds to pad to.
        fs: int, sampling rate in Hz.
        return_filename: bool, if True also return the audio path per item.
        random_channel: bool, forwarded to audio loading.
        multisrc: bool, keep multi-channel audio if True.
        evaluation: bool, kept for interface compatibility (unused here).
    """

    def __init__(
        self,
        audio_folder,
        tsv_entries,
        encoder,
        pad_to=10,
        fs=16000,
        return_filename=False,
        random_channel=False,
        multisrc=False,
        evaluation=False
    ):
        self.encoder = encoder
        self.fs = fs
        self.pad_to = pad_to * fs
        self.return_filename = return_filename
        self.random_channel = random_channel
        self.multisrc = multisrc

        # One entry per file; rows with NaN onset carry no event.
        examples = {}
        for _, row in tsv_entries.iterrows():
            fname = row["filename"]
            if fname not in examples:
                examples[fname] = {
                    "mixture": os.path.join(audio_folder, fname),
                    "events": [],
                }
            if not np.isnan(row["onset"]):
                examples[fname]["events"].append(
                    {
                        "event_label": row["event_label"],
                        "onset": row["onset"],
                        "offset": row["offset"],
                    }
                )

        self.examples = examples
        self.examples_list = list(examples.keys())

    def __len__(self):
        return len(self.examples_list)

    def __getitem__(self, item):
        example = self.examples[self.examples_list[item]]
        mixture, padded_indx = read_audio(
            example["mixture"], self.multisrc, self.random_channel, self.pad_to
        )

        events = example["events"]
        if not len(events):
            # No annotated events: all-zero (frames, classes) target grid.
            strong = torch.zeros(self.encoder.n_frames, len(self.encoder.labels)).float()
        else:
            strong = torch.from_numpy(
                self.encoder.encode_strong_df(pd.DataFrame(events))
            ).float()

        if self.return_filename:
            return mixture, strong.transpose(0, 1), padded_indx, example["mixture"]
        return mixture, strong.transpose(0, 1), padded_indx
class WeakSet(Dataset):
    """Dataset of audio clips with weak (clip-level) labels.

    Args:
        audio_folder: str, directory containing the wav files.
        tsv_entries: pandas.DataFrame with filename and comma-separated
            event_labels columns.
        encoder: ManyHotEncoder-like object used to encode the labels.
        pad_to: int, clip length in seconds to pad to.
        fs: int, sampling rate in Hz.
        return_filename: bool, if True also return the audio path per item.
        random_channel: bool, forwarded to audio loading.
        multisrc: bool, keep multi-channel audio if True.
    """

    def __init__(
        self,
        audio_folder,
        tsv_entries,
        encoder,
        pad_to=10,
        fs=16000,
        return_filename=False,
        random_channel=False,
        multisrc=False,
    ):
        self.encoder = encoder
        self.fs = fs
        self.pad_to = pad_to * fs
        self.return_filename = return_filename
        self.random_channel = random_channel
        self.multisrc = multisrc

        # First occurrence of each filename wins; labels come comma-separated.
        examples = {}
        for _, row in tsv_entries.iterrows():
            fname = row["filename"]
            if fname not in examples:
                examples[fname] = {
                    "mixture": os.path.join(audio_folder, fname),
                    "events": row["event_labels"].split(","),
                }

        self.examples = examples
        self.examples_list = list(examples.keys())

    def __len__(self):
        return len(self.examples_list)

    def __getitem__(self, item):
        fname = self.examples_list[item]
        example = self.examples[fname]
        mixture, padded_indx = read_audio(
            example["mixture"], self.multisrc, self.random_channel, self.pad_to
        )

        events = example["events"]
        weak = torch.zeros(self.encoder.n_frames, len(self.encoder.labels))
        if len(events):
            # Clip-level labels are stored in the first row of the grid.
            weak[0, :] = torch.from_numpy(self.encoder.encode_weak(events)).float()

        out_args = [mixture, weak.transpose(0, 1), padded_indx]
        if self.return_filename:
            out_args.append(example["mixture"])
        return out_args
class UnlabeledSet(Dataset):
    """Dataset over a folder of unlabeled wav files; targets are all zeros.

    Args:
        unlabeled_folder: str, directory scanned (non-recursively) for *.wav.
        encoder: ManyHotEncoder-like object (provides n_frames and labels).
        pad_to: int or None, clip length in seconds to pad to (None: no pad).
        fs: int, sampling rate in Hz.
        return_filename: bool, if True also return the audio path per item.
        random_channel: bool, forwarded to audio loading.
        multisrc: bool, keep multi-channel audio if True.
    """

    def __init__(
        self,
        unlabeled_folder,
        encoder,
        pad_to=10,
        fs=16000,
        return_filename=False,
        random_channel=False,
        multisrc=False,
    ):
        self.encoder = encoder
        self.fs = fs
        self.pad_to = None if pad_to is None else pad_to * fs
        self.examples = glob.glob(os.path.join(unlabeled_folder, "*.wav"))
        self.return_filename = return_filename
        self.random_channel = random_channel
        self.multisrc = multisrc

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, item):
        path = self.examples[item]
        mixture, padded_indx = read_audio(
            path, self.multisrc, self.random_channel, self.pad_to
        )
        # Unlabeled data: an all-zero (frames, classes) placeholder target.
        strong = torch.zeros(self.encoder.n_frames, len(self.encoder.labels)).float()
        out_args = [mixture, strong.transpose(0, 1), padded_indx]
        if self.return_filename:
            out_args.append(path)
        return out_args
| 6,460 | 27.337719 | 83 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/desed_task/dataio/__init__.py | from .datasets import WeakSet, UnlabeledSet, StronglyAnnotatedSet
from .sampler import ConcatDatasetBatchSampler
| 113 | 37 | 65 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/main_MT_model.py | # -*- coding: utf-8 -*-
import argparse
import datetime
import inspect
import os
import time
from pprint import pprint
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
from data_utils.Desed import DESED
from data_utils.DataLoad import DataLoadDf, ConcatDataset, MultiStreamBatchSampler
from TestModel import _load_model
from evaluation_measures import get_predictions, psds_score, compute_psds_from_operating_points, compute_metrics
from models.CRNN import CRNN
import config as cfg
from utilities import ramps
from utilities.Logger import create_logger
from utilities.Scaler import ScalerPerAudio, Scaler
from utilities.utils import SaveBest, to_cuda_if_available, weights_init, AverageMeterSet, EarlyStopping, \
get_durations_df
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
def adjust_learning_rate(optimizer, rampup_value, rampdown_value=1):
    """Adjust the learning rate of every param group in ``optimizer``.

    LR warm-up to handle large minibatch sizes, from
    https://arxiv.org/abs/1706.02677.

    Args:
        optimizer: torch optimizer to be updated.
        rampup_value: float in [0, 1], increases linearly during warm-up.
        rampdown_value: float in [1, 0], decreases linearly during cool-down.
    """
    new_lr = rampup_value * rampdown_value * cfg.max_learning_rate
    for group in optimizer.param_groups:
        group["lr"] = new_lr
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponential-moving-average update of the teacher (``ema_model``) weights.

    Args:
        model: torch.nn.Module, the student model being trained.
        ema_model: torch.nn.Module, the teacher model updated in place.
        alpha: float, EMA decay; capped early so the teacher tracks the true
            average until the exponential average becomes more accurate.
        global_step: int, current training step (drives the early cap).
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_params, params in zip(ema_model.parameters(), model.parameters()):
        # In-place EMA: ema = alpha * ema + (1 - alpha) * param.
        # Fix: use the keyword ``alpha=`` form — the positional
        # ``add_(scalar, tensor)`` signature is deprecated in modern torch.
        ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
def train(train_loader, model, optimizer, c_epoch, ema_model=None, mask_weak=None, mask_strong=None, adjust_lr=False):
    """ One epoch of a Mean Teacher model
    Args:
        train_loader: torch.utils.data.DataLoader, iterator of training batches for an epoch.
            Should return a tuple: ((teacher input, student input), labels)
        model: torch.Module, model to be trained, should return a weak and strong prediction
        optimizer: torch.Module, optimizer used to train the model
        c_epoch: int, the current epoch of training
        ema_model: torch.Module, student model, should return a weak and strong prediction
        mask_weak: slice or list, mask the batch to get only the weak labeled data (used to calculate the loss)
        mask_strong: slice or list, mask the batch to get only the strong labeled data (used to calcultate the loss)
        adjust_lr: bool, Whether or not to adjust the learning rate during training (params in config)

    Returns:
        torch.Tensor: the total loss of the last batch processed.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    class_criterion = nn.BCELoss()
    # NOTE(review): consistency_criterion (MSE) is instantiated but unused —
    # the consistency terms below use class_criterion (BCE); the original MSE
    # versions are kept commented out.
    consistency_criterion = nn.MSELoss()
    class_criterion, consistency_criterion = to_cuda_if_available(class_criterion, consistency_criterion)

    meters = AverageMeterSet()
    log.debug("Nb batches: {}".format(len(train_loader)))
    start = time.time()
    for i, ((batch_input, ema_batch_input), target) in enumerate(train_loader):
        global_step = c_epoch * len(train_loader) + i
        # Ramp-up factor in [0, 1], reaching 1 after n_epoch_rampup epochs.
        rampup_value = ramps.exp_rampup(global_step, cfg.n_epoch_rampup*len(train_loader))

        if adjust_lr:
            adjust_learning_rate(optimizer, rampup_value)
        meters.update('lr', optimizer.param_groups[0]['lr'])

        batch_input, ema_batch_input, target = to_cuda_if_available(batch_input, ema_batch_input, target)

        # Outputs
        # NOTE(review): ema_model is called unconditionally here even though its
        # default is None — calling train(..., ema_model=None) would raise;
        # confirm callers always pass a teacher model.
        strong_pred_ema, weak_pred_ema = ema_model(ema_batch_input)
        # Teacher predictions are detached: no gradient flows into the EMA model.
        strong_pred_ema = strong_pred_ema.detach()
        weak_pred_ema = weak_pred_ema.detach()
        strong_pred, weak_pred = model(batch_input)

        #sample = target[mask_strong].sum(2)
        #sample = sample.cpu().numpy()
        #print(np.where(sample[-1,:]>1))

        loss = None
        # Weak BCE Loss
        target_weak = target.max(-2)[0]  # Take the max in the time axis
        if mask_weak is not None:
            weak_class_loss = class_criterion(weak_pred[mask_weak], target_weak[mask_weak])
            ema_class_loss = class_criterion(weak_pred_ema[mask_weak], target_weak[mask_weak])
            loss = weak_class_loss

            # Log target/prediction details once per epoch (first batch only).
            if i == 0:
                log.debug(f"target: {target.mean(-2)} \n Target_weak: {target_weak} \n "
                          f"Target weak mask: {target_weak[mask_weak]} \n "
                          f"Target strong mask: {target[mask_strong].sum(-2)}\n"
                          f"weak loss: {weak_class_loss} \t rampup_value: {rampup_value}"
                          f"tensor mean: {batch_input.mean()}")
            meters.update('weak_class_loss', weak_class_loss.item())
            meters.update('Weak EMA loss', ema_class_loss.item())

        # Strong BCE loss
        if mask_strong is not None:
            strong_class_loss = class_criterion(strong_pred[mask_strong], target[mask_strong])
            meters.update('Strong loss', strong_class_loss.item())

            # EMA strong loss is tracked for monitoring only (not in the loss).
            strong_ema_class_loss = class_criterion(strong_pred_ema[mask_strong], target[mask_strong])
            meters.update('Strong EMA loss', strong_ema_class_loss.item())
            if loss is not None:
                loss += strong_class_loss
            else:
                loss = strong_class_loss

        # Teacher-student consistency cost
        if ema_model is not None:
            # Consistency weight ramps up with training so early, noisy teacher
            # predictions do not dominate the loss.
            consistency_cost = cfg.max_consistency_cost * rampup_value
            meters.update('Consistency weight', consistency_cost)
            # Take consistency about strong predictions (all data)
            #consistency_loss_strong = consistency_cost * consistency_criterion(strong_pred, strong_pred_ema)
            consistency_loss_strong = consistency_cost * class_criterion(strong_pred, strong_pred_ema)
            meters.update('Consistency strong', consistency_loss_strong.item())
            if loss is not None:
                loss += consistency_loss_strong
            else:
                loss = consistency_loss_strong

            meters.update('Consistency weight', consistency_cost)
            # Take consistency about weak predictions (all data)
            #consistency_loss_weak = consistency_cost * consistency_criterion(weak_pred, weak_pred_ema)
            consistency_loss_weak = consistency_cost * class_criterion(weak_pred, weak_pred_ema)
            meters.update('Consistency weak', consistency_loss_weak.item())
            if loss is not None:
                loss += consistency_loss_weak
            else:
                loss = consistency_loss_weak

        # Sanity checks to catch training divergence early.
        assert not (np.isnan(loss.item()) or loss.item() > 1e5), 'Loss explosion: {}'.format(loss.item())
        assert not loss.item() < 0, 'Loss problem, cannot be negative'
        meters.update('Loss', loss.item())

        # compute gradient and do optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        global_step += 1
        if ema_model is not None:
            # Teacher weights track the student via EMA after each step.
            update_ema_variables(model, ema_model, 0.999, global_step)

    epoch_time = time.time() - start
    log.info(f"Epoch: {c_epoch}\t Time {epoch_time:.2f}\t {meters}")
    return loss
def get_dfs(desed_dataset, nb_files=None, separated_sources=False, pooling_time_ratio=4):
    """Load DESED metadata and return the training/validation DataFrames.

    Args:
        desed_dataset: DESED, dataset wrapper used to initialize and read tsv files.
        nb_files: int, optional number of files to keep (useful for quick tests).
        separated_sources: bool, whether to use the separated-sources audio dirs.
        pooling_time_ratio: int, temporal pooling factor of the model, used to
            convert synthetic onsets/offsets from seconds to output frames.
            Fix: this was previously read from a module-level global defined
            only under ``__main__`` (NameError if imported elsewhere); the
            default of 4 matches the baseline CRNN (2 * 2 pooling).

    Returns:
        dict of pandas.DataFrame keyed by {"weak", "unlabel", "synthetic",
        "train_synthetic", "valid_synthetic", "validation"}.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    audio_weak_ss = None
    audio_unlabel_ss = None
    audio_validation_ss = None
    audio_synthetic_ss = None
    if separated_sources:
        audio_weak_ss = cfg.weak_ss
        audio_unlabel_ss = cfg.unlabel_ss
        audio_validation_ss = cfg.validation_ss
        audio_synthetic_ss = cfg.synthetic_ss

    weak_df = desed_dataset.initialize_and_get_df(cfg.weak, audio_dir_ss=audio_weak_ss, nb_files=nb_files)
    unlabel_df = desed_dataset.initialize_and_get_df(cfg.unlabel, audio_dir_ss=audio_unlabel_ss, nb_files=nb_files)
    # Even if synthetic is not used for training, it is used for validation purposes.
    synthetic_df = desed_dataset.initialize_and_get_df(cfg.synthetic, audio_dir_ss=audio_synthetic_ss,
                                                       nb_files=nb_files, download=False)
    log.debug(f"synthetic: {synthetic_df.head()}")
    validation_df = desed_dataset.initialize_and_get_df(cfg.validation, audio_dir=cfg.audio_validation_dir,
                                                        audio_dir_ss=audio_validation_ss, nb_files=nb_files)

    # Divide synthetic in train and valid
    filenames_train = synthetic_df.filename.drop_duplicates().sample(frac=0.8, random_state=26)
    train_synth_df = synthetic_df[synthetic_df.filename.isin(filenames_train)]
    valid_synth_df = synthetic_df.drop(train_synth_df.index).reset_index(drop=True)

    # Put train_synth in frames so many_hot_encoder can work.
    # Not doing it for valid, because not using labels (when prediction) and event based metric expect sec.
    train_synth_df.onset = train_synth_df.onset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    train_synth_df.offset = train_synth_df.offset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    log.debug(valid_synth_df.event_label.value_counts())

    data_dfs = {"weak": weak_df,
                "unlabel": unlabel_df,
                "synthetic": synthetic_df,
                "train_synthetic": train_synth_df,
                "valid_synthetic": valid_synth_df,
                "validation": validation_df,
                }
    return data_dfs
if __name__ == '__main__':
torch.manual_seed(2020)
np.random.seed(2020)
logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
logger.info("Baseline 2020")
logger.info(f"Starting time: {datetime.datetime.now()}")
parser = argparse.ArgumentParser(description="")
parser.add_argument("-s", '--subpart_data', type=int, default=None, dest="subpart_data",
help="Number of files to be used. Useful when testing on small number of files.")
parser.add_argument("-n", '--no_synthetic', dest='no_synthetic', action='store_true', default=False,
help="Not using synthetic labels during training")
f_args = parser.parse_args()
pprint(vars(f_args))
reduced_number_of_data = f_args.subpart_data
no_synthetic = f_args.no_synthetic
if no_synthetic:
add_dir_model_name = "_no_synthetic"
else:
add_dir_model_name = "_with_synthetic"
store_dir = os.path.join("stored_data", "MeanTeacher_CRNN_bce4")
saved_model_dir = os.path.join(store_dir, "model")
saved_pred_dir = os.path.join(store_dir, "predictions")
if os.path.exists(store_dir):
if os.path.exists(saved_model_dir):
load_flag = True
else:
load_flag = False
os.makedirs(saved_model_dir, exist_ok=True)
os.makedirs(saved_pred_dir, exist_ok=True)
else:
load_flag = False
os.makedirs(store_dir, exist_ok=True)
os.makedirs(saved_model_dir, exist_ok=True)
os.makedirs(saved_pred_dir, exist_ok=True)
n_channel = 1
add_axis_conv = 0
# Model taken from 2nd of dcase19 challenge: see Delphin-Poulat2019 in the results.
n_layers = 7
crnn_kwargs = {"n_in_channel": n_channel, "nclass": len(cfg.classes), "attention": True, "n_RNN_cell": 128,
"n_layers_RNN": 2,
"activation": "glu",
"dropout": 0.5,
"kernel_size": n_layers * [3], "padding": n_layers * [1], "stride": n_layers * [1],
"nb_filters": [16, 32, 64, 128, 128, 128, 128],
"pooling": [[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]}
pooling_time_ratio = 4 # 2 * 2
out_nb_frames_1s = cfg.sample_rate / cfg.hop_size / pooling_time_ratio
median_window = max(int(cfg.median_window_s * out_nb_frames_1s), 1)
logger.debug(f"median_window: {median_window}")
# ##############
# DATA
# ##############
dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
compute_log=False)
dfs = get_dfs(dataset, reduced_number_of_data)
# Meta path for psds
durations_synth = get_durations_df(cfg.synthetic)
many_hot_encoder = ManyHotEncoder(cfg.classes, n_frames=cfg.max_frames // pooling_time_ratio)
encod_func = many_hot_encoder.encode_strong_df
# Normalisation per audio or on the full dataset
if cfg.scaler_type == "dataset":
transforms = get_transforms(cfg.max_frames, add_axis=add_axis_conv)
weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms)
scaler_args = []
scaler = Scaler()
# # Only on real data since that's our final goal and test data are real
scaler.calculate_scaler(ConcatDataset([weak_data, unlabel_data, train_synth_data]))
logger.debug(f"scaler mean: {scaler.mean_}")
else:
scaler_args = ["global", "min-max"]
scaler = ScalerPerAudio(*scaler_args)
transforms = get_transforms(cfg.max_frames, scaler, add_axis_conv,
noise_dict_params={"mean": 0., "snr": cfg.noise_snr})
transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
weak_data = DataLoadDf(dfs["weak"], encod_func, transforms, in_memory=cfg.in_memory)
unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms, in_memory=cfg.in_memory_unlab)
train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms, in_memory=cfg.in_memory)
valid_synth_data = DataLoadDf(dfs["valid_synthetic"], encod_func, transforms_valid,
return_indexes=True, in_memory=cfg.in_memory)
logger.debug(f"len synth: {len(train_synth_data)}, len_unlab: {len(unlabel_data)}, len weak: {len(weak_data)}")
if not no_synthetic:
list_dataset = [weak_data, unlabel_data, train_synth_data]
batch_sizes = [cfg.batch_size//4, cfg.batch_size//2, cfg.batch_size//4]
strong_mask = slice((3*cfg.batch_size)//4, cfg.batch_size)
else:
list_dataset = [weak_data, unlabel_data]
batch_sizes = [cfg.batch_size // 4, 3 * cfg.batch_size // 4]
strong_mask = None
weak_mask = slice(batch_sizes[0]) # Assume weak data is always the first one
concat_dataset = ConcatDataset(list_dataset)
sampler = MultiStreamBatchSampler(concat_dataset, batch_sizes=batch_sizes)
training_loader = DataLoader(concat_dataset, batch_sampler=sampler)
valid_synth_loader = DataLoader(valid_synth_data, batch_size=cfg.batch_size)
# ##############
# Model
# ##############
if load_flag:
mlist = os.listdir(saved_model_dir)
modelName = mlist[-1]
n_epoch = np.int(modelName.split('_')[-1]) + 1
model_fname = os.path.join(saved_model_dir, modelName)
state = torch.load(model_fname)
crnn = _load_model(state, 'crnn')
logger.info(f"training model: {model_fname}, epoch: {state['epoch']}")
crnn_ema = _load_model(state, 'crnn')
for param in crnn_ema.parameters():
param.detach()
optim_kwargs = state['optimizer']["kwargs"]
optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
else:
n_epoch = 0
crnn = CRNN(**crnn_kwargs)
pytorch_total_params = sum(p.numel() for p in crnn.parameters() if p.requires_grad)
logger.info(crnn)
logger.info("number of parameters in the model: {}".format(pytorch_total_params))
crnn.apply(weights_init)
crnn_ema = CRNN(**crnn_kwargs)
crnn_ema.apply(weights_init)
for param in crnn_ema.parameters():
param.detach_()
optim_kwargs = {"lr": cfg.default_learning_rate, "betas": (0.9, 0.999)}
optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
bce_loss = nn.BCELoss()
state = {
'model': {"name": crnn.__class__.__name__,
'args': '',
"kwargs": crnn_kwargs,
'state_dict': crnn.state_dict()},
'model_ema': {"name": crnn_ema.__class__.__name__,
'args': '',
"kwargs": crnn_kwargs,
'state_dict': crnn_ema.state_dict()},
'optimizer': {"name": optim.__class__.__name__,
'args': '',
"kwargs": optim_kwargs,
'state_dict': optim.state_dict()},
"pooling_time_ratio": pooling_time_ratio,
"scaler": {
"type": type(scaler).__name__,
"args": scaler_args,
"state_dict": scaler.state_dict()},
"many_hot_encoder": many_hot_encoder.state_dict(),
"median_window": median_window,
"desed": dataset.state_dict()
}
save_best_cb = SaveBest("sup")
if cfg.early_stopping is not None:
early_stopping_call = EarlyStopping(patience=cfg.early_stopping, val_comp="sup", init_patience=cfg.es_init_wait)
# ##############
# Train
# ##############
results = pd.DataFrame(columns=["loss", "valid_synth_f1", "weak_metric", "global_valid"])
for epoch in range(cfg.n_epoch):
crnn.train()
crnn_ema.train()
crnn, crnn_ema = to_cuda_if_available(crnn, crnn_ema)
loss_value = train(training_loader, crnn, optim, epoch,
ema_model=crnn_ema, mask_weak=weak_mask, mask_strong=strong_mask, adjust_lr=cfg.adjust_lr)
# Validation
crnn = crnn.eval()
logger.info("\n ### Valid synthetic metric ### \n")
predictions = get_predictions(crnn, valid_synth_loader, many_hot_encoder.decode_strong, pooling_time_ratio,
median_window=median_window, save_predictions=None)
# Validation with synthetic data (dropping feature_filename for psds)
valid_synth = dfs["valid_synthetic"].drop("feature_filename", axis=1)
valid_synth_f1, psds_m_f1 = compute_metrics(predictions, valid_synth, durations_synth)
# Update state
state['model']['state_dict'] = crnn.state_dict()
state['model_ema']['state_dict'] = crnn_ema.state_dict()
state['optimizer']['state_dict'] = optim.state_dict()
state['epoch'] = epoch
state['valid_metric'] = valid_synth_f1
state['valid_f1_psds'] = psds_m_f1
# Callbacks
if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
model_fname = os.path.join(saved_model_dir, "baseline_epoch_" + str(epoch))
torch.save(state, model_fname)
if cfg.save_best:
if save_best_cb.apply(valid_synth_f1):
model_fname = os.path.join(saved_model_dir, "baseline_best")
torch.save(state, model_fname)
results.loc[epoch, "global_valid"] = valid_synth_f1
results.loc[epoch, "loss"] = loss_value.item()
results.loc[epoch, "valid_synth_f1"] = valid_synth_f1
if cfg.early_stopping:
if early_stopping_call.apply(valid_synth_f1):
logger.warn("EARLY STOPPING")
break
if cfg.save_best:
model_fname = os.path.join(saved_model_dir, "baseline_best")
state = torch.load(model_fname)
crnn = _load_model(state, 'crnn')
logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
else:
logger.info("testing model of last epoch: {}".format(cfg.n_epoch))
results_df = pd.DataFrame(results).to_csv(os.path.join(saved_pred_dir, "results.tsv"),
sep="\t", index=False, float_format="%.4f")
# ##############
# Validation
# ##############
crnn.eval()
transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
# Preds with only one value
valid_predictions = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
pooling_time_ratio, median_window=median_window,
save_predictions=predicitons_fname)
compute_metrics(valid_predictions, validation_labels_df, durations_validation)
# ##########
# Optional but recommended
# ##########
# Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
n_thresholds = 50
# Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
pred_ss_thresh = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
pooling_time_ratio, thresholds=list_thresholds, median_window=median_window,
save_predictions=predicitons_fname)
psds = compute_psds_from_operating_points(pred_ss_thresh, validation_labels_df, durations_validation)
psds_score(psds, filename_roc_curves=os.path.join(saved_pred_dir, "figures/psds_roc.png"))
| 22,648 | 47.189362 | 120 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/main_ICT_model.py | # -*- coding: utf-8 -*-
import argparse
import datetime
import inspect
import os
import time
from pprint import pprint
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
from data_utils.Desed import DESED
from data_utils.DataLoad import DataLoadDf, ConcatDataset, MultiStreamBatchSampler
from TestModel import _load_model
from evaluation_measures import get_predictions, psds_score, compute_psds_from_operating_points, compute_metrics
from models.CRNN import CRNN
import config as cfg
from utilities import ramps
from utilities.Logger import create_logger
from utilities.Scaler import ScalerPerAudio, Scaler
from utilities.utils import SaveBest, to_cuda_if_available, weights_init, AverageMeterSet, EarlyStopping, \
get_durations_df
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
def adjust_learning_rate(optimizer, rampup_value, rampdown_value=1):
    """Set the learning rate of every param group from ramp factors.

    The effective rate is ``rampup_value * rampdown_value * cfg.max_learning_rate``,
    i.e. a warm-up ramp (0 -> 1) optionally combined with a cool-down ramp (1 -> 0).

    Args:
        optimizer: torch optimizer whose param groups are updated in place
        rampup_value: float in [0, 1], increases linearly during warm-up
        rampdown_value: float in [1, 0], decreases linearly during cool-down
    """
    # LR warm-up to handle large minibatch sizes, following
    # https://arxiv.org/abs/1706.02677. Scheduling of betas / weight decay from
    # the original recipe is intentionally disabled (matches Orange's 2nd
    # system of the previous year).
    new_lr = rampup_value * rampdown_value * cfg.max_learning_rate
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update the teacher (EMA) model weights in place from the student model.

    Performs ``ema = alpha * ema + (1 - alpha) * student`` per parameter.

    Args:
        model: torch.nn.Module, the student model (source of the weights).
        ema_model: torch.nn.Module, the teacher model, updated in place.
        alpha: float, EMA decay rate (e.g. 0.999).
        global_step: int, number of training steps performed so far.
    """
    # Use the true average until the exponential average is more correct:
    # at step 0 the clamp gives alpha=0, i.e. the EMA copies the student.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_params, params in zip(ema_model.parameters(), model.parameters()):
        # Tensor.add_(other, alpha=...) replaces the deprecated
        # add_(scalar, tensor) overload that was removed in recent PyTorch.
        ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
def train(train_loader, model, optimizer, c_epoch, ema_model=None, mask_weak=None, mask_strong=None, adjust_lr=False):
    """ One epoch of a Mean Teacher model with Interpolation Consistency Training (ICT)
    Args:
        train_loader: torch.utils.data.DataLoader, iterator of training batches for an epoch.
            Should return a tuple: ((teacher input, student input), labels)
        model: torch.Module, model to be trained, should return a weak and strong prediction
        optimizer: torch.Module, optimizer used to train the model
        c_epoch: int, the current epoch of training
        ema_model: torch.Module, teacher (EMA) model, should return a weak and strong prediction
        mask_weak: slice or list, mask the batch to get only the weak labeled data (used to calculate the loss)
        mask_strong: slice or list, mask the batch to get only the strong labeled data (used to calculate the loss)
        adjust_lr: bool, Whether or not to adjust the learning rate during training (params in config)

    Returns:
        torch.Tensor, the (scalar) combined loss of the last batch of the epoch.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    # Supervised loss on labels and consistency loss between student and mixed teacher targets.
    class_criterion = nn.BCELoss()
    consistency_criterion = nn.MSELoss()
    class_criterion, consistency_criterion = to_cuda_if_available(class_criterion, consistency_criterion)
    meters = AverageMeterSet()
    log.debug("Nb batches: {}".format(len(train_loader)))
    start = time.time()
    for i, ((batch_input, ema_batch_input), target) in enumerate(train_loader):
        global_step = c_epoch * len(train_loader) + i
        # Exponential ramp-up of LR / consistency weight over the first cfg.n_epoch_rampup epochs.
        rampup_value = ramps.exp_rampup(global_step, cfg.n_epoch_rampup*len(train_loader))
        if adjust_lr:
            adjust_learning_rate(optimizer, rampup_value)
        meters.update('lr', optimizer.param_groups[0]['lr'])
        batch_input, ema_batch_input, target = to_cuda_if_available(batch_input, ema_batch_input, target)
        # Outputs
        # Teacher predictions are detached: gradients only flow through the student.
        strong_pred_ema, weak_pred_ema = ema_model(ema_batch_input)
        strong_pred_ema = strong_pred_ema.detach()
        weak_pred_ema = weak_pred_ema.detach()
        strong_pred, weak_pred = model(batch_input)
        # core for Interpolation Consistency Training (ICT)
        #this version: about 36.55 % in validation
        # First 3/4 of the batch are the unlabeled + weakly labeled samples
        # (depends on the batch composition set up by MultiStreamBatchSampler).
        n_unlabeled = int(3*cfg.batch_size/4)
        unlabeled_data = batch_input[:n_unlabeled]
        # Teacher pseudo-labels for the unlabeled samples.
        strong_prediction, weak_prediction = ema_model(unlabeled_data)
        mixed_unlabeled_data = []
        mixed_strong_plabel = []
        mixed_weak_plabel = []
        idx = np.arange(n_unlabeled)
        # Mixup: build n_unlabeled random convex combinations of sample pairs,
        # mixing inputs and teacher pseudo-labels with the same lambda.
        # NOTE: loop variable `iter` shadows the builtin of the same name.
        for iter in range(n_unlabeled):
            lambda_ = torch.rand(1).cuda()
            np.random.shuffle(idx)
            idx1 = idx[0]
            idx2 = idx[1]
            mixed = lambda_*unlabeled_data[idx1] + (1.0-lambda_)*unlabeled_data[idx2]
            mixed_unlabeled_data.append(mixed)
            spred = lambda_*strong_prediction[idx1] + (1.0-lambda_)*strong_prediction[idx2]
            mixed_strong_plabel.append(spred)
            wpred = lambda_*weak_prediction[idx1] + (1.0-lambda_)*weak_prediction[idx2]
            mixed_weak_plabel.append(wpred)
        mixed_unlabeled_data = torch.cat(mixed_unlabeled_data, dim=0)
        # NOTE(review): input shape (1, 628, 128) and strong-output shape
        # (157, 10) are hard-coded here — presumably cfg.max_frames x n_mels
        # and max_frames/pooling_time_ratio x n_classes; confirm against config.
        mixed_unlabeled_data = torch.reshape(mixed_unlabeled_data, (n_unlabeled, 1, 628, 128)).cuda()
        mixed_strong_plabel = torch.cat(mixed_strong_plabel, dim=0)
        mixed_strong_plabel = torch.reshape(mixed_strong_plabel, (n_unlabeled, 157, 10)).cuda()
        mixed_weak_plabel = torch.cat(mixed_weak_plabel, dim=0)
        mixed_weak_plabel = torch.reshape(mixed_weak_plabel, (n_unlabeled,10)).cuda()
        '''#this version is equal to add noise with random SNR depending on lambda_
        n_unlabeled = int(3*cfg.batch_size/4) # mask for unlabeled and weakly labeled data
        unlabeled_data1 = batch_input[:n_unlabeled]
        unlabeled_data2 = ema_batch_input[:n_unlabeled]
        strong_prediction1, weak_prediction1 = ema_model(unlabeled_data1)
        strong_prediction2, weak_prediction2 = ema_model(unlabeled_data2)
        lambda_ = torch.rand(1).cuda()
        mixed_unlabeled_data = lambda_*unlabeled_data1 + (1.0-lambda_)*unlabeled_data2
        mixed_strong_plabel = lambda_*strong_prediction1 + (1.0-lambda_)*strong_prediction2
        mixed_weak_plabel = lambda_*weak_prediction1 + (1.0-lambda_)*weak_prediction2
        '''
        # Student predictions on the mixed inputs, to be pulled towards the mixed pseudo-labels.
        strong_prediction_mixed, weak_prediction_mixed = model(mixed_unlabeled_data)
        #sample = target[mask_strong].sum(2)
        #sample = sample.cpu().numpy()
        #print(np.where(sample[-1,:]>1))
        loss = None
        # Weak BCE Loss
        target_weak = target.max(-2)[0]  # Take the max in the time axis
        if mask_weak is not None:
            weak_class_loss = class_criterion(weak_pred[mask_weak], target_weak[mask_weak])
            # EMA weak loss is only tracked for monitoring, not added to `loss`.
            ema_class_loss = class_criterion(weak_pred_ema[mask_weak], target_weak[mask_weak])
            loss = weak_class_loss
            if i == 0:
                log.debug(f"target: {target.mean(-2)} \n Target_weak: {target_weak} \n "
                          f"Target weak mask: {target_weak[mask_weak]} \n "
                          f"Target strong mask: {target[mask_strong].sum(-2)}\n"
                          f"weak loss: {weak_class_loss} \t rampup_value: {rampup_value}"
                          f"tensor mean: {batch_input.mean()}")
            meters.update('weak_class_loss', weak_class_loss.item())
            meters.update('Weak EMA loss', ema_class_loss.item())
        # Strong BCE loss
        if mask_strong is not None:
            strong_class_loss = class_criterion(strong_pred[mask_strong], target[mask_strong])
            meters.update('Strong loss', strong_class_loss.item())
            # EMA strong loss is only tracked for monitoring, not added to `loss`.
            strong_ema_class_loss = class_criterion(strong_pred_ema[mask_strong], target[mask_strong])
            meters.update('Strong EMA loss', strong_ema_class_loss.item())
            if loss is not None:
                loss += strong_class_loss
            else:
                loss = strong_class_loss
        # Teacher-student consistency cost
        if ema_model is not None:
            # Consistency weight is ramped up together with the LR schedule.
            consistency_cost = cfg.max_consistency_cost * rampup_value
            meters.update('Consistency weight', consistency_cost)
            # Take consistency about strong predictions (all data)
            consistency_loss_strong = consistency_cost * consistency_criterion(strong_prediction_mixed, mixed_strong_plabel)
            meters.update('Consistency strong', consistency_loss_strong.item())
            if loss is not None:
                loss += consistency_loss_strong
            else:
                loss = consistency_loss_strong
            meters.update('Consistency weight', consistency_cost)
            # Take consistency about weak predictions (all data)
            consistency_loss_weak = consistency_cost * consistency_criterion(weak_prediction_mixed, mixed_weak_plabel)
            meters.update('Consistency weak', consistency_loss_weak.item())
            if loss is not None:
                loss += consistency_loss_weak
            else:
                loss = consistency_loss_weak
        # Sanity checks: BCE/MSE losses must be finite, bounded and non-negative.
        assert not (np.isnan(loss.item()) or loss.item() > 1e5), 'Loss explosion: {}'.format(loss.item())
        assert not loss.item() < 0, 'Loss problem, cannot be negative'
        meters.update('Loss', loss.item())
        # compute gradient and do optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        global_step += 1
        if ema_model is not None:
            # Teacher follows the student through an exponential moving average.
            update_ema_variables(model, ema_model, 0.999, global_step)
    epoch_time = time.time() - start
    log.info(f"Epoch: {c_epoch}\t Time {epoch_time:.2f}\t {meters}")
    return loss
def get_dfs(desed_dataset, nb_files=None, separated_sources=False):
    """Build the metadata dataframes for every training/validation subset.

    Args:
        desed_dataset: DESED, dataset wrapper that extracts features and
            returns one metadata DataFrame per subset.
        nb_files: int, optional, restrict each subset to this many files
            (useful for quick debugging runs).
        separated_sources: bool, if True use the source-separated audio
            directories declared in config instead of the mixtures.

    Returns:
        dict of pandas.DataFrame with keys: "weak", "unlabel", "synthetic",
        "train_synthetic", "valid_synthetic", "validation".
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    audio_weak_ss = None
    audio_unlabel_ss = None
    audio_validation_ss = None
    audio_synthetic_ss = None
    if separated_sources:
        audio_weak_ss = cfg.weak_ss
        audio_unlabel_ss = cfg.unlabel_ss
        audio_validation_ss = cfg.validation_ss
        audio_synthetic_ss = cfg.synthetic_ss
    weak_df = desed_dataset.initialize_and_get_df(cfg.weak, audio_dir_ss=audio_weak_ss, nb_files=nb_files)
    unlabel_df = desed_dataset.initialize_and_get_df(cfg.unlabel, audio_dir_ss=audio_unlabel_ss, nb_files=nb_files)
    # Event if synthetic not used for training, used on validation purpose
    synthetic_df = desed_dataset.initialize_and_get_df(cfg.synthetic, audio_dir_ss=audio_synthetic_ss,
                                                       nb_files=nb_files, download=False)
    log.debug(f"synthetic: {synthetic_df.head()}")
    validation_df = desed_dataset.initialize_and_get_df(cfg.validation, audio_dir=cfg.audio_validation_dir,
                                                        audio_dir_ss=audio_validation_ss, nb_files=nb_files)
    # Divide synthetic in train and valid
    # Fixed random_state keeps the 80/20 split reproducible across runs.
    filenames_train = synthetic_df.filename.drop_duplicates().sample(frac=0.8, random_state=26)
    train_synth_df = synthetic_df[synthetic_df.filename.isin(filenames_train)]
    valid_synth_df = synthetic_df.drop(train_synth_df.index).reset_index(drop=True)
    # Put train_synth in frames so many_hot_encoder can work.
    # Not doing it for valid, because not using labels (when prediction) and event based metric expect sec.
    # NOTE(review): `pooling_time_ratio` is a module-level global defined only
    # inside the __main__ block — calling get_dfs from elsewhere raises
    # NameError; consider passing it as a parameter.
    train_synth_df.onset = train_synth_df.onset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    train_synth_df.offset = train_synth_df.offset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    log.debug(valid_synth_df.event_label.value_counts())
    data_dfs = {"weak": weak_df,
                "unlabel": unlabel_df,
                "synthetic": synthetic_df,
                "train_synthetic": train_synth_df,
                "valid_synthetic": valid_synth_df,
                "validation": validation_df,
                }
    return data_dfs
if __name__ == '__main__':
    # Fix all RNG seeds for reproducibility.
    torch.manual_seed(2020)
    np.random.seed(2020)
    logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    logger.info("Baseline 2020")
    logger.info(f"Starting time: {datetime.datetime.now()}")
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-s", '--subpart_data', type=int, default=None, dest="subpart_data",
                        help="Number of files to be used. Useful when testing on small number of files.")
    parser.add_argument("-n", '--no_synthetic', dest='no_synthetic', action='store_true', default=False,
                        help="Not using synthetic labels during training")
    f_args = parser.parse_args()
    pprint(vars(f_args))
    reduced_number_of_data = f_args.subpart_data
    no_synthetic = f_args.no_synthetic
    # NOTE(review): add_dir_model_name is currently unused — store_dir below is
    # hard-coded; kept for parity with the other baseline scripts.
    if no_synthetic:
        add_dir_model_name = "_no_synthetic"
    else:
        add_dir_model_name = "_with_synthetic"
    store_dir = os.path.join("stored_data", "MeanTeacher_with_ICT5")
    saved_model_dir = os.path.join(store_dir, "model")
    saved_pred_dir = os.path.join(store_dir, "predictions")
    # Resume training only if a model directory survives from a previous run.
    if os.path.exists(store_dir):
        load_flag = os.path.exists(saved_model_dir)
        os.makedirs(saved_model_dir, exist_ok=True)
        os.makedirs(saved_pred_dir, exist_ok=True)
    else:
        load_flag = False
        os.makedirs(store_dir, exist_ok=True)
        os.makedirs(saved_model_dir, exist_ok=True)
        os.makedirs(saved_pred_dir, exist_ok=True)
    n_channel = 1
    add_axis_conv = 0
    # Model taken from 2nd of dcase19 challenge: see Delphin-Poulat2019 in the results.
    n_layers = 7
    crnn_kwargs = {"n_in_channel": n_channel, "nclass": len(cfg.classes), "attention": True, "n_RNN_cell": 128,
                   "n_layers_RNN": 2,
                   "activation": "glu",
                   "dropout": 0.5,
                   "kernel_size": n_layers * [3], "padding": n_layers * [1], "stride": n_layers * [1],
                   "nb_filters": [16, 32, 64, 128, 128, 128, 128],
                   "pooling": [[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]}
    pooling_time_ratio = 4  # 2 * 2
    out_nb_frames_1s = cfg.sample_rate / cfg.hop_size / pooling_time_ratio
    median_window = max(int(cfg.median_window_s * out_nb_frames_1s), 1)
    logger.debug(f"median_window: {median_window}")
    # ##############
    # DATA
    # ##############
    dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
                    compute_log=False)
    dfs = get_dfs(dataset, reduced_number_of_data)
    # Meta path for psds
    durations_synth = get_durations_df(cfg.synthetic)
    many_hot_encoder = ManyHotEncoder(cfg.classes, n_frames=cfg.max_frames // pooling_time_ratio)
    encod_func = many_hot_encoder.encode_strong_df
    # Normalisation per audio or on the full dataset
    if cfg.scaler_type == "dataset":
        transforms = get_transforms(cfg.max_frames, add_axis=add_axis_conv)
        weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
        unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
        train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms)
        scaler_args = []
        scaler = Scaler()
        # # Only on real data since that's our final goal and test data are real
        scaler.calculate_scaler(ConcatDataset([weak_data, unlabel_data, train_synth_data]))
        logger.debug(f"scaler mean: {scaler.mean_}")
    else:
        scaler_args = ["global", "min-max"]
        scaler = ScalerPerAudio(*scaler_args)
    # Training transforms add Gaussian noise for the teacher/student pair;
    # validation transforms only scale.
    transforms = get_transforms(cfg.max_frames, scaler, add_axis_conv,
                                noise_dict_params={"mean": 0., "snr": cfg.noise_snr})
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    weak_data = DataLoadDf(dfs["weak"], encod_func, transforms, in_memory=cfg.in_memory)
    unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms, in_memory=cfg.in_memory_unlab)
    train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms, in_memory=cfg.in_memory)
    valid_synth_data = DataLoadDf(dfs["valid_synthetic"], encod_func, transforms_valid,
                                  return_indexes=True, in_memory=cfg.in_memory)
    logger.debug(f"len synth: {len(train_synth_data)}, len_unlab: {len(unlabel_data)}, len weak: {len(weak_data)}")
    # Batch layout: [weak | unlabel | strong] — the masks below rely on it.
    if not no_synthetic:
        list_dataset = [weak_data, unlabel_data, train_synth_data]
        batch_sizes = [cfg.batch_size//4, cfg.batch_size//2, cfg.batch_size//4]
        strong_mask = slice((3*cfg.batch_size)//4, cfg.batch_size)
    else:
        list_dataset = [weak_data, unlabel_data]
        batch_sizes = [cfg.batch_size // 4, 3 * cfg.batch_size // 4]
        strong_mask = None
    weak_mask = slice(batch_sizes[0])  # Assume weak data is always the first one
    concat_dataset = ConcatDataset(list_dataset)
    sampler = MultiStreamBatchSampler(concat_dataset, batch_sizes=batch_sizes)
    training_loader = DataLoader(concat_dataset, batch_sampler=sampler)
    valid_synth_loader = DataLoader(valid_synth_data, batch_size=cfg.batch_size)
    # ##############
    # Model
    # ##############
    if load_flag:
        # Resume from the newest epoch checkpoint. os.listdir order is
        # OS-dependent, so pick the highest numeric epoch suffix explicitly
        # (also skips non-epoch files such as "baseline_best").
        epoch_ckpts = [m for m in os.listdir(saved_model_dir) if m.split('_')[-1].isdigit()]
        modelName = max(epoch_ckpts, key=lambda name: int(name.split('_')[-1]))
        # Plain int() — np.int is deprecated and removed in NumPy >= 1.24.
        n_epoch = int(modelName.split('_')[-1]) + 1
        model_fname = os.path.join(saved_model_dir, modelName)
        state = torch.load(model_fname)
        crnn = _load_model(state, 'crnn')
        logger.info(f"training model: {model_fname}, epoch: {state['epoch']}")
        crnn_ema = _load_model(state, 'crnn')
        for param in crnn_ema.parameters():
            # In-place detach_() (the non in-place detach() used before was a
            # no-op); matches the fresh-initialisation branch below.
            param.detach_()
        optim_kwargs = state['optimizer']["kwargs"]
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
    else:
        n_epoch = 0
        crnn = CRNN(**crnn_kwargs)
        pytorch_total_params = sum(p.numel() for p in crnn.parameters() if p.requires_grad)
        logger.info(crnn)
        logger.info("number of parameters in the model: {}".format(pytorch_total_params))
        crnn.apply(weights_init)
        # The teacher is a detached copy updated only through the EMA.
        crnn_ema = CRNN(**crnn_kwargs)
        crnn_ema.apply(weights_init)
        for param in crnn_ema.parameters():
            param.detach_()
        optim_kwargs = {"lr": cfg.default_learning_rate, "betas": (0.9, 0.999)}
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
    state = {
        'model': {"name": crnn.__class__.__name__,
                  'args': '',
                  "kwargs": crnn_kwargs,
                  'state_dict': crnn.state_dict()},
        'model_ema': {"name": crnn_ema.__class__.__name__,
                      'args': '',
                      "kwargs": crnn_kwargs,
                      'state_dict': crnn_ema.state_dict()},
        'optimizer': {"name": optim.__class__.__name__,
                      'args': '',
                      "kwargs": optim_kwargs,
                      'state_dict': optim.state_dict()},
        "pooling_time_ratio": pooling_time_ratio,
        "scaler": {
            "type": type(scaler).__name__,
            "args": scaler_args,
            "state_dict": scaler.state_dict()},
        "many_hot_encoder": many_hot_encoder.state_dict(),
        "median_window": median_window,
        "desed": dataset.state_dict()
    }
    save_best_cb = SaveBest("sup")
    if cfg.early_stopping is not None:
        early_stopping_call = EarlyStopping(patience=cfg.early_stopping, val_comp="sup", init_patience=cfg.es_init_wait)
    # ##############
    # Train
    # ##############
    results = pd.DataFrame(columns=["loss", "valid_synth_f1", "weak_metric", "global_valid"])
    # Start at n_epoch so resumed runs continue from the saved checkpoint
    # instead of restarting at epoch 0 (the previous range(cfg.n_epoch) ignored
    # the resumed epoch counter).
    for epoch in range(n_epoch, cfg.n_epoch):
        crnn.train()
        crnn_ema.train()
        crnn, crnn_ema = to_cuda_if_available(crnn, crnn_ema)
        loss_value = train(training_loader, crnn, optim, epoch,
                           ema_model=crnn_ema, mask_weak=weak_mask, mask_strong=strong_mask, adjust_lr=cfg.adjust_lr)
        # Validation
        crnn = crnn.eval()
        logger.info("\n ### Valid synthetic metric ### \n")
        predictions = get_predictions(crnn, valid_synth_loader, many_hot_encoder.decode_strong, pooling_time_ratio,
                                      median_window=median_window, save_predictions=None)
        # Validation with synthetic data (dropping feature_filename for psds)
        valid_synth = dfs["valid_synthetic"].drop("feature_filename", axis=1)
        valid_synth_f1, psds_m_f1 = compute_metrics(predictions, valid_synth, durations_synth)
        # Update state
        state['model']['state_dict'] = crnn.state_dict()
        state['model_ema']['state_dict'] = crnn_ema.state_dict()
        state['optimizer']['state_dict'] = optim.state_dict()
        state['epoch'] = epoch
        state['valid_metric'] = valid_synth_f1
        state['valid_f1_psds'] = psds_m_f1
        # Callbacks
        if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
            model_fname = os.path.join(saved_model_dir, "baseline_epoch_" + str(epoch))
            torch.save(state, model_fname)
        if cfg.save_best:
            if save_best_cb.apply(valid_synth_f1):
                model_fname = os.path.join(saved_model_dir, "baseline_best")
                torch.save(state, model_fname)
            results.loc[epoch, "global_valid"] = valid_synth_f1
        results.loc[epoch, "loss"] = loss_value.item()
        results.loc[epoch, "valid_synth_f1"] = valid_synth_f1
        if cfg.early_stopping:
            if early_stopping_call.apply(valid_synth_f1):
                # logging.warn is deprecated in favour of warning.
                logger.warning("EARLY STOPPING")
                break
    if cfg.save_best:
        model_fname = os.path.join(saved_model_dir, "baseline_best")
        state = torch.load(model_fname)
        crnn = _load_model(state, 'crnn')
        logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
    else:
        logger.info("testing model of last epoch: {}".format(cfg.n_epoch))
    # to_csv returns None; just write the file (results is already a DataFrame).
    results.to_csv(os.path.join(saved_pred_dir, "results.tsv"),
                   sep="\t", index=False, float_format="%.4f")
    # ##############
    # Validation
    # ##############
    crnn.eval()
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
    validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
    validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
    validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
    durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
    # Preds with only one value
    valid_predictions = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                        pooling_time_ratio, median_window=median_window,
                                        save_predictions=predicitons_fname)
    compute_metrics(valid_predictions, validation_labels_df, durations_validation)
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                     pooling_time_ratio, thresholds=list_thresholds, median_window=median_window,
                                     save_predictions=predicitons_fname)
    psds = compute_psds_from_operating_points(pred_ss_thresh, validation_labels_df, durations_validation)
    psds_score(psds, filename_roc_curves=os.path.join(saved_pred_dir, "figures/psds_roc.png"))
| 24,772 | 47.57451 | 124 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/TestModel.py | # -*- coding: utf-8 -*-
import argparse
import os.path as osp
import torch
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from data_utils.DataLoad import DataLoadDf
from data_utils.Desed import DESED
from evaluation_measures import psds_score, get_predictions_v2, \
compute_psds_from_operating_points, compute_metrics
from utilities.utils import to_cuda_if_available, generate_tsv_wav_durations, meta_path_to_audio_dir
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
from utilities.Logger import create_logger
from utilities.Scaler import Scaler, ScalerPerAudio
from models.CRNN import CRNN
from models.Transformer import Transformer
from models.Conformer_bk import Conformer
import config as cfg
logger = create_logger(__name__)
torch.manual_seed(2020)
def _load_model(state, model_type, model_name="model"):
    """Rebuild a trained model from a checkpoint dictionary.

    Args:
        state: dict, checkpoint containing {model_name: {"args", "kwargs", "state_dict"}}
            and an "epoch" entry (only used for logging).
        model_type: str, one of 'crnn', 'transformer', 'conformer'.
        model_name: str, key of the model entry inside `state`.

    Returns:
        The instantiated model in eval mode, moved to GPU if available.

    Raises:
        ValueError: if `model_type` is not a known architecture.
    """
    model_args = state[model_name]["args"]
    model_kwargs = state[model_name]["kwargs"]
    # Compare strings with == — the previous `is` comparison relied on CPython
    # string interning and raises SyntaxWarning on Python >= 3.8.
    if model_type == 'crnn':
        model = CRNN(*model_args, **model_kwargs)
    elif model_type == 'transformer':
        model = Transformer(*model_args, **model_kwargs)
    elif model_type == 'conformer':
        model = Conformer(*model_args, **model_kwargs)
    else:
        # Previously `model` was left unbound here, raising UnboundLocalError.
        raise ValueError("Unknown model_type: {}".format(model_type))
    model.load_state_dict(state[model_name]["state_dict"])
    model.eval()
    model = to_cuda_if_available(model)
    logger.info("Model loaded at epoch: {}".format(state["epoch"]))
    logger.info(model)
    return model
def _load_model_v2(state, model_id, model_type, model_name="model"):
    """Rebuild one of two co-trained models stored in a single checkpoint.

    Args:
        state: dict, checkpoint containing {model_name: {"args", "kwargs",
            "state_dict1", "state_dict2"}} and an "epoch" entry (logging only).
        model_id: int, 1 or 2 — which of the two stored weight sets to load.
        model_type: str, one of 'crnn', 'transformer', 'conformer'.
        model_name: str, key of the model entry inside `state`.

    Returns:
        The instantiated model in eval mode, moved to GPU if available.

    Raises:
        ValueError: if `model_type` or `model_id` is not recognised.
    """
    model_args = state[model_name]["args"]
    model_kwargs = state[model_name]["kwargs"]
    # Compare strings with == — the previous `is` comparison relied on CPython
    # string interning and raises SyntaxWarning on Python >= 3.8.
    if model_type == 'crnn':
        model = CRNN(*model_args, **model_kwargs)
    elif model_type == 'transformer':
        model = Transformer(*model_args, **model_kwargs)
    elif model_type == 'conformer':
        model = Conformer(*model_args, **model_kwargs)
    else:
        raise ValueError("Unknown model_type: {}".format(model_type))
    if model_id == 1:
        model.load_state_dict(state[model_name]["state_dict1"])
    elif model_id == 2:
        model.load_state_dict(state[model_name]["state_dict2"])
    else:
        # Previously any other id silently returned randomly initialised weights.
        raise ValueError("model_id must be 1 or 2, got {}".format(model_id))
    model.eval()
    model = to_cuda_if_available(model)
    logger.info("Model loaded at epoch: {}".format(state["epoch"]))
    logger.info(model)
    return model
def _load_scaler(state):
    """Rebuild the feature scaler saved in a training checkpoint.

    Args:
        state: dict, checkpoint with a "scaler" entry holding
            {"type", "args", "state_dict"}.

    Returns:
        A Scaler or ScalerPerAudio instance with its saved statistics restored.

    Raises:
        NotImplementedError: if the saved scaler type is unknown.
    """
    saved = state["scaler"]
    scaler_kind = saved["type"]
    if scaler_kind == "ScalerPerAudio":
        scaler = ScalerPerAudio(*saved["args"])
    elif scaler_kind == "Scaler":
        scaler = Scaler()
    else:
        raise NotImplementedError("Not the right type of Scaler has been saved in state")
    scaler.load_state_dict(saved["state_dict"])
    return scaler
def _load_state_vars(state, gtruth_df, median_win=None):
    """Restore everything needed to run inference from a checkpoint.

    Args:
        state: dict, training checkpoint (model, scaler, encoder, etc.).
        gtruth_df: pandas.DataFrame, groundtruth metadata used to build the
            prediction dataloader (copied, not modified).
        median_win: int, optional, median filter length in frames; defaults to
            the value stored in the checkpoint.

    Returns:
        dict with keys: model, dataloader, pooling_time_ratio,
        many_hot_encoder, median_window.
    """
    pred_df = gtruth_df.copy()
    # Define dataloader
    # Load the label encoder once — it was previously reloaded a second time
    # further down with identical arguments.
    many_hot_encoder = ManyHotEncoder.load_state_dict(state["many_hot_encoder"])
    scaler = _load_scaler(state)
    model = _load_model(state, 'crnn')
    transforms_valid = get_transforms(cfg.max_frames, scaler=scaler, add_axis=0)
    strong_dataload = DataLoadDf(pred_df, many_hot_encoder.encode_strong_df, transforms_valid, return_indexes=True)
    strong_dataloader_ind = DataLoader(strong_dataload, batch_size=cfg.batch_size, drop_last=False)
    pooling_time_ratio = state["pooling_time_ratio"]
    if median_win is None:
        median_win = state["median_window"]
    return {
        "model": model,
        "dataloader": strong_dataloader_ind,
        "pooling_time_ratio": pooling_time_ratio,
        "many_hot_encoder": many_hot_encoder,
        "median_window": median_win
    }
def get_variables(args):
    """Resolve the evaluation inputs from the parsed CLI namespace.

    Args:
        args: argparse.Namespace with model_path, groundtruth_tsv,
            median_window, meta_gt and groundtruth_audio_dir attributes.

    Returns:
        tuple: (model path, median window, groundtruth audio dir,
        groundtruth DataFrame, per-file durations DataFrame).
    """
    checkpoint_path = args.model_path
    base_name, extension = osp.splitext(args.groundtruth_tsv)
    window = args.median_window
    durations_path = args.meta_gt
    audio_dir = args.groundtruth_audio_dir
    # Default the durations file next to the groundtruth tsv.
    if durations_path is None:
        durations_path = base_name + "_durations" + extension
    if audio_dir is None:
        audio_dir = meta_path_to_audio_dir(base_name)
        # Useful because of the data format
        if "validation" in audio_dir:
            audio_dir = osp.dirname(audio_dir)
    gt_df = pd.read_csv(args.groundtruth_tsv, sep="\t")
    # Load cached durations when present; (re)generate them from the audio
    # files when the cache is missing or empty.
    durations_df = None
    if osp.exists(durations_path):
        durations_df = pd.read_csv(durations_path, sep='\t')
    if durations_df is None or len(durations_df) == 0:
        durations_df = generate_tsv_wav_durations(audio_dir, durations_path)
    return checkpoint_path, window, audio_dir, gt_df, durations_df
if __name__ == '__main__':
    # Evaluate a saved checkpoint on a labelled set and report event metrics.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-m", '--model_path', type=str, required=True,
                        help="Path of the model to be evaluated")
    parser.add_argument("-g", '--groundtruth_tsv', type=str, required=True,
                        help="Path of the groundtruth tsv file")
    # Not required after that, but recommended to defined
    parser.add_argument("-mw", "--median_window", type=int, default=None,
                        help="Nb of frames for the median window, "
                             "if None the one defined for testing after training is used")
    # Next groundtruth variable could be ommited if same organization than DESED dataset
    parser.add_argument('--meta_gt', type=str, default=None,
                        help="Path of the groundtruth description of feat_filenames and durations")
    parser.add_argument("-ga", '--groundtruth_audio_dir', type=str, default=None,
                        help="Path of the groundtruth filename, (see in config, at dataset folder)")
    parser.add_argument("-s", '--save_predictions_path', type=str, default=None,
                        help="Path for the predictions to be saved (if needed)")
    # Dev
    parser.add_argument("-n", '--nb_files', type=int, default=None,
                        help="Number of files to be used. Useful when testing on small number of files.")
    # Savepath for posterior
    parser.add_argument("-sp", '--save_posterior', type=str, default=None,
                        help="Save path for posterior")
    f_args = parser.parse_args()
    # Get variables from f_args
    model_path, median_window, gt_audio_dir, groundtruth, durations = get_variables(f_args)
    # Model
    # Load on CPU first; _load_state_vars moves the model to GPU if available.
    expe_state = torch.load(model_path, map_location="cpu")
    dataset = DESED(base_feature_dir=osp.join(cfg.workspace, "dataset", "features"), compute_log=False)
    gt_df_feat = dataset.initialize_and_get_df(f_args.groundtruth_tsv, gt_audio_dir, nb_files=f_args.nb_files)
    params = _load_state_vars(expe_state, gt_df_feat, median_window)
    # Preds with only one value
    # Single-threshold decoding; frame posteriors are optionally dumped to save_posterior.
    single_predictions = get_predictions_v2(params["model"], params["dataloader"],
                                            params["many_hot_encoder"].decode_strong, params["pooling_time_ratio"],
                                            median_window=params["median_window"], save_dir = f_args.save_posterior,
                                            save_predictions=f_args.save_predictions_path)
    compute_metrics(single_predictions, groundtruth, durations)
    '''
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions(params["model"], params["dataloader"],
                                     params["many_hot_encoder"].decode_strong, params["pooling_time_ratio"],
                                     thresholds=list_thresholds, median_window=params["median_window"],
                                     save_predictions=f_args.save_predictions_path)
    psds = compute_psds_from_operating_points(pred_ss_thresh, groundtruth, durations)
    fname_roc = None
    if f_args.save_predictions_path is not None:
        fname_roc = osp.splitext(f_args.save_predictions_path)[0] + "_roc.png"
    psds_score(psds, filename_roc_curves=fname_roc)
    '''
| 8,195 | 40.604061 | 115 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/TestModel_ss_late_integration.py | # -*- coding: utf-8 -*-
import argparse
import os
import os.path as osp
import scipy
import torch
from dcase_util.data import ProbabilityEncoder
import pandas as pd
import numpy as np
from data_utils.DataLoad import DataLoadDf
from data_utils.Desed import DESED
from TestModel import _load_scaler, _load_crnn
from evaluation_measures import psds_score, compute_psds_from_operating_points, compute_metrics
from utilities.utils import to_cuda_if_available, generate_tsv_wav_durations, meta_path_to_audio_dir
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
from utilities.Logger import create_logger
import config as cfg
logger = create_logger(__name__)
def norm_alpha(x, alpha_val):
    """Generalized (power) mean of `x` along axis 0 with exponent `alpha_val`.

    Equivalent to ``mean(x ** alpha_val, axis=0) ** (1 / alpha_val)``:
    alpha=1 is the arithmetic mean, large alpha tends towards the maximum.
    """
    n_items = x.shape[0]
    powered_mean = (x ** alpha_val).sum(0) * (1 / n_items)
    return powered_mean ** (1 / alpha_val)
def get_predictions_ss_late_integration(model, valid_dataload, decoder, pooling_time_ratio=1, thresholds=[0.5],
                                        median_window=1, save_predictions=None, alpha=1):
    """ Get the predictions of a trained model on a specific set
    Args:
        model: torch.Module, a trained pytorch model (you usually want it to be in .eval() mode).
        valid_dataload: DataLoadDf, giving ((input_data, label), index) but label is not used here, the multiple
            data are the multiple sources (the mixture should always be the first one to appear, and then the sources)
            example: if the input data is (3, 1, timesteps, freq) there is the mixture and 2 sources.
        decoder: function, takes a numpy.array of shape (time_steps, n_labels) as input and return a list of lists
            of ("event_label", "onset", "offset") for each label predicted.
        pooling_time_ratio: the division to make between timesteps as input and timesteps as output
        median_window: int, the median window (in number of time steps) to be applied
        save_predictions: str or list, the path of the base_filename to save the predictions or a list of names
            corresponding for each thresholds
        thresholds: list, list of threshold to be applied
        alpha: float, the exponent of the power mean used to combine mixture and source predictions (see norm_alpha)
    Returns:
        dict of the different predictions with associated threshold
    """
    # NOTE(review): `thresholds=[0.5]` is a mutable default argument; it is never
    # mutated here so behavior is fine, but a tuple default would be safer.
    # Init a dataframe per threshold
    prediction_dfs = {}
    for threshold in thresholds:
        prediction_dfs[threshold] = pd.DataFrame()
    # Get predictions
    for i, ((input_data, _), index) in enumerate(valid_dataload):
        input_data = to_cuda_if_available(input_data)
        with torch.no_grad():
            pred_strong, _ = model(input_data)
            pred_strong = pred_strong.cpu()
        pred_strong = pred_strong.detach().numpy()
        if i == 0:
            logger.debug(pred_strong)
        # Late integration: axis 0 holds [mixture, source_1, ..., source_k].
        # First fuse the separated-source predictions with a power mean, then
        # fuse that result with the mixture prediction the same way.
        pred_strong_sources = pred_strong[1:]
        pred_strong_sources = norm_alpha(pred_strong_sources, alpha)
        pred_strong_comb = norm_alpha(np.stack((pred_strong[0], pred_strong_sources), 0), alpha)
        # Get different post processing per threshold
        for threshold in thresholds:
            pred_strong_bin = ProbabilityEncoder().binarization(pred_strong_comb,
                                                                binarization_type="global_threshold",
                                                                threshold=threshold)
            # Median filtering along time smooths out spurious on/off switches.
            pred_strong_m = scipy.ndimage.filters.median_filter(pred_strong_bin, (median_window, 1))
            pred = decoder(pred_strong_m)
            pred = pd.DataFrame(pred, columns=["event_label", "onset", "offset"])
            # Put them in seconds
            pred.loc[:, ["onset", "offset"]] *= pooling_time_ratio / (cfg.sample_rate / cfg.hop_size)
            pred.loc[:, ["onset", "offset"]] = pred[["onset", "offset"]].clip(0, cfg.max_len_seconds)
            pred["filename"] = valid_dataload.filenames.iloc[index]
            prediction_dfs[threshold] = prediction_dfs[threshold].append(pred, ignore_index=True)
            if i == 0:
                logger.debug("predictions: \n{}".format(pred))
                logger.debug("predictions strong: \n{}".format(pred_strong_comb))
    # Save predictions
    if save_predictions is not None:
        if isinstance(save_predictions, str):
            if len(thresholds) == 1:
                save_predictions = [save_predictions]
            else:
                base, ext = osp.splitext(save_predictions)
                # One file per threshold, named after the threshold value
                save_predictions = [osp.join(base, f"{threshold:.3f}{ext}") for threshold in thresholds]
        else:
            assert len(save_predictions) == len(thresholds), \
                f"There should be a prediction file per threshold: len predictions: {len(save_predictions)}\n" \
                f"len thresholds: {len(thresholds)}"
            save_predictions = save_predictions
        for ind, threshold in enumerate(thresholds):
            dir_to_create = osp.dirname(save_predictions[ind])
            if dir_to_create != "":
                os.makedirs(dir_to_create, exist_ok=True)
                # NOTE(review): unlike the evaluation_measures version, this log
                # only fires when a directory had to be created.
                if ind % 10 == 0:
                    logger.info(f"Saving predictions at: {save_predictions[ind]}. {ind + 1} / {len(thresholds)}")
            prediction_dfs[threshold].to_csv(save_predictions[ind], index=False, sep="\t", float_format="%.3f")
    list_predictions = []
    for key in prediction_dfs:
        list_predictions.append(prediction_dfs[key])
    if len(list_predictions) == 1:
        list_predictions = list_predictions[0]
    return list_predictions
def _load_state_vars(state, gtruth_df, median_win=None):
    """Rebuild model, data loading and decoding settings from a checkpoint state.

    Args:
        state: dict, torch checkpoint holding the model, scaler and encoder descriptions
        gtruth_df: pd.DataFrame, groundtruth dataframe used to build the prediction DataLoadDf
        median_win: int, optional median filter length; defaults to the value stored in `state`

    Returns:
        dict with keys "model", "dataload", "pooling_time_ratio",
        "many_hot_encoder" and "median_window"
    """
    pred_df = gtruth_df.copy()
    # Define dataloader
    many_hot_encoder = ManyHotEncoder.load_state_dict(state["many_hot_encoder"])
    scaler = _load_scaler(state)
    crnn = _load_crnn(state)
    # Note, need to unsqueeze axis 1
    transforms_valid = get_transforms(cfg.max_frames, scaler=scaler, add_axis=1)
    # Note, no dataloader here
    strong_dataload = DataLoadDf(pred_df, many_hot_encoder.encode_strong_df, transforms_valid, return_indexes=True)
    pooling_time_ratio = state["pooling_time_ratio"]
    # NOTE(review): the encoder was already loaded above; this second load is redundant.
    many_hot_encoder = ManyHotEncoder.load_state_dict(state["many_hot_encoder"])
    if median_win is None:
        median_win = state["median_window"]
    return {
        "model": crnn,
        "dataload": strong_dataload,
        "pooling_time_ratio": pooling_time_ratio,
        "many_hot_encoder": many_hot_encoder,
        "median_window": median_win
    }
def get_variables(args):
    """Extract and derive the evaluation settings from the parsed CLI arguments.

    Returns:
        tuple: (model_path, median_window, groundtruth_audio_dir,
                durations_df, keep_sources)
    """
    model_pth = args.model_path
    median_win = args.median_window
    base_name, extension = osp.splitext(args.groundtruth_tsv)

    # Default the durations file next to the groundtruth tsv.
    meta_gt = args.meta_gt
    if meta_gt is None:
        meta_gt = base_name + "_durations" + extension

    gt_audio_pth = args.groundtruth_audio_dir
    if gt_audio_pth is None:
        gt_audio_pth = meta_path_to_audio_dir(base_name)
        # Useful because of the data format
        if "validation" in gt_audio_pth:
            gt_audio_pth = osp.dirname(gt_audio_pth)

    # Load per-file durations, regenerating them when missing or empty.
    if osp.exists(meta_gt):
        meta_dur_df = pd.read_csv(meta_gt, sep='\t')
        if len(meta_dur_df) == 0:
            meta_dur_df = generate_tsv_wav_durations(gt_audio_pth, meta_gt)
    else:
        meta_dur_df = generate_tsv_wav_durations(gt_audio_pth, meta_gt)

    # "1,2" -> ["1", "2"]; None means keep everything.
    keep_sources = args.keep_sources
    if keep_sources is not None:
        keep_sources = keep_sources.split(",")

    return model_pth, median_win, gt_audio_pth, meta_dur_df, keep_sources
if __name__ == '__main__':
    # CLI: evaluate a trained model with late integration of the predictions
    # made on the mixture and on its separated sources.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-m", '--model_path', type=str, required=True,
                        help="Path of the model to be evaluated")
    parser.add_argument("-g", '--groundtruth_tsv', type=str, required=True,
                        help="Path of the groundtruth tsv file")
    # Source separation
    parser.add_argument("-a", '--base_dir_ss', type=str, required=True,
                        help="Base directory of source separation. "
                             "Path where to search subdirectories in which there are isolated events")
    parser.add_argument("-k", "--keep_sources", type=str, default=None,
                        help="The sources to be kept from the sound_separation (each source separated by a comma)."
                             "Example: '1,2' keeps the 2nd and 3rd sources (begins at 0).")
    # Not required after that, but recommended to defined
    parser.add_argument("-mw", "--median_window", type=int, default=None,
                        help="Nb of frames for the median window, "
                             "if None the one defined for testing after training is used")
    # Next groundtruth variable could be ommited if same organization than DESED dataset
    parser.add_argument('--meta_gt', type=str, default=None,
                        help="Path of the groundtruth description of feat_filenames and durations")
    parser.add_argument("-ga", '--groundtruth_audio_dir', type=str, default=None,
                        help="Path of the groundtruth filename, (see in config, at dataset folder)")
    parser.add_argument("-s", '--save_predictions_path', type=str, default=None,
                        help="Path for the predictions to be saved (if needed)")
    # Dev only
    parser.add_argument("-n", '--nb_files', type=int, default=None,
                        help="Number of files to be used. Useful when testing on small number of files.")
    f_args = parser.parse_args()
    # Get variables from f_args
    model_path, median_window, gt_audio_dir, durations, keep_sources = get_variables(f_args)
    expe_state = torch.load(model_path, map_location="cpu")
    dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"), compute_log=False)
    groundtruth = pd.read_csv(f_args.groundtruth_tsv, sep="\t")
    # Features for the mixtures and their separated sources
    gt_df_feat_ss = dataset.initialize_and_get_df(f_args.groundtruth_tsv, gt_audio_dir, f_args.base_dir_ss,
                                                  pattern_ss="_events", nb_files=f_args.nb_files,
                                                  keep_sources=keep_sources)
    params = _load_state_vars(expe_state, gt_df_feat_ss, median_window)
    # Exponent of the power mean used to fuse mixture and source predictions
    alpha_norm = 1
    # Preds with only one value (note that in comparison of TestModel, here we do not use a dataloader)
    single_predictions = get_predictions_ss_late_integration(params["model"], params["dataload"],
                                                             params["many_hot_encoder"].decode_strong,
                                                             params["pooling_time_ratio"],
                                                             median_window=params["median_window"],
                                                             save_predictions=f_args.save_predictions_path,
                                                             alpha=alpha_norm)
    compute_metrics(single_predictions, groundtruth, durations)
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions_ss_late_integration(params["model"], params["dataload"],
                                                         params["many_hot_encoder"].decode_strong,
                                                         params["pooling_time_ratio"],
                                                         thresholds=thresholds,
                                                         median_window=params["median_window"],
                                                         save_predictions=f_args.save_predictions_path)
    psds = compute_psds_from_operating_points(pred_ss_thresh, groundtruth, durations)
    # NOTE(review): this raises when --save_predictions_path is not given
    # (osp.splitext(None)); TestModel.py guards this case — confirm intent.
    psds_score(psds, filename_roc_curves=osp.splitext(f_args.save_predictions_path)[0] + "_roc.png")
| 12,022 | 48.887967 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/evaluation_measures.py | # -*- coding: utf-8 -*-
import os
from os import path as osp
import psds_eval
import scipy
from dcase_util.data import ProbabilityEncoder
import sed_eval
import numpy as np
import pandas as pd
import torch
from psds_eval import plot_psd_roc, PSDSEval
import config as cfg
from utilities.Logger import create_logger
from utilities.utils import to_cuda_if_available
from utilities.ManyHotEncoder import ManyHotEncoder
logger = create_logger(__name__, terminal_level=cfg.terminal_level)
def get_event_list_current_file(df, fname):
    """Return the events of `fname` as a list of dicts (sed_eval format).

    A single row whose event_label is NaN marks a file without any event; it
    is reported as ``[{"filename": fname}]`` so the file still counts as
    evaluated by sed_eval.

    :param df: pd.DataFrame, the dataframe to search on
    :param fname: the filename to extract the value from the dataframe
    :return: list of events (dictionaries) for the given filename
    """
    rows = df[df["filename"] == fname]
    no_event = len(rows) == 1 and pd.isna(rows["event_label"].iloc[0])
    if no_event:
        return [{"filename": fname}]
    return rows.to_dict('records')
def event_based_evaluation_df(reference, estimated, t_collar=0.200, percentage_of_length=0.2):
    """ Calculate EventBasedMetric given a reference and estimated dataframe

    Args:
        reference: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
            reference events
        estimated: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
            estimated events to be compared with reference
        t_collar: float, in seconds, the number of time allowed on onsets and offsets
        percentage_of_length: float, between 0 and 1, the percentage of length of the file allowed on the offset

    Returns:
        sed_eval.sound_event.EventBasedMetrics with the scores
    """
    # The metric needs the union of every label seen on either side.
    labels = set(reference.event_label.dropna().unique())
    labels |= set(estimated.event_label.dropna().unique())

    metric = sed_eval.sound_event.EventBasedMetrics(
        event_label_list=list(labels),
        t_collar=t_collar,
        percentage_of_length=percentage_of_length,
        empty_system_output_handling='zero_score'
    )

    # Evaluate file by file; only files present in the reference are scored.
    for fname in reference["filename"].unique():
        metric.evaluate(
            reference_event_list=get_event_list_current_file(reference, fname),
            estimated_event_list=get_event_list_current_file(estimated, fname),
        )
    return metric
def segment_based_evaluation_df(reference, estimated, time_resolution=1.):
    """ Calculate SegmentBasedMetrics given a reference and estimated dataframe

    Args:
        reference: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
            reference events
        estimated: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
            estimated events to be compared with reference
        time_resolution: float, the time resolution of the segment based metric

    Returns:
        sed_eval.sound_event.SegmentBasedMetrics with the scores
    """
    # The metric needs the union of every label seen on either side.
    labels = set(reference.event_label.dropna().unique())
    labels |= set(estimated.event_label.dropna().unique())

    metric = sed_eval.sound_event.SegmentBasedMetrics(
        event_label_list=list(labels),
        time_resolution=time_resolution
    )

    # Evaluate file by file; only files present in the reference are scored.
    for fname in reference["filename"].unique():
        metric.evaluate(
            reference_event_list=get_event_list_current_file(reference, fname),
            estimated_event_list=get_event_list_current_file(estimated, fname)
        )
    return metric
def get_predictions(model, dataloader, decoder, pooling_time_ratio=1, thresholds=[0.5],
                    median_window=1, save_predictions=None):
    """ Get the predictions of a trained model on a specific set
    Args:
        model: torch.Module, a trained pytorch model (you usually want it to be in .eval() mode).
        dataloader: torch.utils.data.DataLoader, giving ((input_data, label), indexes) but label is not used here
        decoder: function, takes a numpy.array of shape (time_steps, n_labels) as input and return a list of lists
            of ("event_label", "onset", "offset") for each label predicted.
        pooling_time_ratio: the division to make between timesteps as input and timesteps as output
        median_window: int, the median window (in number of time steps) to be applied
        save_predictions: str or list, the path of the base_filename to save the predictions or a list of names
            corresponding for each thresholds
        thresholds: list, list of threshold to be applied
    Returns:
        dict of the different predictions with associated threshold
    """
    # NOTE(review): `thresholds=[0.5]` is a mutable default argument; it is never
    # mutated here so behavior is fine, but a tuple default would be safer.
    # Init a dataframe per threshold
    prediction_dfs = {}
    for threshold in thresholds:
        prediction_dfs[threshold] = pd.DataFrame()
    # Get predictions
    for i, ((input_data,_), indexes) in enumerate(dataloader):
        indexes = indexes.numpy()
        input_data = to_cuda_if_available(input_data)
        with torch.no_grad():
            pred_strong, _ = model(input_data)
            pred_strong = pred_strong.cpu()
        pred_strong = pred_strong.detach().numpy()
        if i == 0:
            logger.debug(pred_strong)
        # Post processing and put predictions in a dataframe
        for j, pred_strong_it in enumerate(pred_strong):
            #savePath = "./Posterior/" + dataloader.dataset.filenames.iloc[indexes[j]]
            #savePath.replace("wav", "npy")
            #np.save(savePath, pred_strong_it)
            for threshold in thresholds:
                # Binarize the frame-level posteriors, then smooth in time.
                pred_strong_bin = ProbabilityEncoder().binarization(pred_strong_it,
                                                                    binarization_type="global_threshold",
                                                                    threshold=threshold)
                pred_strong_m = scipy.ndimage.filters.median_filter(pred_strong_bin, (median_window, 1))
                pred = decoder(pred_strong_m)
                pred = pd.DataFrame(pred, columns=["event_label", "onset", "offset"])
                # Put them in seconds
                pred.loc[:, ["onset", "offset"]] *= pooling_time_ratio / (cfg.sample_rate / cfg.hop_size)
                pred.loc[:, ["onset", "offset"]] = pred[["onset", "offset"]].clip(0, cfg.max_len_seconds)
                pred["filename"] = dataloader.dataset.filenames.iloc[indexes[j]]
                prediction_dfs[threshold] = prediction_dfs[threshold].append(pred, ignore_index=True)
                if i == 0 and j == 0:
                    logger.debug("predictions: \n{}".format(pred))
                    logger.debug("predictions strong: \n{}".format(pred_strong_it))
    # Save predictions
    if save_predictions is not None:
        if isinstance(save_predictions, str):
            if len(thresholds) == 1:
                save_predictions = [save_predictions]
            else:
                base, ext = osp.splitext(save_predictions)
                # One file per threshold, named after the threshold value
                save_predictions = [osp.join(base, f"{threshold:.3f}{ext}") for threshold in thresholds]
        else:
            assert len(save_predictions) == len(thresholds), \
                f"There should be a prediction file per threshold: len predictions: {len(save_predictions)}\n" \
                f"len thresholds: {len(thresholds)}"
            save_predictions = save_predictions
        for ind, threshold in enumerate(thresholds):
            dir_to_create = osp.dirname(save_predictions[ind])
            if dir_to_create != "":
                os.makedirs(dir_to_create, exist_ok=True)
            if ind % 10 == 0:
                logger.info(f"Saving predictions at: {save_predictions[ind]}. {ind + 1} / {len(thresholds)}")
            prediction_dfs[threshold].to_csv(save_predictions[ind], index=False, sep="\t", float_format="%.3f")
    list_predictions = []
    for key in prediction_dfs:
        list_predictions.append(prediction_dfs[key])
    if len(list_predictions) == 1:
        list_predictions = list_predictions[0]
    return list_predictions
def get_predictions_v2(model, dataloader, decoder, pooling_time_ratio=1, thresholds=[0.5],
                       median_window=1, save_dir=None, save_predictions=None):
    """ Get the predictions of a trained model on a specific set, optionally
    dumping the raw frame-level posteriors of every clip to `save_dir`.

    Args:
        model: torch.Module, a trained pytorch model (you usually want it to be in .eval() mode).
        dataloader: torch.utils.data.DataLoader, giving ((input_data, label), indexes) but label is not used here
        decoder: function, takes a numpy.array of shape (time_steps, n_labels) as input and return a list of lists
            of ("event_label", "onset", "offset") for each label predicted.
        pooling_time_ratio: the division to make between timesteps as input and timesteps as output
        median_window: int, the median window (in number of time steps) to be applied
        save_dir: str, directory in which the per-file posterior matrices are saved as .npy (None disables it)
        save_predictions: str or list, the path of the base_filename to save the predictions or a list of names
            corresponding for each thresholds
        thresholds: list, list of threshold to be applied
    Returns:
        dict of the different predictions with associated threshold
    """
    # Init a dataframe per threshold
    prediction_dfs = {}
    for threshold in thresholds:
        prediction_dfs[threshold] = pd.DataFrame()
    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
    # Get predictions
    for i, ((input_data, _), indexes) in enumerate(dataloader):
        indexes = indexes.numpy()
        input_data = to_cuda_if_available(input_data)
        with torch.no_grad():
            pred_strong, _ = model(input_data)
            pred_strong = pred_strong.cpu()
        pred_strong = pred_strong.detach().numpy()
        if i == 0:
            logger.debug(pred_strong)
        # Post processing and put predictions in a dataframe
        for j, pred_strong_it in enumerate(pred_strong):
            if save_dir is not None:
                filename = dataloader.dataset.filenames.iloc[indexes[j]]
                # Bug fix: str.replace returns a new string and the original call
                # discarded its result, so posteriors were saved as "<name>.wav.npy".
                # Also join paths explicitly instead of concatenating strings.
                save_path = osp.join(save_dir, osp.splitext(filename)[0] + ".npy")
                parent_dir = osp.dirname(save_path)
                if parent_dir != "":
                    # Filenames may contain sub-folders; create them as needed.
                    os.makedirs(parent_dir, exist_ok=True)
                np.save(save_path, pred_strong_it)
            for threshold in thresholds:
                # Binarize the frame-level posteriors, then smooth in time.
                pred_strong_bin = ProbabilityEncoder().binarization(pred_strong_it,
                                                                    binarization_type="global_threshold",
                                                                    threshold=threshold)
                pred_strong_m = scipy.ndimage.filters.median_filter(pred_strong_bin, (median_window, 1))
                pred = decoder(pred_strong_m)
                pred = pd.DataFrame(pred, columns=["event_label", "onset", "offset"])
                # Put them in seconds
                pred.loc[:, ["onset", "offset"]] *= pooling_time_ratio / (cfg.sample_rate / cfg.hop_size)
                pred.loc[:, ["onset", "offset"]] = pred[["onset", "offset"]].clip(0, cfg.max_len_seconds)
                pred["filename"] = dataloader.dataset.filenames.iloc[indexes[j]]
                prediction_dfs[threshold] = prediction_dfs[threshold].append(pred, ignore_index=True)
                if i == 0 and j == 0:
                    logger.debug("predictions: \n{}".format(pred))
                    logger.debug("predictions strong: \n{}".format(pred_strong_it))
    # Save predictions
    if save_predictions is not None:
        if isinstance(save_predictions, str):
            if len(thresholds) == 1:
                save_predictions = [save_predictions]
            else:
                base, ext = osp.splitext(save_predictions)
                # One file per threshold, named after the threshold value
                save_predictions = [osp.join(base, f"{threshold:.3f}{ext}") for threshold in thresholds]
        else:
            assert len(save_predictions) == len(thresholds), \
                f"There should be a prediction file per threshold: len predictions: {len(save_predictions)}\n" \
                f"len thresholds: {len(thresholds)}"
            save_predictions = save_predictions
        for ind, threshold in enumerate(thresholds):
            dir_to_create = osp.dirname(save_predictions[ind])
            if dir_to_create != "":
                os.makedirs(dir_to_create, exist_ok=True)
            if ind % 10 == 0:
                logger.info(f"Saving predictions at: {save_predictions[ind]}. {ind + 1} / {len(thresholds)}")
            prediction_dfs[threshold].to_csv(save_predictions[ind], index=False, sep="\t", float_format="%.3f")
    list_predictions = []
    for key in prediction_dfs:
        list_predictions.append(prediction_dfs[key])
    if len(list_predictions) == 1:
        list_predictions = list_predictions[0]
    return list_predictions
def psds_score(psds, filename_roc_curves=None):
    """ Compute and log the PSDS metrics of a populated PSDSEval object.

    Args:
        psds: psds.PSDSEval object initialized with the groundtruth corresponding to the predictions
        filename_roc_curves: str, the base filename of the roc curve to be computed
    """
    try:
        # The (alpha_ct, alpha_st, max_efpr) triplets below are the operating
        # conditions used in the DCASE task: cross-trigger and class-variance
        # penalties switched on one at a time.
        psds_base_score = psds.psds(alpha_ct=0, alpha_st=0, max_efpr=100)
        logger.info(f"\nPSD-Score (0, 0, 100): {psds_base_score.value:.5f}")
        psds_ct_score = psds.psds(alpha_ct=1, alpha_st=0, max_efpr=100)
        logger.info(f"\nPSD-Score (1, 0, 100): {psds_ct_score.value:.5f}")
        psds_macro_score = psds.psds(alpha_ct=0, alpha_st=1, max_efpr=100)
        logger.info(f"\nPSD-Score (0, 1, 100): {psds_macro_score.value:.5f}")
        if filename_roc_curves is not None:
            if osp.dirname(filename_roc_curves) != "":
                os.makedirs(osp.dirname(filename_roc_curves), exist_ok=True)
            base, ext = osp.splitext(filename_roc_curves)
            plot_psd_roc(psds_base_score, filename=f"{base}_0_0_100{ext}")
            plot_psd_roc(psds_ct_score, filename=f"{base}_1_0_100{ext}")
            # Bug fix: the (0, 0, 100) curve was previously plotted again here
            # instead of the macro (0, 1, 100) one.
            plot_psd_roc(psds_macro_score, filename=f"{base}_0_1_100{ext}")
    except psds_eval.psds.PSDSEvalError as e:
        logger.error("psds score did not work ....")
        logger.error(e)
def compute_sed_eval_metrics(predictions, groundtruth):
    """Log event-based and segment-based sed_eval metrics.

    Args:
        predictions: pd.DataFrame, strongly labeled system output
        groundtruth: pd.DataFrame, strongly labeled reference

    Returns:
        sed_eval.sound_event.EventBasedMetrics, the event-based results
    """
    event_res = event_based_evaluation_df(groundtruth, predictions,
                                          t_collar=0.200, percentage_of_length=0.2)
    segment_res = segment_based_evaluation_df(groundtruth, predictions,
                                              time_resolution=1.)
    for res in (event_res, segment_res):
        logger.info(res)
    return event_res
def format_df(df, mhe):
    """ Make a weak labels dataframe from strongly labeled (join labels)

    Args:
        df: pd.DataFrame, the dataframe strongly labeled with onset and offset columns (+ event_label)
        mhe: ManyHotEncoder object, the many hot encoder object that can encode the weak labels

    Returns:
        weakly labeled dataframe (unchanged when `df` is already weak)
    """
    def _collapse(group):
        # One row per file: unique non-NaN labels, encoded as a weak vector.
        labels = group["event_label"].drop_duplicates().dropna().tolist()
        return pd.Series(dict(filename=group['filename'].iloc[0],
                              event_label=mhe.encode_weak(labels)))

    if "onset" in df.columns or "offset" in df.columns:
        df = df.groupby("filename", as_index=False).apply(_collapse)
    return df
def get_f_measure_by_class(torch_model, nb_tags, dataloader_, thresholds_=None):
    """ get f measure for each class given a model and a generator of data (batch_x, y)

    Args:
        torch_model : Model, model to get predictions, forward should return weak and strong predictions
        nb_tags : int, number of classes which are represented
        dataloader_ : generator, data generator used to get f_measure
        thresholds_ : int or list, thresholds to apply to each class to binarize probabilities

    Returns:
        macro_f_measure : list, f measure for each class
    """
    if torch.cuda.is_available():
        torch_model = torch_model.cuda()
    # Calculate external metrics
    tp = np.zeros(nb_tags)
    tn = np.zeros(nb_tags)
    fp = np.zeros(nb_tags)
    fn = np.zeros(nb_tags)
    for counter, (batch_x, y) in enumerate(dataloader_):
        if torch.cuda.is_available():
            batch_x = batch_x.cuda()
        pred_strong, pred_weak = torch_model(batch_x)
        pred_weak = pred_weak.cpu().data.numpy()
        labels = y.numpy()
        # Used only with a model predicting only strong outputs
        if len(pred_weak.shape) == 3:
            # average data to have weak labels
            pred_weak = np.max(pred_weak, axis=1)
        if len(labels.shape) == 3:
            # Strong labels: pool over time, then binarize to weak clip labels.
            labels = np.max(labels, axis=1)
            labels = ProbabilityEncoder().binarization(labels,
                                                       binarization_type="global_threshold",
                                                       threshold=0.5)
        # Single global threshold by default, otherwise one threshold per class.
        if thresholds_ is None:
            binarization_type = 'global_threshold'
            thresh = 0.5
        else:
            binarization_type = "class_threshold"
            assert type(thresholds_) is list
            thresh = thresholds_
        batch_predictions = ProbabilityEncoder().binarization(pred_weak,
                                                              binarization_type=binarization_type,
                                                              threshold=thresh,
                                                              time_axis=0
                                                              )
        # Accumulate the confusion counts over the whole set.
        tp_, fp_, fn_, tn_ = intermediate_at_measures(labels, batch_predictions)
        tp += tp_
        fp += fp_
        fn += fn_
        tn += tn_
    # Per-class F1; classes with no support at all (denominator 0) stay at 0.
    macro_f_score = np.zeros(nb_tags)
    mask_f_score = 2 * tp + fp + fn != 0
    macro_f_score[mask_f_score] = 2 * tp[mask_f_score] / (2 * tp + fp + fn)[mask_f_score]
    return macro_f_score
def intermediate_at_measures(encoded_ref, encoded_est):
    """ Calculate true/false - positives/negatives.

    Args:
        encoded_ref: np.array, the reference array where a 1 means the label is present, 0 otherwise
        encoded_est: np.array, the estimated array, where a 1 means the label is present, 0 otherwise

    Returns:
        tuple
        number of (true positives, false positives, false negatives, true negatives) per class
    """
    both = encoded_ref + encoded_est      # 2 where both are set, 0 where neither is
    diff = encoded_est - encoded_ref      # +1 spurious prediction, -1 missed label
    tp = (both == 2).sum(axis=0)
    tn = (both == 0).sum(axis=0)
    fp = (diff == 1).sum(axis=0)
    fn = (diff == -1).sum(axis=0)
    return tp, fp, fn, tn
def macro_f_measure(tp, fp, fn):
    """ From intermediates measures, give the per-class F-measure

    Args:
        tp: np.array, true positive count per class
        fp: np.array, false positive count per class
        fn: np.array, false negative count per class
            (the original docstring wrongly described these as scalar ints and
            `fn` as "true negatives")

    Returns:
        np.array
        The F-measure of each class; 0 where a class has no support at all
    """
    denominator = 2 * tp + fp + fn
    macro_f_score = np.zeros(tp.shape[-1])
    # Avoid a division by zero for classes never predicted nor referenced.
    valid = denominator != 0
    macro_f_score[valid] = 2 * tp[valid] / denominator[valid]
    return macro_f_score
def audio_tagging_results(reference, estimated):
    """Compute per-class audio tagging (clip-level, weak label) F-scores.

    Args:
        reference: pd.DataFrame, groundtruth with either strong labels
            ("event_label" + onset/offset columns) or weak labels
            ("event_labels", comma separated)
        estimated: pd.DataFrame, predictions in the same format as `reference`

    Returns:
        pd.Series, F-measure indexed by class label
    """
    classes = []
    if "event_label" in reference.columns:
        # Strongly labeled input: collapse to weak (clip-level) labels first.
        classes.extend(reference.event_label.dropna().unique())
        classes.extend(estimated.event_label.dropna().unique())
        classes = list(set(classes))
        mhe = ManyHotEncoder(classes)
        reference = format_df(reference, mhe)
        estimated = format_df(estimated, mhe)
    else:
        # Weakly labeled input: "event_labels" holds comma separated labels.
        # NOTE(review): in this branch the dataframes are not re-encoded, so the
        # "event_label_ref"/"event_label_pred" accesses below look unreachable
        # without error — confirm this path is actually exercised.
        classes.extend(reference.event_labels.str.split(',', expand=True).unstack().dropna().unique())
        classes.extend(estimated.event_labels.str.split(',', expand=True).unstack().dropna().unique())
        classes = list(set(classes))
        mhe = ManyHotEncoder(classes)
    # Outer join so files missing on one side are still scored.
    matching = reference.merge(estimated, how='outer', on="filename", suffixes=["_ref", "_pred"])
    def na_values(val):
        # Files absent on one side get an all-zero encoding instead of NaN.
        if type(val) is np.ndarray:
            return val
        if pd.isna(val):
            return np.zeros(len(classes))
        return val
    if not estimated.empty:
        matching.event_label_pred = matching.event_label_pred.apply(na_values)
        matching.event_label_ref = matching.event_label_ref.apply(na_values)
        tp, fp, fn, tn = intermediate_at_measures(np.array(matching.event_label_ref.tolist()),
                                                  np.array(matching.event_label_pred.tolist()))
        macro_res = macro_f_measure(tp, fp, fn)
    else:
        # No predictions at all: every class scores 0.
        macro_res = np.zeros(len(classes))
    results_serie = pd.DataFrame(macro_res, index=mhe.labels)
    return results_serie[0]
def compute_psds_from_operating_points(list_predictions, groundtruth_df, meta_df, dtc_threshold=0.5, gtc_threshold=0.5,
                                       cttc_threshold=0.3):
    """Build a PSDSEval object and register one operating point per prediction set.

    Args:
        list_predictions: iterable of pd.DataFrame, one prediction set per threshold
        groundtruth_df: pd.DataFrame, strong groundtruth
        meta_df: pd.DataFrame, per-file durations
        dtc_threshold, gtc_threshold, cttc_threshold: floats, PSDS matching criteria

    Returns:
        the populated psds_eval.PSDSEval object
    """
    psds = PSDSEval(dtc_threshold, gtc_threshold, cttc_threshold,
                    ground_truth=groundtruth_df, metadata=meta_df)
    for pred_df in list_predictions:
        psds.add_operating_point(pred_df)
    return psds
def compute_metrics(predictions, gtruth_df, meta_df):
    """Log sed_eval and psds_eval scores for a single prediction set.

    Returns:
        tuple (event-based macro F1 from sed_eval, macro F1 from psds_eval)
    """
    events_metric = compute_sed_eval_metrics(predictions, gtruth_df)
    macro_f1_event = events_metric.results_class_wise_average_metrics()['f_measure']['f_measure']
    # Same matching criteria as compute_psds_from_operating_points defaults.
    dtc_threshold, gtc_threshold, cttc_threshold = 0.5, 0.5, 0.3
    psds = PSDSEval(dtc_threshold, gtc_threshold, cttc_threshold,
                    ground_truth=gtruth_df, metadata=meta_df)
    psds_macro_f1, _psds_f1_per_class = psds.compute_macro_f_score(predictions)
    logger.info(f"F1_score (psds_eval) accounting cross triggers: {psds_macro_f1}")
    return macro_f1_event, psds_macro_f1
| 22,296 | 42.044402 | 119 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/TestModel_dual.py | # -*- coding: utf-8 -*-
import argparse
import os.path as osp
import torch
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from data_utils.DataLoad import DataLoadDf
from data_utils.Desed import DESED
from evaluation_measures import psds_score, get_predictions_v2, \
compute_psds_from_operating_points, compute_metrics
from utilities.utils import to_cuda_if_available, generate_tsv_wav_durations, meta_path_to_audio_dir
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
from utilities.Logger import create_logger
from utilities.Scaler import Scaler, ScalerPerAudio
from models.CRNN import CRNN
from models.Transformer import Transformer
from models.Conformer_bk import Conformer
import config as cfg
logger = create_logger(__name__)
# Fix the PyTorch RNG seed so repeated evaluation runs are reproducible
torch.manual_seed(2020)
def _load_model(state, model_type, model_name="model"):
    """Rebuild a model from a saved checkpoint state.

    Args:
        state: dict, torch checkpoint holding the model description under `model_name`
        model_type: str, one of "crnn", "transformer", "conformer"
        model_name: str, the key of the model description inside `state`

    Returns:
        the model in eval mode, moved to GPU when available

    Raises:
        ValueError: if `model_type` is not a known architecture
    """
    model_args = state[model_name]["args"]
    model_kwargs = state[model_name]["kwargs"]
    # Bug fix: the original used `model_type is 'crnn'`, an identity comparison
    # on string literals that only works by accident of CPython interning.
    if model_type == 'crnn':
        model = CRNN(*model_args, **model_kwargs)
    elif model_type == 'transformer':
        model = Transformer(*model_args, **model_kwargs)
    elif model_type == 'conformer':
        model = Conformer(*model_args, **model_kwargs)
    else:
        # Previously an unknown type fell through and crashed with NameError.
        raise ValueError(f"Unknown model_type: {model_type}")
    model.load_state_dict(state[model_name]["state_dict"])
    model.eval()
    model = to_cuda_if_available(model)
    logger.info("Model loaded at epoch: {}".format(state["epoch"]))
    logger.info(model)
    return model
def _load_model_v2(state, model_id, model_type, model_name="model"):
    """Rebuild one of the two co-trained models from a saved checkpoint state.

    Args:
        state: dict, torch checkpoint holding the model description under `model_name`
        model_id: int, 1 or 2, selecting which of the two saved weight sets to load
        model_type: str, one of "crnn", "transformer", "conformer"
        model_name: str, the key of the model description inside `state`

    Returns:
        the model in eval mode, moved to GPU when available

    Raises:
        ValueError: on an unknown `model_type` or an invalid `model_id`
    """
    model_args = state[model_name]["args"]
    model_kwargs = state[model_name]["kwargs"]
    # Bug fix: the original used `model_type is 'crnn'`, an identity comparison
    # on string literals that only works by accident of CPython interning.
    if model_type == 'crnn':
        model = CRNN(*model_args, **model_kwargs)
    elif model_type == 'transformer':
        model = Transformer(*model_args, **model_kwargs)
    elif model_type == 'conformer':
        model = Conformer(*model_args, **model_kwargs)
    else:
        raise ValueError(f"Unknown model_type: {model_type}")
    if model_id == 1:
        model.load_state_dict(state[model_name]["state_dict1"])
    elif model_id == 2:
        model.load_state_dict(state[model_name]["state_dict2"])
    else:
        # Previously an invalid id silently returned an uninitialized model.
        raise ValueError(f"model_id must be 1 or 2, got {model_id}")
    model.eval()
    model = to_cuda_if_available(model)
    logger.info("Model loaded at epoch: {}".format(state["epoch"]))
    logger.info(model)
    return model
def _load_scaler(state):
    """Rebuild the feature scaler stored in a checkpoint state.

    Raises:
        NotImplementedError: when the saved scaler type is unknown.
    """
    scaler_state = state["scaler"]
    scaler_type = scaler_state["type"]
    if scaler_type == "ScalerPerAudio":
        scaler = ScalerPerAudio(*scaler_state["args"])
    elif scaler_type == "Scaler":
        scaler = Scaler()
    else:
        raise NotImplementedError("Not the right type of Scaler has been saved in state")
    scaler.load_state_dict(scaler_state["state_dict"])
    return scaler
def _load_state_vars(state, gtruth_df, median_win=None):
    """Prepare everything needed to run inference from a checkpoint state.

    Args:
        state: dict, torch checkpoint holding the model, scaler and encoder descriptions
        gtruth_df: pd.DataFrame, groundtruth dataframe used to build the dataloader
        median_win: int, optional median filter length; defaults to the value stored in `state`

    Returns:
        dict with keys "model", "dataloader", "pooling_time_ratio",
        "many_hot_encoder" and "median_window"
    """
    pred_df = gtruth_df.copy()
    # Define dataloader
    many_hot_encoder = ManyHotEncoder.load_state_dict(state["many_hot_encoder"])
    scaler = _load_scaler(state)
    model = _load_model_v2(state, 1, 'crnn')
    # Validation-time transforms only: no noise augmentation at test time.
    # (The original also built an unused training transform with noise, and
    # loaded the ManyHotEncoder a second time; both removed.)
    transforms_valid = get_transforms(cfg.max_frames, scaler=scaler, add_axis=0)
    strong_dataload = DataLoadDf(pred_df, many_hot_encoder.encode_strong_df, transforms_valid, return_indexes=True)
    strong_dataloader_ind = DataLoader(strong_dataload, batch_size=cfg.batch_size, drop_last=False)
    pooling_time_ratio = state["pooling_time_ratio"]
    if median_win is None:
        median_win = state["median_window"]
    return {
        "model": model,
        "dataloader": strong_dataloader_ind,
        "pooling_time_ratio": pooling_time_ratio,
        "many_hot_encoder": many_hot_encoder,
        "median_window": median_win
    }
def get_variables(args):
    """Derive the paths and values needed for evaluation from the CLI args.

    Args:
        args: argparse.Namespace with ``model_path``, ``groundtruth_tsv``,
            ``median_window``, ``meta_gt`` and ``groundtruth_audio_dir``.

    Returns:
        tuple: (model path, median window, groundtruth audio dir,
        groundtruth DataFrame, durations DataFrame).
    """
    model_pth = args.model_path
    median_win = args.median_window
    gt_fname, ext = osp.splitext(args.groundtruth_tsv)
    # Default the durations file to sit next to the groundtruth tsv.
    meta_gt = args.meta_gt if args.meta_gt is not None else gt_fname + "_durations" + ext
    gt_audio_pth = args.groundtruth_audio_dir
    if gt_audio_pth is None:
        gt_audio_pth = meta_path_to_audio_dir(gt_fname)
        # Useful because of the data format: the validation subsets share one
        # audio folder one level up.
        if "validation" in gt_audio_pth:
            gt_audio_pth = osp.dirname(gt_audio_pth)
    groundtruth = pd.read_csv(args.groundtruth_tsv, sep="\t")
    if not osp.exists(meta_gt):
        # No durations file yet: compute it from the audio files.
        meta_dur_df = generate_tsv_wav_durations(gt_audio_pth, meta_gt)
    else:
        meta_dur_df = pd.read_csv(meta_gt, sep='\t')
        if len(meta_dur_df) == 0:
            # Empty durations file: regenerate it.
            meta_dur_df = generate_tsv_wav_durations(gt_audio_pth, meta_gt)
    return model_pth, median_win, gt_audio_pth, groundtruth, meta_dur_df
if __name__ == '__main__':
    # Evaluation driver: load a trained checkpoint, run predictions on a
    # groundtruth set and report event-based metrics.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-m", '--model_path', type=str, required=True,
                        help="Path of the model to be evaluated")
    parser.add_argument("-g", '--groundtruth_tsv', type=str, required=True,
                        help="Path of the groundtruth tsv file")
    # Not required after that, but recommended to defined
    parser.add_argument("-mw", "--median_window", type=int, default=None,
                        help="Nb of frames for the median window, "
                             "if None the one defined for testing after training is used")
    # Next groundtruth variable could be ommited if same organization than DESED dataset
    parser.add_argument('--meta_gt', type=str, default=None,
                        help="Path of the groundtruth description of feat_filenames and durations")
    parser.add_argument("-ga", '--groundtruth_audio_dir', type=str, default=None,
                        help="Path of the groundtruth filename, (see in config, at dataset folder)")
    parser.add_argument("-s", '--save_predictions_path', type=str, default=None,
                        help="Path for the predictions to be saved (if needed)")
    # Dev
    parser.add_argument("-n", '--nb_files', type=int, default=None,
                        help="Number of files to be used. Useful when testing on small number of files.")
    parser.add_argument("-sp", '--save_posterior', type=str, default=None,
                        help="Save path for posterior")
    f_args = parser.parse_args()
    # Get variables from f_args
    model_path, median_window, gt_audio_dir, groundtruth, durations = get_variables(f_args)
    # Model: load the full training state on CPU, then rebuild the dataloader,
    # scaler, encoder and model from it.
    expe_state = torch.load(model_path, map_location="cpu")
    dataset = DESED(base_feature_dir=osp.join(cfg.workspace, "dataset", "features"), compute_log=False)
    gt_df_feat = dataset.initialize_and_get_df(f_args.groundtruth_tsv, gt_audio_dir, nb_files=f_args.nb_files)
    params = _load_state_vars(expe_state, gt_df_feat, median_window)
    # Preds with only one value (single decision threshold)
    single_predictions = get_predictions_v2(params["model"], params["dataloader"],
                                         params["many_hot_encoder"].decode_strong, params["pooling_time_ratio"],
                                         median_window=params["median_window"], save_dir=f_args.save_posterior,
                                         save_predictions=f_args.save_predictions_path)
    compute_metrics(single_predictions, groundtruth, durations)
    '''
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions(params["model"], params["dataloader"],
                                     params["many_hot_encoder"].decode_strong, params["pooling_time_ratio"],
                                     thresholds=list_thresholds, median_window=params["median_window"],
                                     save_predictions=f_args.save_predictions_path)
    psds = compute_psds_from_operating_points(pred_ss_thresh, groundtruth, durations)
    fname_roc = None
    if f_args.save_predictions_path is not None:
        fname_roc = osp.splitext(f_args.save_predictions_path)[0] + "_roc.png"
    psds_score(psds, filename_roc_curves=fname_roc)
    '''
| 8,284 | 40.633166 | 115 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/config.py | import logging
import math
import os
import pandas as pd
import numpy as np
# Root of the DESED databases and of the working directory (features, models).
dataspace = "/home/Databases/DESED/"
workspace = ".."
# DESED Paths
weak = os.path.join(dataspace, 'dcase2019/dataset/metadata/train/weak.tsv')
unlabel = os.path.join(dataspace, 'dcase2019/dataset/metadata/train/unlabel_in_domain.tsv')
synthetic = os.path.join(dataspace, 'dataset/metadata/train/synthetic20/soundscapes.tsv')
validation = os.path.join(dataspace, 'dcase2019/dataset/metadata/validation/validation.tsv')
test2018 = os.path.join(dataspace, 'dataset/metadata/validation/test_dcase2018.tsv')
eval2018 = os.path.join(dataspace, 'dataset/metadata/validation/eval_dcase2018.tsv')
eval_desed = os.path.join(dataspace, "dataset/metadata/eval/public.tsv")
# Useful because does not correspond to the tsv file path (metadata replace by audio), (due to subsets test/eval2018)
audio_validation_dir = os.path.join(dataspace, 'dcase2019/dataset/audio/validation')
# Separated data (source-separated versions of the audio subsets)
weak_ss = os.path.join(dataspace, 'weaklabel_speech')
unlabel_ss = os.path.join(dataspace, 'unlabel_speech')
synthetic_ss = os.path.join(dataspace, 'dataset/audio/train/synthetic20/separated_sources')
validation_ss = os.path.join(dataspace, 'dataset/audio/validation_ss/separated_sources')
eval_desed_ss = os.path.join(dataspace, "dataset/audio/eval/public_ss/separated_sources")
# Scaling data: "dataset" normalizes with global statistics, otherwise per audio
scaler_type = "dataset"
# Data preparation
ref_db = -55
sample_rate = 16000
max_len_seconds =10.
# features (STFT/mel parameters)
n_window = 2048 #1024
hop_size = 255 #323
n_mels = 128 #64
# Number of feature frames for a max-length clip.
max_frames = math.ceil(max_len_seconds * sample_rate / hop_size)
mel_f_min = 0.
mel_f_max = 8000.
# Model (mean-teacher consistency / ramp-up weights)
max_consistency_cost = 2.0
max_rampup_weight = 3.0 # 1.0
# Training
in_memory = True
in_memory_unlab = False
num_workers = 12
batch_size = 24
noise_snr = 30
n_epoch = 300
n_epoch_rampup = 50
n_epoch_rampup2 = 100
checkpoint_epochs = 1
save_best = True
early_stopping = None
es_init_wait = 50 # es for early stopping
adjust_lr = True
max_learning_rate = 0.001 # Used if adjust_lr is True
default_learning_rate = 0.001 # Used if adjust_lr is False
# Post processing
median_window_s = 0.45
# Classes: read the event labels from the validation metadata.
# NOTE(review): os.path.join with an absolute second argument returns the
# second argument, so `file_path` is effectively ignored here — confirm.
file_path = os.path.abspath(os.path.dirname(__file__))
classes = pd.read_csv(os.path.join(file_path, validation), sep="\t").event_label.dropna().sort_values().unique()
nClass = len(classes)
# Logger
terminal_level = logging.INFO
# Make class label: enumerate every label vector with at most 2 active events
# (the all-zero row, all single-event rows, all pairs). Used by the training
# code to map combination posteriors back to per-class targets.
# NOTE(review): the loop variable `iter` shadows the builtin of the same name.
tlab = np.diag(np.ones(nClass),-1)[:,:-1]
bag = [tlab]
for iter in range(1,nClass):
    temp = np.diag(np.ones(nClass)) + np.diag(np.ones(nClass),iter)[:nClass,:nClass]
    bag.append(temp[:nClass-iter,:])
for iter in range(1,nClass):
    for jter in range(1,nClass-iter):
        temp = np.diag(np.ones(nClass)) + np.diag(np.ones(nClass),iter)[:nClass, :nClass] + np.diag(np.ones(nClass),iter+jter)[:nClass,:nClass]
        bag.append(temp[:nClass-(iter+jter),:])
class_label = np.concatenate(bag,0)
# Total number of label combinations.
nComs = class_label.shape[0]
#temp = []
#for iter in range(157):
# temp.append(np.reshape(class_label,(1,nComs,nClass)))
#class_label_ext = np.concatenate(temp,0)
| 3,061 | 31.924731 | 143 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/main_CRST_model_v2.py | # -*- coding: utf-8 -*-
import argparse
import datetime
import inspect
import os
import time
from pprint import pprint
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
from data_utils.Desed import DESED
from data_utils.DataLoad import DataLoadDf, ConcatDataset, MultiStreamBatchSampler
from TestModel_dual import _load_model_v2
from evaluation_measures import get_predictions, psds_score, compute_psds_from_operating_points, compute_metrics
from models.CRNN import CRNN
import config as cfg
from utilities import ramps
from utilities.Logger import create_logger
from utilities.Scaler import ScalerPerAudio, Scaler
from utilities.utils import SaveBest, JSD, to_cuda_if_available, weights_init, AverageMeterSet, EarlyStopping, \
get_durations_df, median_smoothing
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms, get_transforms_v2
def adjust_learning_rate(optimizer, rampup_value, rampdown_value=1):
    """Apply the ramp-up/ramp-down learning-rate schedule to an optimizer.

    LR warm-up to handle large minibatch sizes, following
    https://arxiv.org/abs/1706.02677

    Args:
        optimizer: torch.optim.Optimizer whose param groups are updated in place.
        rampup_value: float, value between 0 and 1 that increases linearly.
        rampdown_value: float, value between 1 and 0 that decreases linearly.

    Returns:
        None; the optimizer's param groups are modified in place.
    """
    scheduled_lr = rampup_value * rampdown_value * cfg.max_learning_rate
    for group in optimizer.param_groups:
        group['lr'] = scheduled_lr
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update the teacher (EMA) weights in place from the student weights.

    Computes ``ema = alpha * ema + (1 - alpha) * student`` for every parameter
    pair, with ``alpha`` capped so the EMA behaves like a true average during
    the very first optimization steps.

    Args:
        model: torch.nn.Module, student model (source of the weights).
        ema_model: torch.nn.Module, teacher model, updated in place.
        alpha: float, EMA smoothing coefficient in [0, 1].
        global_step: int, number of optimization steps performed so far.
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_params, params in zip(ema_model.parameters(), model.parameters()):
        # Keyword form `add_(tensor, alpha=...)`: the positional-scalar
        # overload `add_(scalar, tensor)` is deprecated in recent PyTorch.
        ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
def train(train_loader, model1, model2, optimizer1, optimizer2, c_epoch, ema_model1=None, ema_model2=None, mask_weak=None, mask_strong=None, adjust_lr=False):
    """ One epoch of co-training with two student/teacher (mean-teacher) pairs.

    Each student is trained on supervised BCE losses (weak + strong labels) plus
    an MSE loss against pseudo labels estimated from the *other* pair's teacher,
    weighted by a reliability score (1 - Jensen-Shannon divergence to the true
    labels, ramped up over training).

    Args:
        train_loader: torch.utils.data.DataLoader, iterator of training batches for an epoch.
            Should return a tuple: (((input, augmented input), labels2), labels)
        model1: torch.Module, first student model, returns a weak and strong prediction
        model2: torch.Module, second student model (fed the augmented stream)
        optimizer1: torch.Module, optimizer of model1
        optimizer2: torch.Module, optimizer of model2
        c_epoch: int, the current epoch of training
        ema_model1: torch.Module, teacher (EMA) counterpart of model1
        ema_model2: torch.Module, teacher (EMA) counterpart of model2
        mask_weak: slice or list, mask the batch to get only the weak labeled data (used to calculate the loss)
        mask_strong: slice or list, mask the batch to get only the strong labeled data (used to calcultate the loss)
        adjust_lr: bool, Whether or not to adjust the learning rate during training (params in config)

    Returns:
        tuple (loss1, loss2): losses of the two students on the last batch.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    class_criterion = nn.BCELoss(reduction='none')
    mse_criterion = nn.MSELoss(reduction='none')
    # NOTE(review): reliability_criterion is built but never used below.
    reliability_criterion = nn.CrossEntropyLoss(reduction='none')
    jsd = JSD()
    softmax = nn.Softmax(dim=1)
    # All valid event-combination label vectors (built in config).
    class_label = torch.tensor(cfg.class_label).cuda()
    class_criterion, mse_criterion, softmax = to_cuda_if_available(class_criterion, mse_criterion, softmax)
    meters = AverageMeterSet()
    log.debug("Nb batches: {}".format(len(train_loader)))
    start = time.time()
    #plabel = []
    for i, (((batch_input, batch_input_ema), target2), target) in enumerate(train_loader):
        global_step = c_epoch * len(train_loader) + i
        rampup_value = ramps.exp_rampup(global_step, cfg.n_epoch_rampup2*len(train_loader))
        if adjust_lr:
            # The second optimizer is ramped to a slightly lower LR (x0.9).
            adjust_learning_rate(optimizer1, rampup_value, rampdown_value=1.0)
            adjust_learning_rate(optimizer2, rampup_value, rampdown_value=0.9)
        meters.update('lr', optimizer1.param_groups[0]['lr'])
        target2 = target2.type(torch.FloatTensor)
        batch_input, batch_input_ema, target, target2 = to_cuda_if_available(batch_input, batch_input_ema, target, target2)
        # Outputs: pair 1 — student on the clean stream, teacher on the augmented one.
        strong_pred1, weak_pred1 = model1(batch_input)
        strong_predict1, weak_predict1 = ema_model1(batch_input_ema)
        strong_predict1 = strong_predict1.detach()
        weak_predict1 = weak_predict1.detach()
        # data augmentation: pair 2 — streams swapped relative to pair 1.
        strong_pred2, weak_pred2 = model2(batch_input_ema)
        strong_predict2, weak_predict2 = ema_model2(batch_input)
        strong_predict2 = strong_predict2.detach()
        weak_predict2 = weak_predict2.detach()
        # Weak BCE Loss
        target_weak = target.max(-2)[0] # Take the max in the time axis
        target2_weak = target2.max(-2)[0]
        if mask_weak is not None:
            weak_class_loss1 = class_criterion(weak_pred1[mask_weak], target_weak[mask_weak]).mean()
            weak_class_loss2 = class_criterion(weak_pred2[mask_weak], target2_weak[mask_weak]).mean()
            if i == 0:
                log.debug(f"target: {target.mean(-2)} \n Target_weak: {target_weak} \n "
                          f"Target weak mask: {target_weak[mask_weak]} \n "
                          f"Target strong mask: {target[mask_strong].sum(-2)}\n"
                          f"weak loss1: {weak_class_loss1} \t rampup_value: {rampup_value}"
                          f"weak loss2: {weak_class_loss2} \t rampup_value: {rampup_value}"
                          f"tensor mean: {batch_input.mean()}")
            meters.update('weak_class_loss1', weak_class_loss1.item())
            meters.update('weak_class_loss2', weak_class_loss2.item())
        # Strong BCE loss
        if mask_strong is not None:
            strong_class_loss1 = class_criterion(strong_pred1[mask_strong], target[mask_strong]).mean()
            strong_class_loss2 = class_criterion(strong_pred2[mask_strong], target2[mask_strong]).mean()
            meters.update('Strong loss1', strong_class_loss1.item())
            meters.update('Strong loss2', strong_class_loss2.item())
        # Teacher-student consistency cost
        if ema_model1 is not None:
            rampup_weight = cfg.max_rampup_weight * rampup_value
            meters.update('Rampup weight', rampup_weight)
            # Self-labeling: build per-frame pseudo labels from both teachers.
            # 157 is the per-clip frame count after pooling — presumably
            # cfg.max_frames // pooling_time_ratio; TODO confirm.
            n_unlabeled = int(3*cfg.batch_size/4)
            est_strong_target1 = torch.zeros(cfg.batch_size,157,cfg.nClass).cuda()
            est_strong_target2 = torch.zeros(cfg.batch_size,157,cfg.nClass).cuda()
            for bter in range(cfg.batch_size):
                # Per-class log-likelihoods of "event active" (h1) and
                # "event inactive" (h0), clamped away from 0/1 for stability.
                sp1 = strong_predict1[bter]
                sp1 = torch.clamp(sp1, 1.0e-4, 1-1.0e-4)
                p1_h1 = torch.log(sp1)
                p1_h0 = torch.log(1-sp1)
                sp2 = strong_predict2[bter]
                sp2 = torch.clamp(sp2, 1.0e-4, 1-1.0e-4)
                p2_h1 = torch.log(sp2)
                p2_h0 = torch.log(1-sp2)
                # Stack both teachers along the frame axis (2 * 157 rows).
                p_h0 = torch.cat((p1_h0, p2_h0), 0)
                p_h1 = torch.cat((p1_h1, p2_h1), 0)
                # K = 0: log-likelihood of "no event active in this frame".
                P0 = p_h0.sum(1)
                # K = 1: one active event (swap one h0 term for its h1 term).
                P1 = P0[:,None] + p_h1 - p_h0
                #P = torch.cat([P0.reshape(157,1), P1], 1)
                # K = 2: every unordered pair of active events.
                P2 = []
                for cter in range(1,cfg.nClass):
                    P2.append(P1[:,:-cter]+P1[:,cter:])
                P2 = torch.cat(P2, 1)
                P2 = P2 - P0[:,None]
                P = torch.cat([P0.reshape(157*2,1), P1, P2], 1)
                # K: up to 3
                #P3 = []
                #for cter1 in range(1,cfg.nClass):
                # for cter2 in range(1, cfg.nClass-cter1):
                # P3.append(P1[:,:-(cter1+cter2)]+P1[:,cter1:-cter2]+P1[:,(cter1+cter2):])
                #P3 = torch.cat(P3,1)
                #P3 = P3 - 2*P0[:,None]
                #P = torch.cat([P0.reshape(157,1), P1, P2, P3], 1)
                # Posterior over label combinations; the pseudo label is the
                # posterior-weighted mixture of the combination label vectors.
                P = softmax(P)
                prob_v, prob_i = torch.sort(P, dim=1, descending=True)
                norm_p = prob_v.sum(1)
                prob_v = prob_v/norm_p[:,None]
                cl = class_label[prob_i.tolist(),:]
                cl = torch.mul(cl, prob_v[:,:,None]).sum(1)
                # First 157 rows came from teacher 1, the rest from teacher 2.
                est_strong_target1[bter,:,:] = torch.squeeze(cl[:157,:])
                est_strong_target2[bter,:,:] = torch.squeeze(cl[157:,:])
            est_weak_target1 = est_strong_target1.mean(1)
            est_weak_target2 = est_strong_target2.mean(1)
            # Reliability = ramped (1 - JSD between pseudo and true labels).
            strong_reliability1 = rampup_weight*(1-jsd.apply(est_strong_target1[mask_strong], target2[mask_strong]).mean())
            strong_reliability2 = rampup_weight*(1-jsd.apply(est_strong_target2[mask_strong], target[mask_strong]).mean())
            weak_reliability1 = rampup_weight*(1-jsd.apply(est_weak_target1[mask_weak], target2_weak[mask_weak]).mean())
            weak_reliability2 = rampup_weight*(1-jsd.apply(est_weak_target2[mask_weak], target_weak[mask_weak]).mean())
            meters.update('Reliability of pseudo label1', strong_reliability1.item())
            meters.update('Reliability of pseudo label2', strong_reliability2.item())
            # classification error with pseudo label: each student learns from
            # the OTHER pair's pseudo labels on unlabeled and weak data.
            pred_strong_loss1 = mse_criterion(strong_pred1[6:n_unlabeled], est_strong_target2[6:n_unlabeled]).mean([1,2])
            pred_weak_loss1 = mse_criterion(strong_pred1[mask_weak], est_strong_target2[mask_weak]).mean([1,2])
            pred_strong_loss2 = mse_criterion(strong_pred2[6:n_unlabeled], est_strong_target1[6:n_unlabeled]).mean([1,2])
            pred_weak_loss2 = mse_criterion(strong_pred2[mask_weak], est_strong_target1[mask_weak]).mean([1,2])
            expect_loss1 = strong_reliability2*pred_strong_loss1.mean() + weak_reliability2*pred_weak_loss1.mean()
            expect_loss2 = strong_reliability1*pred_strong_loss2.mean() + weak_reliability1*pred_weak_loss2.mean()
            meters.update('Expectation of predict loss1', expect_loss1.item())
            meters.update('Expectation of predict loss2', expect_loss2.item())
        loss1 = weak_class_loss1 + strong_class_loss1 + expect_loss1
        loss2 = weak_class_loss2 + strong_class_loss2 + expect_loss2
        meters.update('Loss1', loss1.item())
        meters.update('Loss2', loss2.item())
        # Skip the optimizer step on a diverged/NaN loss instead of updating.
        if (np.isnan(loss1.item()) or loss1.item() > 1e5):
            print(loss1)
            print(loss2)
        else:
            # compute gradient and do optimizer step
            optimizer1.zero_grad()
            loss1.backward()
            optimizer1.step()
            optimizer2.zero_grad()
            loss2.backward()
            optimizer2.step()
        global_step += 1
        if ema_model1 is not None:
            update_ema_variables(model1, ema_model1, 0.999, global_step)
        if ema_model2 is not None:
            update_ema_variables(model2, ema_model2, 0.999, global_step)
    epoch_time = time.time() - start
    log.info(f"Epoch: {c_epoch}\t Time {epoch_time:.2f}\t {meters}")
    return loss1, loss2
def get_dfs(desed_dataset, nb_files=None, separated_sources=False):
    """Load every DESED subset as DataFrames and split synthetic into train/valid.

    Args:
        desed_dataset: DESED instance used to read metadata and extract features.
        nb_files: int, optional; restrict each subset to this many files (dev runs).
        separated_sources: bool, whether to also point at the source-separated audio.

    Returns:
        dict of DataFrames with keys "weak", "unlabel", "synthetic",
        "train_synthetic", "valid_synthetic", "validation".
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    audio_weak_ss = None
    audio_unlabel_ss = None
    audio_validation_ss = None
    audio_synthetic_ss = None
    if separated_sources:
        audio_weak_ss = cfg.weak_ss
        audio_unlabel_ss = cfg.unlabel_ss
        audio_validation_ss = cfg.validation_ss
        audio_synthetic_ss = cfg.synthetic_ss
    weak_df = desed_dataset.initialize_and_get_df(cfg.weak, audio_dir_ss=audio_weak_ss, nb_files=nb_files)
    unlabel_df = desed_dataset.initialize_and_get_df(cfg.unlabel, audio_dir_ss=audio_unlabel_ss, nb_files=nb_files)
    # Event if synthetic not used for training, used on validation purpose
    synthetic_df = desed_dataset.initialize_and_get_df(cfg.synthetic, audio_dir_ss=audio_synthetic_ss,
                                                       nb_files=nb_files, download=False)
    log.debug(f"synthetic: {synthetic_df.head()}")
    validation_df = desed_dataset.initialize_and_get_df(cfg.validation, audio_dir=cfg.audio_validation_dir,
                                                        audio_dir_ss=audio_validation_ss, nb_files=nb_files)
    # Divide synthetic in train and valid (80/20 split by filename, fixed seed).
    filenames_train = synthetic_df.filename.drop_duplicates().sample(frac=0.8, random_state=26)
    train_synth_df = synthetic_df[synthetic_df.filename.isin(filenames_train)]
    valid_synth_df = synthetic_df.drop(train_synth_df.index).reset_index(drop=True)
    # Put train_synth in frames so many_hot_encoder can work.
    # Not doing it for valid, because not using labels (when prediction) and event based metric expect sec.
    # NOTE(review): pooling_time_ratio is read from module scope (set in the
    # __main__ block); this function assumes it is already defined.
    train_synth_df.onset = train_synth_df.onset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    train_synth_df.offset = train_synth_df.offset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    log.debug(valid_synth_df.event_label.value_counts())
    data_dfs = {"weak": weak_df,
                "unlabel": unlabel_df,
                "synthetic": synthetic_df,
                "train_synthetic": train_synth_df,
                "valid_synthetic": valid_synth_df,
                "validation": validation_df,
                }
    return data_dfs
if __name__ == '__main__':
    # Training driver: build the data pipeline, create (or resume) the two
    # student/teacher model pairs, co-train them, and evaluate the best model.
    torch.manual_seed(2020)
    np.random.seed(2020)
    logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    logger.info("Baseline 2020")
    logger.info(f"Starting time: {datetime.datetime.now()}")
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-s", '--subpart_data', type=int, default=None, dest="subpart_data",
                        help="Number of files to be used. Useful when testing on small number of files.")
    parser.add_argument("-n", '--no_synthetic', dest='no_synthetic', action='store_true', default=False,
                        help="Not using synthetic labels during training")
    f_args = parser.parse_args()
    pprint(vars(f_args))
    reduced_number_of_data = f_args.subpart_data
    no_synthetic = f_args.no_synthetic
    store_dir = os.path.join("stored_data", "MeanTeacher_with_dual_v3_mixup6")
    saved_model_dir = os.path.join(store_dir, "model")
    saved_pred_dir = os.path.join(store_dir, "predictions")
    # Resume training only when a previous model directory already exists.
    if os.path.exists(store_dir):
        if os.path.exists(saved_model_dir):
            load_flag = True
        else:
            load_flag = False
        os.makedirs(saved_model_dir, exist_ok=True)
        os.makedirs(saved_pred_dir, exist_ok=True)
    else:
        load_flag = False
        os.makedirs(store_dir, exist_ok=True)
        os.makedirs(saved_model_dir, exist_ok=True)
        os.makedirs(saved_pred_dir, exist_ok=True)
    n_channel = 1
    add_axis_conv = 0
    # Model taken from 2nd of dcase19 challenge: see Delphin-Poulat2019 in the results.
    n_layers = 7
    crnn_kwargs = {"n_in_channel": n_channel, "nclass": len(cfg.classes), "attention": True, "n_RNN_cell": 128,
                   "n_layers_RNN": 2,
                   "activation": "glu",
                   "dropout": 0.5,
                   "kernel_size": n_layers * [3], "padding": n_layers * [1], "stride": n_layers * [1],
                   "nb_filters": [16, 32, 64, 128, 128, 128, 128],
                   "pooling": [[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]}
    pooling_time_ratio = 4 # 2 * 2
    out_nb_frames_1s = cfg.sample_rate / cfg.hop_size / pooling_time_ratio
    median_window = max(int(cfg.median_window_s * out_nb_frames_1s), 1)
    logger.debug(f"median_window: {median_window}")
    # ##############
    # DATA
    # ##############
    dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
                    compute_log=False)
    dfs = get_dfs(dataset, reduced_number_of_data)
    # Meta path for psds
    durations_synth = get_durations_df(cfg.synthetic)
    many_hot_encoder = ManyHotEncoder(cfg.classes, n_frames=cfg.max_frames // pooling_time_ratio)
    encod_func = many_hot_encoder.encode_strong_df
    # Normalisation per audio or on the full dataset
    if cfg.scaler_type == "dataset":
        transforms = get_transforms(cfg.max_frames, add_axis=add_axis_conv)
        weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
        unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
        train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms)
        scaler_args = []
        scaler = Scaler()
        # # Only on real data since that's our final goal and test data are real
        scaler.calculate_scaler(ConcatDataset([weak_data, unlabel_data, train_synth_data]))
        logger.debug(f"scaler mean: {scaler.mean_}")
    else:
        scaler_args = ["global", "min-max"]
        scaler = ScalerPerAudio(*scaler_args)
    transforms = get_transforms_v2(cfg.max_frames, scaler, add_axis_conv,
                                   shift_dict_params={"net_pooling": 4})
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    weak_data = DataLoadDf(dfs["weak"], encod_func, transforms, in_memory=cfg.in_memory)
    unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms, in_memory=cfg.in_memory_unlab)
    train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms, in_memory=cfg.in_memory)
    valid_synth_data = DataLoadDf(dfs["valid_synthetic"], encod_func, transforms_valid,
                                  return_indexes=True, in_memory=cfg.in_memory)
    logger.debug(f"len synth: {len(train_synth_data)}, len_unlab: {len(unlabel_data)}, len weak: {len(weak_data)}")
    # Batch layout: [weak | unlabeled | strong(synthetic)] streams.
    if not no_synthetic:
        list_dataset = [weak_data, unlabel_data, train_synth_data]
        batch_sizes = [cfg.batch_size//4, cfg.batch_size//2, cfg.batch_size//4]
        strong_mask = slice((3*cfg.batch_size)//4, cfg.batch_size)
    else:
        list_dataset = [weak_data, unlabel_data]
        batch_sizes = [cfg.batch_size // 4, 3 * cfg.batch_size // 4]
        strong_mask = None
    weak_mask = slice(batch_sizes[0]) # Assume weak data is always the first one
    concat_dataset = ConcatDataset(list_dataset)
    sampler = MultiStreamBatchSampler(concat_dataset, batch_sizes=batch_sizes)
    training_loader = DataLoader(concat_dataset, batch_sampler=sampler)
    valid_synth_loader = DataLoader(valid_synth_data, batch_size=cfg.batch_size)
    # ##############
    # Model
    # ##############
    if load_flag:
        # Resume from the most recent checkpoint in the model directory.
        mlist = os.listdir(saved_model_dir)
        modelName = mlist[-1]
        # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
        n_epoch = int(modelName.split('_')[-1]) + 1
        model_fname = os.path.join(saved_model_dir, modelName)
        state = torch.load(model_fname)
        crnn1 = _load_model_v2(state, 1, 'crnn')
        crnn2 = _load_model_v2(state, 2, 'crnn')
        logger.info(f"training model: {model_fname}, epoch: {state['epoch']}")
        crnn1_ema = _load_model_v2(state, 1, 'crnn')
        for param in crnn1_ema.parameters():
            # detach_() (in place): the previous non-inplace detach() discarded
            # its result and was a no-op; the fresh-init branch uses detach_().
            param.detach_()
        crnn2_ema = _load_model_v2(state, 2, 'crnn')
        for param in crnn2_ema.parameters():
            param.detach_()
        optim_kwargs = state['optimizer']["kwargs"]
        optim1 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn1.parameters()), **optim_kwargs)
        optim2 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn2.parameters()), **optim_kwargs)
    else:
        # Fresh start: two students and their EMA teachers, all from scratch.
        n_epoch = 0
        crnn1 = CRNN(**crnn_kwargs)
        crnn2 = CRNN(**crnn_kwargs)
        pytorch_total_params = sum(p.numel() for p in crnn1.parameters() if p.requires_grad)
        logger.info(crnn1)
        logger.info("number of parameters in the model: {}".format(pytorch_total_params))
        crnn1.apply(weights_init)
        crnn2.apply(weights_init)
        crnn1_ema = CRNN(**crnn_kwargs)
        crnn2_ema = CRNN(**crnn_kwargs)
        crnn1_ema.apply(weights_init)
        crnn2_ema.apply(weights_init)
        for param in crnn1_ema.parameters():
            param.detach_()
        for param in crnn2_ema.parameters():
            param.detach_()
        optim_kwargs = {"lr": cfg.default_learning_rate, "betas": (0.9, 0.999)}
        optim1 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn1.parameters()), **optim_kwargs)
        optim2 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn2.parameters()), **optim_kwargs)
    # Checkpoint dict; the state_dict entries are refreshed every epoch.
    state = {
        'model': {"name1": crnn1.__class__.__name__,
                  "name2": crnn2.__class__.__name__,
                  'args': '',
                  "kwargs": crnn_kwargs,
                  'state_dict1': crnn1.state_dict(),
                  'state_dict2': crnn2.state_dict()},
        'model_ema': {"name1": crnn1_ema.__class__.__name__,
                      "name2": crnn2_ema.__class__.__name__,
                      'args': '',
                      "kwargs": crnn_kwargs,
                      'state_dict1': crnn1_ema.state_dict(),
                      'state_dict2': crnn2_ema.state_dict()},
        'optimizer': {"name1": optim1.__class__.__name__,
                      "name2": optim2.__class__.__name__,
                      'args': '',
                      "kwargs": optim_kwargs,
                      'state_dict1': optim1.state_dict(),
                      'state_dict2': optim2.state_dict()},
        "pooling_time_ratio": pooling_time_ratio,
        "scaler": {
            "type": type(scaler).__name__,
            "args": scaler_args,
            "state_dict": scaler.state_dict()},
        "many_hot_encoder": many_hot_encoder.state_dict(),
        "median_window": median_window,
        "desed": dataset.state_dict()
    }
    save_best_cb = SaveBest("sup")
    # save_best_cb = SaveBest("inf")
    if cfg.early_stopping is not None:
        early_stopping_call = EarlyStopping(patience=cfg.early_stopping, val_comp="sup", init_patience=cfg.es_init_wait)
    # ##############
    # Train
    # ##############
    results = pd.DataFrame(columns=["loss", "valid_synth_f1", "weak_metric", "global_valid"])
    for epoch in range(n_epoch, n_epoch+cfg.n_epoch):
        crnn1.train()
        crnn2.train()
        crnn1_ema.train()
        crnn2_ema.train()
        crnn1, crnn2, crnn1_ema, crnn2_ema = to_cuda_if_available(crnn1, crnn2, crnn1_ema, crnn2_ema)
        loss_value, loss_value2 = train(training_loader, crnn1, crnn2, optim1, optim2, epoch,
                              ema_model1=crnn1_ema, ema_model2=crnn2_ema, mask_weak=weak_mask, mask_strong=strong_mask, adjust_lr=cfg.adjust_lr)
        # Validation (model 1 only is used for model selection)
        crnn1 = crnn1.eval()
        logger.info("\n ### Valid synthetic metric ### \n")
        predictions = get_predictions(crnn1, valid_synth_loader, many_hot_encoder.decode_strong, pooling_time_ratio,
                                      median_window=median_window, save_predictions=None)
        # Validation with synthetic data (dropping feature_filename for psds)
        valid_synth = dfs["valid_synthetic"].drop("feature_filename", axis=1)
        valid_synth_f1, psds_m_f1 = compute_metrics(predictions, valid_synth, durations_synth)
        # Update state
        state['model']['state_dict1'] = crnn1.state_dict()
        state['model']['state_dict2'] = crnn2.state_dict()
        state['model_ema']['state_dict1'] = crnn1_ema.state_dict()
        state['model_ema']['state_dict2'] = crnn2_ema.state_dict()
        state['optimizer']['state_dict1'] = optim1.state_dict()
        state['optimizer']['state_dict2'] = optim2.state_dict()
        state['epoch'] = epoch
        state['valid_metric'] = valid_synth_f1
        state['valid_f1_psds'] = psds_m_f1
        # Callbacks: periodic checkpoint, best-model tracking, early stopping.
        if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
            model_fname = os.path.join(saved_model_dir, "baseline_epoch_" + str(epoch))
            torch.save(state, model_fname)
        if cfg.save_best:
            #stop_criterior = (loss_value.item()+loss_value2.item())/2 + np.abs(loss_value.item()-loss_value2.item())
            if save_best_cb.apply(valid_synth_f1):
                model_fname = os.path.join(saved_model_dir, "baseline_best")
                torch.save(state, model_fname)
            results.loc[epoch, "global_valid"] = valid_synth_f1
        results.loc[epoch, "loss"] = loss_value.item()
        results.loc[epoch, "valid_synth_f1"] = valid_synth_f1
        if cfg.early_stopping:
            if early_stopping_call.apply(valid_synth_f1):
                logger.warning("EARLY STOPPING")
                break
    if cfg.save_best:
        model_fname = os.path.join(saved_model_dir, "baseline_best")
        state = torch.load(model_fname)
        crnn = _load_model_v2(state, 1, 'crnn')
        logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
    else:
        logger.info("testing model of last epoch: {}".format(cfg.n_epoch))
    # to_csv writes in place and returns None, so there is nothing to keep.
    pd.DataFrame(results).to_csv(os.path.join(saved_pred_dir, "results.tsv"),
                                 sep="\t", index=False, float_format="%.4f")
    # ##############
    # Validation
    # ##############
    crnn.eval()
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
    validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
    validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
    validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
    durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
    # Preds with only one value
    valid_predictions = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                        pooling_time_ratio, median_window=median_window,
                                        save_predictions=predicitons_fname)
    compute_metrics(valid_predictions, validation_labels_df, durations_validation)
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    # n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    # list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    # pred_ss_thresh = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
    #                                  pooling_time_ratio, thresholds=list_thresholds, median_window=median_window,
    #                                  save_predictions=predicitons_fname)
    # psds = compute_psds_from_operating_points(pred_ss_thresh, validation_labels_df, durations_validation)
    # psds_score(psds, filename_roc_curves=os.path.join(saved_pred_dir, "figures/psds_roc.png"))
| 28,622 | 48.35 | 158 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/main_CRST_model.py | # -*- coding: utf-8 -*-
import argparse
import datetime
import inspect
import os
import time
from pprint import pprint
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
from data_utils.Desed import DESED
from data_utils.DataLoad import DataLoadDf, ConcatDataset, MultiStreamBatchSampler
from TestModel_dual import _load_model_v2
from evaluation_measures import get_predictions, psds_score, compute_psds_from_operating_points, compute_metrics
from models.CRNN import CRNN
import config as cfg
from utilities import ramps
from utilities.Logger import create_logger
from utilities.Scaler import ScalerPerAudio, Scaler
from utilities.utils import SaveBest, JSD, to_cuda_if_available, weights_init, AverageMeterSet, EarlyStopping, \
get_durations_df, median_smoothing
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
def adjust_learning_rate(optimizer, rampup_value, rampdown_value=1):
    """Scale the optimizer's learning rate by the ramp-up/ramp-down factors.

    The target rate is ``rampup_value * rampdown_value * cfg.max_learning_rate``;
    this implements LR warm-up for large minibatches
    (https://arxiv.org/abs/1706.02677).

    Args:
        optimizer: torch.optim.Optimizer, updated in place
        rampup_value: float in [0, 1], increases over training
        rampdown_value: float in [1, 0], decreases over training
    """
    new_lr = rampup_value * rampdown_value * cfg.max_learning_rate
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update the teacher weights as an exponential moving average of the student.

    Args:
        model: torch.nn.Module, student model (source of the new weights)
        ema_model: torch.nn.Module, teacher model, updated in place
        alpha: float, EMA decay rate (upper bound)
        global_step: int, number of optimizer steps taken so far
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_params, params in zip(ema_model.parameters(), model.parameters()):
        # Keyword alpha= form: the positional-alpha overload of Tensor.add_
        # is deprecated and removed in recent PyTorch releases.
        ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
def train(train_loader, model1, model2, optimizer1, optimizer2, c_epoch, ema_model1=None, ema_model2=None, mask_weak=None, mask_strong=None, adjust_lr=False):
    """ One epoch of cross-referenced self-training with two student/teacher (EMA) pairs.

    Each student is trained with supervised BCE losses (weak + strong) plus an
    MSE loss against frame-level pseudo labels derived from BOTH teachers'
    predictions, weighted by how reliable those pseudo labels look on the
    labeled part of the batch (1 - Jensen-Shannon divergence to ground truth).

    Args:
        train_loader: torch.utils.data.DataLoader, iterator of training batches for an epoch.
            Should return a tuple: ((input view 1, input view 2), labels)
        model1: torch.Module, first student model, returns a strong (framewise) and a weak (clipwise) prediction
        model2: torch.Module, second student model, same output contract as model1
        optimizer1: torch optimizer used to train model1
        optimizer2: torch optimizer used to train model2
        c_epoch: int, the current epoch of training
        ema_model1: torch.Module, teacher (EMA) counterpart of model1
        ema_model2: torch.Module, teacher (EMA) counterpart of model2
        mask_weak: slice or list, mask the batch to get only the weak labeled data (used to calculate the loss)
        mask_strong: slice or list, mask the batch to get only the strong labeled data (used to calculate the loss)
        adjust_lr: bool, Whether or not to adjust the learning rate during training (params in config)

    Returns:
        tuple(torch.Tensor, torch.Tensor): the two students' total losses for
        the LAST batch of the epoch.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    class_criterion = nn.BCELoss(reduction='none')
    mse_criterion = nn.MSELoss(reduction='none')
    reliability_criterion = nn.CrossEntropyLoss(reduction='none')  # NOTE(review): defined but never used below
    jsd = JSD()
    softmax = nn.Softmax(dim=1)
    # Rows of cfg.class_label enumerate many-hot label vectors for subsets of
    # active classes; indexed below to turn subset posteriors into soft labels.
    class_label = torch.tensor(cfg.class_label).cuda()
    class_criterion, mse_criterion, softmax = to_cuda_if_available(class_criterion, mse_criterion, softmax)

    meters = AverageMeterSet()
    log.debug("Nb batches: {}".format(len(train_loader)))
    start = time.time()
    #plabel = []
    for i, ((batch_input, batch_input_ema), target) in enumerate(train_loader):
        global_step = c_epoch * len(train_loader) + i
        rampup_value = ramps.exp_rampup(global_step, cfg.n_epoch_rampup2*len(train_loader))

        if adjust_lr:
            # model2 is ramped to a slightly lower peak LR (factor 0.9) than model1.
            adjust_learning_rate(optimizer1, rampup_value, rampdown_value=1.0)
            adjust_learning_rate(optimizer2, rampup_value, rampdown_value=0.9)
        meters.update('lr', optimizer1.param_groups[0]['lr'])
        batch_input, batch_input_ema, target = to_cuda_if_available(batch_input, batch_input_ema, target)

        # Outputs: student 1 sees view 1 while its teacher sees view 2 ...
        strong_pred1, weak_pred1 = model1(batch_input)
        strong_predict1, weak_predict1 = ema_model1(batch_input_ema)
        strong_predict1 = strong_predict1.detach()  # teachers only provide targets, no gradients
        weak_predict1 = weak_predict1.detach()
        # ... and student 2 sees view 2 (data augmentation) while its teacher sees view 1.
        strong_pred2, weak_pred2 = model2(batch_input_ema)
        strong_predict2, weak_predict2 = ema_model2(batch_input)
        strong_predict2 = strong_predict2.detach()
        weak_predict2 = weak_predict2.detach()

        # Weak BCE Loss
        target_weak = target.max(-2)[0]  # Take the max in the time axis
        if mask_weak is not None:
            weak_class_loss1 = class_criterion(weak_pred1[mask_weak], target_weak[mask_weak]).mean()
            weak_class_loss2 = class_criterion(weak_pred2[mask_weak], target_weak[mask_weak]).mean()

            if i == 0:
                log.debug(f"target: {target.mean(-2)} \n Target_weak: {target_weak} \n "
                          f"Target weak mask: {target_weak[mask_weak]} \n "
                          f"Target strong mask: {target[mask_strong].sum(-2)}\n"
                          f"weak loss1: {weak_class_loss1} \t rampup_value: {rampup_value}"
                          f"weak loss2: {weak_class_loss2} \t rampup_value: {rampup_value}"
                          f"tensor mean: {batch_input.mean()}")
            meters.update('weak_class_loss1', weak_class_loss1.item())
            meters.update('weak_class_loss2', weak_class_loss2.item())

        # Strong BCE loss
        if mask_strong is not None:
            strong_class_loss1 = class_criterion(strong_pred1[mask_strong], target[mask_strong]).mean()
            strong_class_loss2 = class_criterion(strong_pred2[mask_strong], target[mask_strong]).mean()
            meters.update('Strong loss1', strong_class_loss1.item())
            meters.update('Strong loss2', strong_class_loss2.item())

        # Teacher-student consistency cost
        if ema_model1 is not None:
            rampup_weight = cfg.max_rampup_weight * rampup_value
            meters.update('Rampup weight', rampup_weight)

            # Self-labeling: derive frame-level pseudo labels from both teachers.
            n_unlabeled = int(3*cfg.batch_size/4)  # first 3/4 of the batch: unlabeled + weak data
            # 157 output frames — presumably cfg.max_frames // pooling_time_ratio; TODO confirm
            est_strong_target1 = torch.zeros(cfg.batch_size,157,cfg.nClass).cuda()
            est_strong_target2 = torch.zeros(cfg.batch_size,157,cfg.nClass).cuda()
            for bter in range(cfg.batch_size):
                # Per-frame Bernoulli log-likelihoods from teacher 1 ...
                sp1 = strong_predict1[bter]
                sp1 = torch.clamp(sp1, 1.0e-4, 1-1.0e-4)  # avoid log(0)
                p1_h1 = torch.log(sp1)    # log p(class active)
                p1_h0 = torch.log(1-sp1)  # log p(class inactive)
                # ... and from teacher 2; the two teachers' frames are stacked (2*157 rows).
                sp2 = strong_predict2[bter]
                sp2 = torch.clamp(sp2, 1.0e-4, 1-1.0e-4)
                p2_h1 = torch.log(sp2)
                p2_h0 = torch.log(1-sp2)
                p_h0 = torch.cat((p1_h0, p2_h0), 0)
                p_h1 = torch.cat((p1_h1, p2_h1), 0)
                # Log-likelihood of each "K classes active" hypothesis per frame:
                # K = 0
                P0 = p_h0.sum(1)
                # K = 1
                P1 = P0[:,None] + p_h1 - p_h0
                #P = torch.cat([P0.reshape(157,1), P1], 1)
                # K = 2
                P2 = []
                for cter in range(1,cfg.nClass):
                    P2.append(P1[:,:-cter]+P1[:,cter:])
                P2 = torch.cat(P2, 1)
                P2 = P2 - P0[:,None]
                P = torch.cat([P0.reshape(157*2,1), P1, P2], 1)
                # K: up to 3 (disabled variant kept for reference)
                #P3 = []
                #for cter1 in range(1,cfg.nClass):
                #    for cter2 in range(1, cfg.nClass-cter1):
                #        P3.append(P1[:,:-(cter1+cter2)]+P1[:,cter1:-cter2]+P1[:,(cter1+cter2):])
                #P3 = torch.cat(P3,1)
                #P3 = P3 - 2*P0[:,None]
                #P = torch.cat([P0.reshape(157,1), P1, P2, P3], 1)
                # Posterior over hypotheses, then pseudo label = posterior-weighted
                # mixture of the corresponding many-hot label rows.
                P = softmax(P)
                prob_v, prob_i = torch.sort(P, dim=1, descending=True)
                norm_p = prob_v.sum(1)
                prob_v = prob_v/norm_p[:,None]
                cl = class_label[prob_i.tolist(),:]
                cl = torch.mul(cl, prob_v[:,:,None]).sum(1)
                # First 157 rows came from teacher 1, the remainder from teacher 2.
                est_strong_target1[bter,:,:] = torch.squeeze(cl[:157,:])
                est_strong_target2[bter,:,:] = torch.squeeze(cl[157:,:])
            est_weak_target1 = est_strong_target1.mean(1)
            est_weak_target2 = est_strong_target2.mean(1)

            # Reliability = ramp-up weight * (1 - JSD to ground truth) measured on
            # the labeled portion of the batch.
            strong_reliability1 = rampup_weight*(1-jsd.apply(est_strong_target1[mask_strong], target[mask_strong]).mean())
            strong_reliability2 = rampup_weight*(1-jsd.apply(est_strong_target2[mask_strong], target[mask_strong]).mean())
            weak_reliability1 = rampup_weight*(1-jsd.apply(est_weak_target1[mask_weak], target_weak[mask_weak]).mean())
            weak_reliability2 = rampup_weight*(1-jsd.apply(est_weak_target2[mask_weak], target_weak[mask_weak]).mean())
            meters.update('Reliability of pseudo label1', strong_reliability1.item())
            meters.update('Reliability of pseudo label2', strong_reliability2.item())

            # classification error with pseudo label: each student learns from the
            # OTHER pair's pseudo labels (cross-referenced self-training).
            # NOTE(review): hard-coded slice start 6 presumably skips part of the
            # weak subset — verify against the batch composition in __main__.
            pred_strong_loss1 = mse_criterion(strong_pred1[6:n_unlabeled], est_strong_target2[6:n_unlabeled]).mean([1,2])
            pred_weak_loss1 = mse_criterion(strong_pred1[mask_weak], est_strong_target2[mask_weak]).mean([1,2])
            pred_strong_loss2 = mse_criterion(strong_pred2[6:n_unlabeled], est_strong_target1[6:n_unlabeled]).mean([1,2])
            pred_weak_loss2 = mse_criterion(strong_pred2[mask_weak], est_strong_target1[mask_weak]).mean([1,2])
            expect_loss1 = strong_reliability2*pred_strong_loss1.mean() + weak_reliability2*pred_weak_loss1.mean()
            expect_loss2 = strong_reliability1*pred_strong_loss2.mean() + weak_reliability1*pred_weak_loss2.mean()
            meters.update('Expectation of predict loss1', expect_loss1.item())
            meters.update('Expectation of predict loss2', expect_loss2.item())

            loss1 = weak_class_loss1 + strong_class_loss1 + expect_loss1
            loss2 = weak_class_loss2 + strong_class_loss2 + expect_loss2
            meters.update('Loss1', loss1.item())
            meters.update('Loss2', loss2.item())

        # Skip the parameter update when the loss explodes or turns NaN.
        if (np.isnan(loss1.item()) or loss1.item() > 1e5):
            print(loss1)
            print(loss2)
        else:
            # compute gradient and do optimizer step
            optimizer1.zero_grad()
            loss1.backward()
            optimizer1.step()
            optimizer2.zero_grad()
            loss2.backward()
            optimizer2.step()
        global_step += 1
        if ema_model1 is not None:
            update_ema_variables(model1, ema_model1, 0.999, global_step)
        if ema_model2 is not None:
            update_ema_variables(model2, ema_model2, 0.999, global_step)

    epoch_time = time.time() - start
    log.info(f"Epoch: {c_epoch}\t Time {epoch_time:.2f}\t {meters}")
    return loss1, loss2
def get_dfs(desed_dataset, nb_files=None, separated_sources=False):
    """Load the metadata DataFrames of every DESED subset used by this script.

    Args:
        desed_dataset: DESED instance used to initialize/read each subset's metadata.
        nb_files: int or None, if set, restrict every subset to this many files
            (useful when debugging on a small amount of data).
        separated_sources: bool, whether to also use the source-separated audio
            directories declared in config.

    Returns:
        dict of pandas.DataFrame with keys "weak", "unlabel", "synthetic",
        "train_synthetic", "valid_synthetic" and "validation".

    Note:
        Reads the module-level global ``pooling_time_ratio`` (defined in the
        __main__ block) to convert synthetic onsets/offsets from seconds to
        output frames — this function must only be called after it is set.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    audio_weak_ss = None
    audio_unlabel_ss = None
    audio_validation_ss = None
    audio_synthetic_ss = None
    if separated_sources:
        audio_weak_ss = cfg.weak_ss
        audio_unlabel_ss = cfg.unlabel_ss
        audio_validation_ss = cfg.validation_ss
        audio_synthetic_ss = cfg.synthetic_ss

    weak_df = desed_dataset.initialize_and_get_df(cfg.weak, audio_dir_ss=audio_weak_ss, nb_files=nb_files)
    unlabel_df = desed_dataset.initialize_and_get_df(cfg.unlabel, audio_dir_ss=audio_unlabel_ss, nb_files=nb_files)
    # Event if synthetic not used for training, used on validation purpose
    synthetic_df = desed_dataset.initialize_and_get_df(cfg.synthetic, audio_dir_ss=audio_synthetic_ss,
                                                       nb_files=nb_files, download=False)
    log.debug(f"synthetic: {synthetic_df.head()}")
    validation_df = desed_dataset.initialize_and_get_df(cfg.validation, audio_dir=cfg.audio_validation_dir,
                                                        audio_dir_ss=audio_validation_ss, nb_files=nb_files)
    # Divide synthetic in train and valid (deterministic 80/20 split by filename)
    filenames_train = synthetic_df.filename.drop_duplicates().sample(frac=0.8, random_state=26)
    train_synth_df = synthetic_df[synthetic_df.filename.isin(filenames_train)]
    valid_synth_df = synthetic_df.drop(train_synth_df.index).reset_index(drop=True)
    # Put train_synth in frames so many_hot_encoder can work.
    # Not doing it for valid, because not using labels (when prediction) and event based metric expect sec.
    # NOTE(review): assignment on a filtered frame may trigger pandas
    # SettingWithCopyWarning; behavior kept as-is.
    train_synth_df.onset = train_synth_df.onset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    train_synth_df.offset = train_synth_df.offset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    log.debug(valid_synth_df.event_label.value_counts())

    data_dfs = {"weak": weak_df,
                "unlabel": unlabel_df,
                "synthetic": synthetic_df,
                "train_synthetic": train_synth_df,
                "valid_synthetic": valid_synth_df,
                "validation": validation_df,
                }
    return data_dfs
if __name__ == '__main__':
    # Reproducibility: fix the RNG seeds for torch and numpy.
    torch.manual_seed(2020)
    np.random.seed(2020)
    logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    logger.info("Baseline 2020")
    logger.info(f"Starting time: {datetime.datetime.now()}")
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-s", '--subpart_data', type=int, default=None, dest="subpart_data",
                        help="Number of files to be used. Useful when testing on small number of files.")
    parser.add_argument("-n", '--no_synthetic', dest='no_synthetic', action='store_true', default=False,
                        help="Not using synthetic labels during training")
    f_args = parser.parse_args()
    pprint(vars(f_args))

    reduced_number_of_data = f_args.subpart_data
    no_synthetic = f_args.no_synthetic

    store_dir = os.path.join("stored_data", "MeanTeacher_with_dual_v2_max3_v2_2")
    saved_model_dir = os.path.join(store_dir, "model")
    saved_pred_dir = os.path.join(store_dir, "predictions")
    # Resume from checkpoints when a model directory from a previous run exists.
    if os.path.exists(store_dir):
        if os.path.exists(saved_model_dir):
            load_flag = True
        else:
            load_flag = False
            os.makedirs(saved_model_dir, exist_ok=True)
            os.makedirs(saved_pred_dir, exist_ok=True)
    else:
        load_flag = False
        os.makedirs(store_dir, exist_ok=True)
        os.makedirs(saved_model_dir, exist_ok=True)
        os.makedirs(saved_pred_dir, exist_ok=True)

    n_channel = 1
    add_axis_conv = 0

    # Model taken from 2nd of dcase19 challenge: see Delphin-Poulat2019 in the results.
    n_layers = 7
    crnn_kwargs = {"n_in_channel": n_channel, "nclass": len(cfg.classes), "attention": True, "n_RNN_cell": 128,
                   "n_layers_RNN": 2,
                   "activation": "glu",
                   "dropout": 0.5,
                   "kernel_size": n_layers * [3], "padding": n_layers * [1], "stride": n_layers * [1],
                   "nb_filters": [16, 32, 64, 128, 128, 128, 128],
                   "pooling": [[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]}
    pooling_time_ratio = 4  # 2 * 2, temporal pooling of the CNN front-end
    out_nb_frames_1s = cfg.sample_rate / cfg.hop_size / pooling_time_ratio
    median_window = max(int(cfg.median_window_s * out_nb_frames_1s), 1)
    logger.debug(f"median_window: {median_window}")

    # ##############
    # DATA
    # ##############
    dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
                    compute_log=False)
    dfs = get_dfs(dataset, reduced_number_of_data)

    # Meta path for psds
    durations_synth = get_durations_df(cfg.synthetic)
    many_hot_encoder = ManyHotEncoder(cfg.classes, n_frames=cfg.max_frames // pooling_time_ratio)
    encod_func = many_hot_encoder.encode_strong_df

    # Normalisation per audio or on the full dataset
    if cfg.scaler_type == "dataset":
        transforms = get_transforms(cfg.max_frames, add_axis=add_axis_conv)
        weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
        unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
        train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms)
        scaler_args = []
        scaler = Scaler()
        # # Only on real data since that's our final goal and test data are real
        scaler.calculate_scaler(ConcatDataset([weak_data, unlabel_data, train_synth_data]))
        logger.debug(f"scaler mean: {scaler.mean_}")
    else:
        scaler_args = ["global", "min-max"]
        scaler = ScalerPerAudio(*scaler_args)

    # Final training transforms include additive noise; validation transforms do not.
    transforms = get_transforms(cfg.max_frames, scaler, add_axis_conv,
                                noise_dict_params={"mean": 0., "snr": cfg.noise_snr})
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    weak_data = DataLoadDf(dfs["weak"], encod_func, transforms, in_memory=cfg.in_memory)
    unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms, in_memory=cfg.in_memory_unlab)
    train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms, in_memory=cfg.in_memory)
    valid_synth_data = DataLoadDf(dfs["valid_synthetic"], encod_func, transforms_valid,
                                  return_indexes=True, in_memory=cfg.in_memory)
    logger.debug(f"len synth: {len(train_synth_data)}, len_unlab: {len(unlabel_data)}, len weak: {len(weak_data)}")

    # Batch layout: [weak | unlabeled | strong(synthetic)] quarters.
    if not no_synthetic:
        list_dataset = [weak_data, unlabel_data, train_synth_data]
        batch_sizes = [cfg.batch_size//4, cfg.batch_size//2, cfg.batch_size//4]
        strong_mask = slice((3*cfg.batch_size)//4, cfg.batch_size)
    else:
        list_dataset = [weak_data, unlabel_data]
        batch_sizes = [cfg.batch_size // 4, 3 * cfg.batch_size // 4]
        strong_mask = None
    weak_mask = slice(batch_sizes[0])  # Assume weak data is always the first one

    concat_dataset = ConcatDataset(list_dataset)
    sampler = MultiStreamBatchSampler(concat_dataset, batch_sizes=batch_sizes)
    training_loader = DataLoader(concat_dataset, batch_sampler=sampler)
    valid_synth_loader = DataLoader(valid_synth_data, batch_size=cfg.batch_size)

    # ##############
    # Model
    # ##############
    if load_flag:
        # Resume both students and their EMA teachers from the latest checkpoint.
        mlist = os.listdir(saved_model_dir)
        modelName = mlist[-1]
        # Builtin int(): the np.int alias was deprecated and removed in NumPy 1.24.
        n_epoch = int(modelName.split('_')[-1]) + 1
        model_fname = os.path.join(saved_model_dir, modelName)
        state = torch.load(model_fname)
        crnn1 = _load_model_v2(state, 1, 'crnn')
        crnn2 = _load_model_v2(state, 2, 'crnn')
        logger.info(f"training model: {model_fname}, epoch: {state['epoch']}")
        crnn1_ema = _load_model_v2(state, 1, 'crnn')
        for param in crnn1_ema.parameters():
            # In-place detach_(): plain detach() returns a new tensor and leaves
            # the teacher parameters still requiring grad.
            param.detach_()
        crnn2_ema = _load_model_v2(state, 2, 'crnn')
        for param in crnn2_ema.parameters():
            param.detach_()
        optim_kwargs = state['optimizer']["kwargs"]
        optim1 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn1.parameters()), **optim_kwargs)
        optim2 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn2.parameters()), **optim_kwargs)
    else:
        n_epoch = 0
        crnn1 = CRNN(**crnn_kwargs)
        crnn2 = CRNN(**crnn_kwargs)
        pytorch_total_params = sum(p.numel() for p in crnn1.parameters() if p.requires_grad)
        logger.info(crnn1)
        logger.info("number of parameters in the model: {}".format(pytorch_total_params))
        crnn1.apply(weights_init)
        crnn2.apply(weights_init)
        crnn1_ema = CRNN(**crnn_kwargs)
        crnn2_ema = CRNN(**crnn_kwargs)
        crnn1_ema.apply(weights_init)
        crnn2_ema.apply(weights_init)
        # Teacher parameters are updated only through the EMA, never by backprop.
        for param in crnn1_ema.parameters():
            param.detach_()
        for param in crnn2_ema.parameters():
            param.detach_()
        optim_kwargs = {"lr": cfg.default_learning_rate, "betas": (0.9, 0.999)}
        optim1 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn1.parameters()), **optim_kwargs)
        optim2 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn2.parameters()), **optim_kwargs)

    # Everything needed to resume or evaluate, bundled into one checkpoint dict.
    state = {
        'model': {"name1": crnn1.__class__.__name__,
                  "name2": crnn2.__class__.__name__,
                  'args': '',
                  "kwargs": crnn_kwargs,
                  'state_dict1': crnn1.state_dict(),
                  'state_dict2': crnn2.state_dict()},
        'model_ema': {"name1": crnn1_ema.__class__.__name__,
                      "name2": crnn2_ema.__class__.__name__,
                      'args': '',
                      "kwargs": crnn_kwargs,
                      'state_dict1': crnn1_ema.state_dict(),
                      'state_dict2': crnn2_ema.state_dict()},
        'optimizer': {"name1": optim1.__class__.__name__,
                      "name2": optim2.__class__.__name__,
                      'args': '',
                      "kwargs": optim_kwargs,
                      'state_dict1': optim1.state_dict(),
                      'state_dict2': optim2.state_dict()},
        "pooling_time_ratio": pooling_time_ratio,
        "scaler": {
            "type": type(scaler).__name__,
            "args": scaler_args,
            "state_dict": scaler.state_dict()},
        "many_hot_encoder": many_hot_encoder.state_dict(),
        "median_window": median_window,
        "desed": dataset.state_dict()
    }

    # "inf": keep the checkpoint with the SMALLEST stopping criterion (loss-based).
    save_best_cb = SaveBest("inf")
    if cfg.early_stopping is not None:
        early_stopping_call = EarlyStopping(patience=cfg.early_stopping, val_comp="sup", init_patience=cfg.es_init_wait)

    # ##############
    # Train
    # ##############
    results = pd.DataFrame(columns=["loss", "valid_synth_f1", "weak_metric", "global_valid"])
    for epoch in range(n_epoch, n_epoch+cfg.n_epoch):
        crnn1.train()
        crnn2.train()
        crnn1_ema.train()
        crnn2_ema.train()
        crnn1, crnn2, crnn1_ema, crnn2_ema = to_cuda_if_available(crnn1, crnn2, crnn1_ema, crnn2_ema)

        loss_value, loss_value2 = train(training_loader, crnn1, crnn2, optim1, optim2, epoch,
                                        ema_model1=crnn1_ema, ema_model2=crnn2_ema, mask_weak=weak_mask,
                                        mask_strong=strong_mask, adjust_lr=cfg.adjust_lr)

        # Validation
        crnn1 = crnn1.eval()
        logger.info("\n ### Valid synthetic metric ### \n")
        predictions = get_predictions(crnn1, valid_synth_loader, many_hot_encoder.decode_strong, pooling_time_ratio,
                                      median_window=median_window, save_predictions=None)
        # Validation with synthetic data (dropping feature_filename for psds)
        valid_synth = dfs["valid_synthetic"].drop("feature_filename", axis=1)
        valid_synth_f1, psds_m_f1 = compute_metrics(predictions, valid_synth, durations_synth)

        # Update state
        state['model']['state_dict1'] = crnn1.state_dict()
        state['model']['state_dict2'] = crnn2.state_dict()
        state['model_ema']['state_dict1'] = crnn1_ema.state_dict()
        state['model_ema']['state_dict2'] = crnn2_ema.state_dict()
        state['optimizer']['state_dict1'] = optim1.state_dict()
        state['optimizer']['state_dict2'] = optim2.state_dict()
        state['epoch'] = epoch
        state['valid_metric'] = valid_synth_f1
        state['valid_f1_psds'] = psds_m_f1

        # Callbacks
        if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
            model_fname = os.path.join(saved_model_dir, "baseline_epoch_" + str(epoch))
            torch.save(state, model_fname)
        if cfg.save_best:
            # Mean of the two student losses plus their gap: prefers epochs where
            # both students are low AND agree with each other.
            stop_criterior = (loss_value.item()+loss_value2.item())/2 + np.abs(loss_value.item()-loss_value2.item())
            if save_best_cb.apply(stop_criterior):
                model_fname = os.path.join(saved_model_dir, "baseline_best")
                torch.save(state, model_fname)

        results.loc[epoch, "global_valid"] = valid_synth_f1
        results.loc[epoch, "loss"] = loss_value.item()
        results.loc[epoch, "valid_synth_f1"] = valid_synth_f1
        if cfg.early_stopping:
            if early_stopping_call.apply(valid_synth_f1):
                # logger.warning(): warn() is a deprecated alias.
                logger.warning("EARLY STOPPING")
                break

    if cfg.save_best:
        model_fname = os.path.join(saved_model_dir, "baseline_best")
        state = torch.load(model_fname)
        crnn = _load_model_v2(state, 1, 'crnn')
        logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
    else:
        # Evaluate the model from the last training epoch. Previously `crnn` was
        # left unassigned on this path, crashing later at crnn.eval().
        crnn = crnn1
        logger.info("testing model of last epoch: {}".format(cfg.n_epoch))
    # to_csv returns None, so no assignment of its result.
    pd.DataFrame(results).to_csv(os.path.join(saved_pred_dir, "results.tsv"),
                                 sep="\t", index=False, float_format="%.4f")

    # ##############
    # Validation
    # ##############
    crnn.eval()
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
    validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
    validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
    validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
    durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
    # Preds with only one value
    valid_predictions = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                        pooling_time_ratio, median_window=median_window,
                                        save_predictions=predicitons_fname)
    compute_metrics(valid_predictions, validation_labels_df, durations_validation)

    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    # n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    # list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    # pred_ss_thresh = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
    #                                  pooling_time_ratio, thresholds=list_thresholds, median_window=median_window,
    #                                  save_predictions=predicitons_fname)
    # psds = compute_psds_from_operating_points(pred_ss_thresh, validation_labels_df, durations_validation)
    # psds_score(psds, filename_roc_curves=os.path.join(saved_pred_dir, "figures/psds_roc.png"))
| 28,492 | 48.295848 | 158 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/EvaluatePredictions.py | import glob
import os.path as osp
import pandas as pd
from evaluation_measures import psds_score, compute_psds_from_operating_points, compute_metrics
from utilities.utils import generate_tsv_wav_durations
if __name__ == '__main__':
    # Paths to the validation ground truth and (optional) precomputed durations.
    groundtruth_path = "../dataset/metadata/validation/validation.tsv"
    durations_path = "../dataset/metadata/validation/validation_durations.tsv"
    # The audio directory is only needed when the durations file must be generated.
    groundtruth_audio_path = "../dataset/audio/validation"
    base_prediction_path = "stored_data/MeanTeacher_with_synthetic/predictions/baseline_validation"

    groundtruth = pd.read_csv(groundtruth_path, sep="\t")
    if not osp.exists(durations_path):
        meta_dur_df = generate_tsv_wav_durations(groundtruth_audio_path, durations_path)
    else:
        meta_dur_df = pd.read_csv(durations_path, sep="\t")

    # Single-threshold evaluation of one prediction file.
    single_predictions = pd.read_csv(base_prediction_path + ".tsv", sep="\t")
    compute_metrics(single_predictions, groundtruth, meta_dur_df)

    # Multi-threshold evaluation (better): one tsv per operating point.
    list_predictions = [pd.read_csv(pred_file, sep="\t")
                        for pred_file in glob.glob(osp.join(base_prediction_path, "*.tsv"))]
    psds = compute_psds_from_operating_points(list_predictions, groundtruth, meta_dur_df)
    psds_score(psds, filename_roc_curves=osp.join(base_prediction_path, "figures/psds_roc.png"))
| 1,543 | 45.787879 | 99 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/main_SRST_model.py | # -*- coding: utf-8 -*-
import argparse
import datetime
import inspect
import os
import time
from pprint import pprint
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
from data_utils.Desed import DESED
from data_utils.DataLoad import DataLoadDf, ConcatDataset, MultiStreamBatchSampler
from TestModel import _load_model
from evaluation_measures import get_predictions, psds_score, compute_psds_from_operating_points, compute_metrics
from models.CRNN import CRNN
import config as cfg
from utilities import ramps
from utilities.Logger import create_logger
from utilities.Scaler import ScalerPerAudio, Scaler
from utilities.utils import SaveBest, to_cuda_if_available, weights_init, AverageMeterSet, EarlyStopping, \
get_durations_df, median_smoothing
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
def adjust_learning_rate(optimizer, rampup_value, rampdown_value=1):
    """Set every param group's learning rate from the ramp factors.

    The effective rate is ``rampup_value * rampdown_value * cfg.max_learning_rate``
    (LR warm-up for large minibatches, see https://arxiv.org/abs/1706.02677).

    Args:
        optimizer: torch.optim.Optimizer, updated in place
        rampup_value: float in [0, 1], grows during warm-up
        rampdown_value: float shrinking from 1 towards 0 late in training
    """
    scaled_lr = rampup_value * rampdown_value * cfg.max_learning_rate
    for pg in optimizer.param_groups:
        pg['lr'] = scaled_lr
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update the teacher weights as an exponential moving average of the student.

    Args:
        model: torch.nn.Module, student model (source of the new weights)
        ema_model: torch.nn.Module, teacher model, updated in place
        alpha: float, EMA decay rate (upper bound)
        global_step: int, number of optimizer steps taken so far
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_params, params in zip(ema_model.parameters(), model.parameters()):
        # Keyword alpha= form: the positional-alpha overload of Tensor.add_
        # is deprecated and removed in recent PyTorch releases.
        ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
def train(train_loader, model, optimizer, c_epoch, ema_model=None, mask_weak=None, mask_strong=None, adjust_lr=False):
    """ One epoch of Mean Teacher training with ICT and reliability-weighted self-labeling.

    The student is trained on supervised BCE losses (weak + strong), an
    Interpolation Consistency Training (ICT) MSE term against mixed teacher
    predictions, and an MSE term against self-generated frame-level pseudo
    labels weighted by their estimated reliability.

    Args:
        train_loader: torch.utils.data.DataLoader, iterator of training batches for an epoch.
            Should return a tuple: ((input view 1, input view 2), labels)
        model: torch.Module, student model, returns a strong (framewise) and a weak (clipwise) prediction
        optimizer: torch.Module, optimizer used to train the model
        c_epoch: int, the current epoch of training
        ema_model: torch.Module, teacher model (EMA of the student), same output contract
        mask_weak: slice or list, mask the batch to get only the weak labeled data (used to calculate the loss)
        mask_strong: slice or list, mask the batch to get only the strong labeled data (used to calculate the loss)
        adjust_lr: bool, Whether or not to adjust the learning rate during training (params in config)

    Returns:
        torch.Tensor: the total loss of the LAST batch of the epoch.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    class_criterion = nn.BCELoss(reduction='none')
    mse_criterion = nn.MSELoss(reduction='none')
    reliability_criterion = nn.CrossEntropyLoss(reduction='none')  # NOTE(review): defined but never used below
    softmax = nn.Softmax(dim=1)
    # Rows of cfg.class_label enumerate many-hot label vectors for subsets of
    # active classes; indexed below to turn subset posteriors into soft labels.
    class_label = torch.tensor(cfg.class_label).cuda()
    class_criterion, mse_criterion, softmax = to_cuda_if_available(class_criterion, mse_criterion, softmax)

    meters = AverageMeterSet()
    log.debug("Nb batches: {}".format(len(train_loader)))
    start = time.time()
    #plabel = []
    for i, ((batch_input, batch_input_ema), target) in enumerate(train_loader):
        global_step = c_epoch * len(train_loader) + i
        rampup_value = ramps.exp_rampup(global_step, cfg.n_epoch_rampup2*len(train_loader))

        if adjust_lr:
            adjust_learning_rate(optimizer, rampup_value)
        meters.update('lr', optimizer.param_groups[0]['lr'])
        batch_input, batch_input_ema, target = to_cuda_if_available(batch_input, batch_input_ema, target)

        # Outputs: teacher predictions on both views are averaged for self-labeling.
        strong_pred, weak_pred = model(batch_input)
        strong_predict1, weak_predict1 = ema_model(batch_input)
        strong_predict2, weak_predict2 = ema_model(batch_input_ema)
        strong_predict = (strong_predict1 + strong_predict2)/2
        weak_predict = (weak_predict1 + weak_predict2)/2
        strong_predict = strong_predict.detach()  # teacher provides targets only, no gradients
        weak_predict = weak_predict.detach()

        # core for Interpolation Consistency Training (ICT)
        n_unlabeled = int(3*cfg.batch_size/4)  # mask for unlabeled and weakly labeled data
        unlabeled_data1 = batch_input[:n_unlabeled]
        unlabeled_data2 = batch_input_ema[:n_unlabeled]
        strong_prediction1, weak_prediction1 = ema_model(unlabeled_data1)
        strong_prediction2, weak_prediction2 = ema_model(unlabeled_data2)
        # Mix the two views and the teacher's predictions with the same random weight.
        lambda_ = torch.rand(1).cuda()
        mixed_unlabeled_data = lambda_*unlabeled_data1 + (1.0-lambda_)*unlabeled_data2
        mixed_strong_plabel = lambda_*strong_prediction1 + (1.0-lambda_)*strong_prediction2
        mixed_weak_plabel = lambda_*weak_prediction1 + (1.0-lambda_)*weak_prediction2
        strong_prediction_mixed, weak_prediction_mixed = model(mixed_unlabeled_data)

        loss = None
        # Weak BCE Loss
        target_weak = target.max(-2)[0]  # Take the max in the time axis
        if mask_weak is not None:
            temp = class_criterion(weak_pred[mask_weak], target_weak[mask_weak])
            weak_class_loss = temp.mean()

            if i == 0:
                log.debug(f"target: {target.mean(-2)} \n Target_weak: {target_weak} \n "
                          f"Target weak mask: {target_weak[mask_weak]} \n "
                          f"Target strong mask: {target[mask_strong].sum(-2)}\n"
                          f"weak loss: {weak_class_loss} \t rampup_value: {rampup_value}"
                          f"tensor mean: {batch_input.mean()}")
            meters.update('weak_class_loss', weak_class_loss.item())
            #meters.update('Weak EMA loss', ema_class_loss.mean().item())

        # Strong BCE loss
        if mask_strong is not None:
            temp = class_criterion(strong_pred[mask_strong], target[mask_strong])
            strong_class_loss = temp.mean()
            meters.update('Strong loss', strong_class_loss.item())

        # Teacher-student consistency cost
        if ema_model is not None:
            rampup_weight = cfg.max_rampup_weight * rampup_value
            meters.update('Rampup weight', rampup_weight)

            # Take consistency about strong predictions (all data)
            consistency_loss_strong = rampup_weight * mse_criterion(strong_prediction_mixed, mixed_strong_plabel).mean()
            meters.update('Consistency strong', consistency_loss_strong.item())
            #if loss is not None:
            #    loss += consistency_loss_strong
            #else:
            #    loss = consistency_loss_strong
            #meters.update('Consistency weight', consistency_cost)

            # Take consistency about weak predictions (all data)
            consistency_loss_weak = rampup_weight * mse_criterion(weak_prediction_mixed, mixed_weak_plabel).mean()
            meters.update('Consistency weak', consistency_loss_weak.item())
            #if loss is not None:
            #    loss += consistency_loss_weak
            #else:
            #    loss = consistency_loss_weak

            # Self-labeling: derive frame-level pseudo labels from the averaged
            # teacher posterior.
            # 157 output frames — presumably cfg.max_frames // pooling_time_ratio; TODO confirm
            est_strong_target = torch.zeros(cfg.batch_size,157,cfg.nClass).cuda()
            for bter in range(cfg.batch_size):
                sp = strong_predict[bter]
                sp = torch.clamp(sp, 0.0001, 0.9999)  # avoid log(0)
                p_h1 = torch.log(sp)    # log p(class active)
                p_h0 = torch.log(1-sp)  # log p(class inactive)
                # Log-likelihood of each "K classes active" hypothesis per frame:
                # K = 0
                P0 = p_h0.sum(1)
                # K = 1
                P1 = P0[:,None] + p_h1 - p_h0
                #P = torch.cat([P0.reshape(157,1), P1], 1)
                # K = 2
                P2 = []
                for cter in range(1,cfg.nClass):
                    P2.append(P1[:,:-cter]+P1[:,cter:])
                P2 = torch.cat(P2, 1)
                P2 = P2 - P0[:,None]
                P = torch.cat([P0.reshape(157,1), P1, P2], 1)
                # K: up to 3 (disabled variant kept for reference)
                #P3 = []
                #for cter1 in range(1,cfg.nClass):
                #    for cter2 in range(1, cfg.nClass-cter1):
                #        P3.append(P1[:,:-(cter1+cter2)]+P1[:,cter1:-cter2]+P1[:,(cter1+cter2):])
                #P3 = torch.cat(P3,1)
                #P3 = P3 - 2*P0[:,None]
                #P = torch.cat([P0.reshape(157,1), P1, P2, P3], 1)
                # Posterior over hypotheses, then pseudo label = posterior-weighted
                # mixture of the corresponding many-hot label rows.
                P = softmax(P)
                prob_v, prob_i = torch.sort(P, dim=1, descending=True)
                norm_p = prob_v.sum(1)
                prob_v = prob_v/norm_p[:,None]
                cl = class_label[prob_i.tolist(),:]
                cl = torch.mul(cl, prob_v[:,:,None]).sum(1)
                est_strong_target[bter,:,:] = torch.squeeze(cl)
            est_weak_target = est_strong_target.mean(1)

            # Reliability = ramp-up weight / BCE of the pseudo labels against the
            # ground truth on the strongly labeled part, clamped for stability.
            reliability = rampup_weight/class_criterion(est_strong_target[mask_strong], target[mask_strong]).mean()
            reliability = torch.clamp(reliability, 0, 2*rampup_weight)
            meters.update('Reliability of pseudo label', reliability.item())

            # classification error with pseudo label (unlabeled + weak portion only)
            pred_strong_loss = mse_criterion(strong_pred[:n_unlabeled], est_strong_target[:n_unlabeled]).mean([1,2])
            pred_weak_loss = mse_criterion(weak_pred[:n_unlabeled], est_weak_target[:n_unlabeled]).mean(1)
            pred_loss = pred_strong_loss + pred_weak_loss
            expect_loss = reliability * pred_loss.mean()
            meters.update('Expectation of predict loss', expect_loss.item())

            loss = weak_class_loss + strong_class_loss + consistency_loss_strong + consistency_loss_weak + expect_loss
            meters.update('Loss', loss.item())

        # Skip the parameter update when the loss explodes or turns NaN.
        if (np.isnan(loss.item()) or loss.item() > 1e5):
            print(loss)
        else:
            # compute gradient and do optimizer step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        global_step += 1
        if ema_model is not None:
            update_ema_variables(model, ema_model, 0.999, global_step)

    epoch_time = time.time() - start
    log.info(f"Epoch: {c_epoch}\t Time {epoch_time:.2f}\t {meters}")
    return loss
def get_dfs(desed_dataset, nb_files=None, separated_sources=False):
    """Build every dataframe used for training and validation.

    Args:
        desed_dataset: DESED instance used to load/initialize the metadata.
        nb_files: int or None, optional cap on the number of files (small-run tests).
        separated_sources: bool, if True also point at the separated-source audio dirs.

    Returns:
        dict mapping {"weak", "unlabel", "synthetic", "train_synthetic",
        "valid_synthetic", "validation"} to their dataframes.

    NOTE: reads the module-level `pooling_time_ratio` set in the __main__ block.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    if separated_sources:
        audio_weak_ss = cfg.weak_ss
        audio_unlabel_ss = cfg.unlabel_ss
        audio_validation_ss = cfg.validation_ss
        audio_synthetic_ss = cfg.synthetic_ss
    else:
        audio_weak_ss = audio_unlabel_ss = audio_validation_ss = audio_synthetic_ss = None
    weak_df = desed_dataset.initialize_and_get_df(cfg.weak, audio_dir_ss=audio_weak_ss, nb_files=nb_files)
    unlabel_df = desed_dataset.initialize_and_get_df(cfg.unlabel, audio_dir_ss=audio_unlabel_ss, nb_files=nb_files)
    # Even when synthetic data is not used for training, it is used for validation.
    synthetic_df = desed_dataset.initialize_and_get_df(cfg.synthetic, audio_dir_ss=audio_synthetic_ss,
                                                       nb_files=nb_files, download=False)
    log.debug(f"synthetic: {synthetic_df.head()}")
    validation_df = desed_dataset.initialize_and_get_df(cfg.validation, audio_dir=cfg.audio_validation_dir,
                                                        audio_dir_ss=audio_validation_ss, nb_files=nb_files)
    # 80/20 split of the synthetic set into train/valid (fixed seed => reproducible).
    train_files = synthetic_df.filename.drop_duplicates().sample(frac=0.8, random_state=26)
    train_synth_df = synthetic_df[synthetic_df.filename.isin(train_files)]
    valid_synth_df = synthetic_df.drop(train_synth_df.index).reset_index(drop=True)
    # Convert train onsets/offsets from seconds to pooled frames so many_hot_encoder
    # can work. Valid stays in seconds: predictions drop labels and the event-based
    # metric expects seconds.
    train_synth_df.onset = train_synth_df.onset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    train_synth_df.offset = train_synth_df.offset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    log.debug(valid_synth_df.event_label.value_counts())
    return {
        "weak": weak_df,
        "unlabel": unlabel_df,
        "synthetic": synthetic_df,
        "train_synthetic": train_synth_df,
        "valid_synthetic": valid_synth_df,
        "validation": validation_df,
    }
if __name__ == '__main__':
    torch.manual_seed(2020)
    np.random.seed(2020)
    logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    logger.info("Baseline 2020")
    logger.info(f"Starting time: {datetime.datetime.now()}")
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-s", '--subpart_data', type=int, default=None, dest="subpart_data",
                        help="Number of files to be used. Useful when testing on small number of files.")
    parser.add_argument("-n", '--no_synthetic', dest='no_synthetic', action='store_true', default=False,
                        help="Not using synthetic labels during training")
    f_args = parser.parse_args()
    pprint(vars(f_args))
    reduced_number_of_data = f_args.subpart_data
    no_synthetic = f_args.no_synthetic
    store_dir = os.path.join("stored_data", "MeanTeacher_with_ICT_plabel")
    saved_model_dir = os.path.join(store_dir, "model")
    saved_pred_dir = os.path.join(store_dir, "predictions")
    # Resume from a checkpoint only when a model directory already exists;
    # makedirs(exist_ok=True) then covers both the fresh and existing cases.
    load_flag = os.path.exists(store_dir) and os.path.exists(saved_model_dir)
    os.makedirs(store_dir, exist_ok=True)
    os.makedirs(saved_model_dir, exist_ok=True)
    os.makedirs(saved_pred_dir, exist_ok=True)
    n_channel = 1
    add_axis_conv = 0
    # Model taken from 2nd of dcase19 challenge: see Delphin-Poulat2019 in the results.
    n_layers = 7
    crnn_kwargs = {"n_in_channel": n_channel, "nclass": len(cfg.classes), "attention": True, "n_RNN_cell": 128,
                   "n_layers_RNN": 2,
                   "activation": "glu",
                   "dropout": 0.5,
                   "kernel_size": n_layers * [3], "padding": n_layers * [1], "stride": n_layers * [1],
                   "nb_filters": [16, 32, 64, 128, 128, 128, 128],
                   "pooling": [[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]}
    pooling_time_ratio = 4  # 2 * 2 (time pooling of the first two CNN blocks)
    out_nb_frames_1s = cfg.sample_rate / cfg.hop_size / pooling_time_ratio
    median_window = max(int(cfg.median_window_s * out_nb_frames_1s), 1)
    logger.debug(f"median_window: {median_window}")
    # ##############
    # DATA
    # ##############
    dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
                    compute_log=False)
    dfs = get_dfs(dataset, reduced_number_of_data)
    # Meta path for psds
    durations_synth = get_durations_df(cfg.synthetic)
    many_hot_encoder = ManyHotEncoder(cfg.classes, n_frames=cfg.max_frames // pooling_time_ratio)
    encod_func = many_hot_encoder.encode_strong_df
    # Normalisation per audio or on the full dataset
    if cfg.scaler_type == "dataset":
        transforms = get_transforms(cfg.max_frames, add_axis=add_axis_conv)
        weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
        unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
        train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms)
        scaler_args = []
        scaler = Scaler()
        # Only on real data since that's our final goal and test data are real
        scaler.calculate_scaler(ConcatDataset([weak_data, unlabel_data, train_synth_data]))
        logger.debug(f"scaler mean: {scaler.mean_}")
    else:
        scaler_args = ["global", "min-max"]
        scaler = ScalerPerAudio(*scaler_args)
    transforms = get_transforms(cfg.max_frames, scaler, add_axis_conv,
                                noise_dict_params={"mean": 0., "snr": cfg.noise_snr})
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    weak_data = DataLoadDf(dfs["weak"], encod_func, transforms, in_memory=cfg.in_memory)
    unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms, in_memory=cfg.in_memory_unlab)
    train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms, in_memory=cfg.in_memory)
    valid_synth_data = DataLoadDf(dfs["valid_synthetic"], encod_func, transforms_valid,
                                  return_indexes=True, in_memory=cfg.in_memory)
    logger.debug(f"len synth: {len(train_synth_data)}, len_unlab: {len(unlabel_data)}, len weak: {len(weak_data)}")
    # Batch layout: weak data first, then unlabeled, then (optionally) synthetic strong.
    if not no_synthetic:
        list_dataset = [weak_data, unlabel_data, train_synth_data]
        batch_sizes = [cfg.batch_size//4, cfg.batch_size//2, cfg.batch_size//4]
        strong_mask = slice((3*cfg.batch_size)//4, cfg.batch_size)
    else:
        list_dataset = [weak_data, unlabel_data]
        batch_sizes = [cfg.batch_size // 4, 3 * cfg.batch_size // 4]
        strong_mask = None
    weak_mask = slice(batch_sizes[0])  # Assume weak data is always the first one
    concat_dataset = ConcatDataset(list_dataset)
    sampler = MultiStreamBatchSampler(concat_dataset, batch_sizes=batch_sizes)
    training_loader = DataLoader(concat_dataset, batch_sampler=sampler)
    valid_synth_loader = DataLoader(valid_synth_data, batch_size=cfg.batch_size)
    # ##############
    # Model
    # ##############
    if load_flag:
        mlist = os.listdir(saved_model_dir)
        # NOTE(review): os.listdir order is arbitrary, so mlist[-1] is not guaranteed
        # to be the most recent checkpoint -- confirm the intended selection policy.
        modelName = mlist[-1]
        # int() instead of np.int: the np.int alias was removed in NumPy >= 1.24.
        n_epoch = int(modelName.split('_')[-1]) + 1
        model_fname = os.path.join(saved_model_dir, modelName)
        state = torch.load(model_fname)
        crnn = _load_model(state, 'crnn')
        logger.info(f"training model: {model_fname}, epoch: {state['epoch']}")
        # NOTE(review): the EMA model is restored from the student ('crnn') weights,
        # not from 'model_ema' -- confirm this is intended.
        crnn_ema = _load_model(state, 'crnn')
        for param in crnn_ema.parameters():
            # detach_() (in-place), matching the fresh-model branch below; plain
            # detach() returns a new tensor and leaves the parameter attached.
            param.detach_()
        optim_kwargs = state['optimizer']["kwargs"]
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
    else:
        n_epoch = 0
        crnn = CRNN(**crnn_kwargs)
        pytorch_total_params = sum(p.numel() for p in crnn.parameters() if p.requires_grad)
        logger.info(crnn)
        logger.info("number of parameters in the model: {}".format(pytorch_total_params))
        crnn.apply(weights_init)
        # Teacher (EMA) model: same architecture, parameters kept out of the graph.
        crnn_ema = CRNN(**crnn_kwargs)
        crnn_ema.apply(weights_init)
        for param in crnn_ema.parameters():
            param.detach_()
        optim_kwargs = {"lr": cfg.default_learning_rate, "betas": (0.9, 0.999)}
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
    # Everything needed to reload/evaluate the model later is stored in one dict.
    state = {
        'model': {"name": crnn.__class__.__name__,
                  'args': '',
                  "kwargs": crnn_kwargs,
                  'state_dict': crnn.state_dict()},
        'model_ema': {"name": crnn_ema.__class__.__name__,
                      'args': '',
                      "kwargs": crnn_kwargs,
                      'state_dict': crnn_ema.state_dict()},
        'optimizer': {"name": optim.__class__.__name__,
                      'args': '',
                      "kwargs": optim_kwargs,
                      'state_dict': optim.state_dict()},
        "pooling_time_ratio": pooling_time_ratio,
        "scaler": {
            "type": type(scaler).__name__,
            "args": scaler_args,
            "state_dict": scaler.state_dict()},
        "many_hot_encoder": many_hot_encoder.state_dict(),
        "median_window": median_window,
        "desed": dataset.state_dict()
    }
    save_best_cb = SaveBest("sup")
    if cfg.early_stopping is not None:
        early_stopping_call = EarlyStopping(patience=cfg.early_stopping, val_comp="sup", init_patience=cfg.es_init_wait)
    # ##############
    # Train
    # ##############
    results = pd.DataFrame(columns=["loss", "valid_synth_f1", "weak_metric", "global_valid"])
    for epoch in range(n_epoch, n_epoch+cfg.n_epoch):
        crnn.train()
        crnn_ema.train()
        crnn, crnn_ema = to_cuda_if_available(crnn, crnn_ema)
        loss_value = train(training_loader, crnn, optim, epoch,
                           ema_model=crnn_ema, mask_weak=weak_mask, mask_strong=strong_mask, adjust_lr=cfg.adjust_lr)
        # Validation
        crnn = crnn.eval()
        logger.info("\n ### Valid synthetic metric ### \n")
        predictions = get_predictions(crnn, valid_synth_loader, many_hot_encoder.decode_strong, pooling_time_ratio,
                                      median_window=median_window, save_predictions=None)
        # Validation with synthetic data (dropping feature_filename for psds)
        valid_synth = dfs["valid_synthetic"].drop("feature_filename", axis=1)
        valid_synth_f1, psds_m_f1 = compute_metrics(predictions, valid_synth, durations_synth)
        # Update state
        state['model']['state_dict'] = crnn.state_dict()
        state['model_ema']['state_dict'] = crnn_ema.state_dict()
        state['optimizer']['state_dict'] = optim.state_dict()
        state['epoch'] = epoch
        state['valid_metric'] = valid_synth_f1
        state['valid_f1_psds'] = psds_m_f1
        # Callbacks
        if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
            model_fname = os.path.join(saved_model_dir, "baseline_epoch_" + str(epoch))
            torch.save(state, model_fname)
        if cfg.save_best:
            if save_best_cb.apply(valid_synth_f1):
                model_fname = os.path.join(saved_model_dir, "baseline_best")
                torch.save(state, model_fname)
            results.loc[epoch, "global_valid"] = valid_synth_f1
        results.loc[epoch, "loss"] = loss_value.item()
        results.loc[epoch, "valid_synth_f1"] = valid_synth_f1
        if cfg.early_stopping:
            if early_stopping_call.apply(valid_synth_f1):
                # logging.Logger.warn is deprecated; warning() is the supported name.
                logger.warning("EARLY STOPPING")
                break
    if cfg.save_best:
        model_fname = os.path.join(saved_model_dir, "baseline_best")
        state = torch.load(model_fname)
        crnn = _load_model(state, 'crnn')
        logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
    else:
        logger.info("testing model of last epoch: {}".format(cfg.n_epoch))
    # results is already a DataFrame and to_csv returns None, so the previous
    # `results_df = pd.DataFrame(results).to_csv(...)` wrap/assignment was dead code.
    results.to_csv(os.path.join(saved_pred_dir, "results.tsv"),
                   sep="\t", index=False, float_format="%.4f")
    # ##############
    # Validation
    # ##############
    crnn.eval()
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    predictions_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
    validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
    validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
    validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
    durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
    # Preds with only one value
    valid_predictions = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                        pooling_time_ratio, median_window=median_window,
                                        save_predictions=predictions_fname)
    compute_metrics(valid_predictions, validation_labels_df, durations_validation)
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                     pooling_time_ratio, thresholds=list_thresholds, median_window=median_window,
                                     save_predictions=predictions_fname)
    psds = compute_psds_from_operating_points(pred_ss_thresh, validation_labels_df, durations_validation)
    psds_score(psds, filename_roc_curves=os.path.join(saved_pred_dir, "figures/psds_roc.png"))
| 25,288 | 46.535714 | 120 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/models/CNN.py | import torch.nn as nn
import torch
class GLU(nn.Module):
    """Gated Linear Unit over the channel dimension of a (B, C, H, W) tensor.

    Output = Linear(x) * sigmoid(x), where the Linear acts on channels
    (applied channel-last, then permuted back).
    """
    def __init__(self, input_num):
        super(GLU, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear = nn.Linear(input_num, input_num)

    def forward(self, x):
        # Move channels last so nn.Linear mixes them, then restore (B, C, H, W).
        projected = self.linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        gate = self.sigmoid(x)
        return projected * gate
class ContextGating(nn.Module):
    """Context gating over the channel dimension of a (B, C, H, W) tensor.

    Output = x * sigmoid(Linear(x)): the linear projection of the input
    produces the gate, unlike GLU where the raw input is the gate.
    """
    def __init__(self, input_num):
        super(ContextGating, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear = nn.Linear(input_num, input_num)

    def forward(self, x):
        # Channel-last so nn.Linear mixes channels, then back to (B, C, H, W).
        projected = self.linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return x * self.sigmoid(projected)
class CNN(nn.Module):
    """Convolutional front-end: a configurable stack of conv blocks.

    Each block i is Conv2d -> BatchNorm2d -> activation -> Dropout -> AvgPool2d,
    registered under stable names ('conv{i}', 'batchnorm{i}', ...) so existing
    checkpoints keep loading.

    Args:
        n_in_channel: number of input channels.
        activation: one of "relu", "leakyrelu", "glu", "cg" (case-insensitive).
        conv_dropout: dropout probability after each block (None disables it).
        kernel_size, padding, stride, nb_filters, pooling: per-block settings;
            one entry per block, length taken from nb_filters.
    """

    def __init__(self, n_in_channel, activation="Relu", conv_dropout=0,
                 kernel_size=(3, 3, 3), padding=(1, 1, 1), stride=(1, 1, 1), nb_filters=(64, 64, 64),
                 pooling=((1, 4), (1, 4), (1, 4))
                 ):
        # Defaults are tuples (previously lists) to avoid the shared mutable
        # default-argument pitfall; callers may still pass lists.
        super(CNN, self).__init__()
        self.nb_filters = nb_filters
        cnn = nn.Sequential()

        def conv(i, batchNormalization=False, dropout=None, activ="relu"):
            # Append one conv block; module names must stay stable for state_dict.
            nIn = n_in_channel if i == 0 else nb_filters[i - 1]
            nOut = nb_filters[i]
            cnn.add_module('conv{0}'.format(i),
                           nn.Conv2d(nIn, nOut, kernel_size[i], stride[i], padding[i]))
            if batchNormalization:
                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut, eps=0.001, momentum=0.99))
            if activ.lower() == "leakyrelu":
                cnn.add_module('relu{0}'.format(i),
                               nn.LeakyReLU(0.2))
            elif activ.lower() == "relu":
                cnn.add_module('relu{0}'.format(i), nn.ReLU())
            elif activ.lower() == "glu":
                cnn.add_module('glu{0}'.format(i), GLU(nOut))
            elif activ.lower() == "cg":
                cnn.add_module('cg{0}'.format(i), ContextGating(nOut))
            if dropout is not None:
                cnn.add_module('dropout{0}'.format(i),
                               nn.Dropout(dropout))

        batch_norm = True
        # 128x862x64
        for i in range(len(nb_filters)):
            conv(i, batch_norm, conv_dropout, activ=activation)
            # Average pooling after each block reduces time/frequency resolution.
            cnn.add_module('pooling{0}'.format(i), nn.AvgPool2d(pooling[i]))  # bs x tframe x mels
        self.cnn = cnn

    def load_state_dict(self, state_dict, strict=True):
        """Delegate to the inner Sequential; `strict` is now forwarded."""
        self.cnn.load_state_dict(state_dict, strict=strict)

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Checkpoints store only the inner Sequential's weights."""
        return self.cnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)

    def save(self, filename):
        """Serialize the inner Sequential's state_dict to `filename`."""
        torch.save(self.cnn.state_dict(), filename)

    def forward(self, x):
        # input size : (batch_size, n_channels, n_frames, n_freq)
        return self.cnn(x)
| 4,002 | 37.12381 | 105 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/models/CRNN.py | import warnings
import torch.nn as nn
import torch
from models.RNN import BidirectionalGRU
from models.CNN import CNN
class CRNN(nn.Module):
    """CRNN sound event detection model: CNN front-end, bidirectional GRU, and a
    per-frame sigmoid classifier, with optional attention pooling for the
    clip-level (weak) prediction.

    forward() returns (strong, weak): strong is (bs, frames, nclass) frame-level
    probabilities, weak is (bs, nclass) clip-level probabilities.
    """
    def __init__(self, n_in_channel, nclass, attention=False, activation="Relu", dropout=0,
                 train_cnn=True, rnn_type='BGRU', n_RNN_cell=64, n_layers_RNN=1, dropout_recurrent=0,
                 cnn_integration=False, **kwargs):
        super(CRNN, self).__init__()
        self.n_in_channel = n_in_channel
        self.attention = attention
        self.cnn_integration = cnn_integration
        n_in_cnn = n_in_channel
        if cnn_integration:
            # Input channels are folded into the batch dim in forward(), so the
            # CNN itself sees single-channel input.
            n_in_cnn = 1
        self.cnn = CNN(n_in_cnn, activation, dropout, **kwargs)
        if not train_cnn:
            for param in self.cnn.parameters():
                param.requires_grad = False
        self.train_cnn = train_cnn
        if rnn_type == 'BGRU':
            nb_in = self.cnn.nb_filters[-1]
            if self.cnn_integration:
                nb_in = nb_in * n_in_channel
            self.rnn = BidirectionalGRU(nb_in,
                                        n_RNN_cell, dropout=dropout_recurrent, num_layers=n_layers_RNN)
        else:
            # Bug fix: the exception object was previously created but never raised.
            raise NotImplementedError("Only BGRU supported for CRNN for now")
        self.dropout = nn.Dropout(dropout)
        self.dense = nn.Linear(n_RNN_cell*2, nclass)
        self.sigmoid = nn.Sigmoid()
        if self.attention:
            self.dense_softmax = nn.Linear(n_RNN_cell*2, nclass)
            self.softmax = nn.Softmax(dim=-1)

    def load_cnn(self, state_dict):
        """Load pretrained CNN weights and re-freeze them if train_cnn is False."""
        self.cnn.load_state_dict(state_dict)
        if not self.train_cnn:
            for param in self.cnn.parameters():
                param.requires_grad = False

    def load_state_dict(self, state_dict, strict=True):
        """Restore cnn/rnn/dense (and the attention head when present)."""
        self.cnn.load_state_dict(state_dict["cnn"])
        self.rnn.load_state_dict(state_dict["rnn"])
        self.dense.load_state_dict(state_dict["dense"])
        # Bug fix: the attention head was previously never restored. Guarded so
        # older checkpoints (without the key) still load.
        if self.attention and "dense_softmax" in state_dict:
            self.dense_softmax.load_state_dict(state_dict["dense_softmax"])

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        state_dict = {"cnn": self.cnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars),
                      "rnn": self.rnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars),
                      'dense': self.dense.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)}
        if self.attention:
            # Bug fix: include the attention head so it survives save/load.
            state_dict["dense_softmax"] = self.dense_softmax.state_dict(destination=destination,
                                                                        prefix=prefix, keep_vars=keep_vars)
        return state_dict

    def save(self, filename):
        parameters = {'cnn': self.cnn.state_dict(), 'rnn': self.rnn.state_dict(), 'dense': self.dense.state_dict()}
        if self.attention:
            # Bug fix: persist the attention head as well.
            parameters['dense_softmax'] = self.dense_softmax.state_dict()
        torch.save(parameters, filename)

    def forward(self, x):
        # input size : (batch_size, n_channels, n_frames, n_freq)
        if self.cnn_integration:
            bs_in, nc_in = x.size(0), x.size(1)
            x = x.view(bs_in * nc_in, 1, *x.shape[2:])
        # conv features
        x = self.cnn(x)
        bs, chan, frames, freq = x.size()
        if self.cnn_integration:
            x = x.reshape(bs_in, chan * nc_in, frames, freq)
        if freq != 1:
            warnings.warn(f"Output shape is: {(bs, frames, chan * freq)}, from {freq} staying freq")
            x = x.permute(0, 2, 1, 3)
            x = x.contiguous().view(bs, frames, chan * freq)
        else:
            x = x.squeeze(-1)
            x = x.permute(0, 2, 1)  # [bs, frames, chan]
        # rnn features
        x = self.rnn(x)
        x = self.dropout(x)
        strong = self.dense(x)  # [bs, frames, nclass]
        strong = self.sigmoid(strong)
        if self.attention:
            sof = self.dense_softmax(x)  # [bs, frames, nclass]
            sof = self.softmax(sof)
            sof = torch.clamp(sof, min=1e-7, max=1)
            # Attention-weighted average of frame probabilities over time.
            weak = (strong * sof).sum(1) / sof.sum(1)  # [bs, nclass]
        else:
            weak = strong.mean(1)
        return strong, weak
if __name__ == '__main__':
    # Smoke test: instantiate a CRNN with an explicit 3-block CNN configuration.
    CRNN(64, 10, kernel_size=[3, 3, 3], padding=[1, 1, 1], stride=[1, 1, 1], nb_filters=[64, 64, 64],
         pooling=[(1, 4), (1, 4), (1, 4)])
| 4,037 | 38.588235 | 115 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/models/RNN.py | import warnings
import torch
from torch import nn as nn
class BidirectionalGRU(nn.Module):
    """Thin wrapper around a bidirectional, batch-first nn.GRU.

    Output feature size is 2 * n_hidden (forward and backward states concatenated).
    `dropout` only takes effect when num_layers > 1 (nn.GRU semantics).
    """
    def __init__(self, n_in, n_hidden, dropout=0, num_layers=1):
        super(BidirectionalGRU, self).__init__()
        self.rnn = nn.GRU(n_in, n_hidden, bidirectional=True, dropout=dropout, batch_first=True, num_layers=num_layers)

    def forward(self, input_feat):
        # Only the per-step outputs are returned; the final hidden state is dropped.
        output, _ = self.rnn(input_feat)
        return output
class BidirectionalLSTM(nn.Module):
    """Bidirectional, batch-first LSTM followed by a linear projection to nOut.

    Args:
        nIn: input feature size.
        nHidden: total hidden size across both directions (each direction
            gets nHidden // 2 units).
        nOut: output feature size.
        dropout: inter-layer dropout (only active when num_layers > 1).
        num_layers: number of stacked LSTM layers.
    """
    def __init__(self, nIn, nHidden, nOut, dropout=0, num_layers=1):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden // 2, bidirectional=True, batch_first=True,
                           dropout=dropout, num_layers=num_layers)
        # Bug fix: the bidirectional LSTM outputs 2 * (nHidden // 2) features,
        # not nHidden * 2 -- the previous nn.Linear(nHidden * 2, nOut) always
        # crashed with a shape mismatch in forward().
        self.embedding = nn.Linear(2 * (nHidden // 2), nOut)

    def save(self, filename):
        """Serialize the module's state_dict to `filename`."""
        torch.save(self.state_dict(), filename)

    def load(self, filename=None, parameters=None):
        """Load weights from a file path or an in-memory state_dict."""
        if filename is not None:
            self.load_state_dict(torch.load(filename))
        elif parameters is not None:
            self.load_state_dict(parameters)
        else:
            raise NotImplementedError("load is a filename or a list of parameters (state_dict)")

    def forward(self, input_feat):
        recurrent, _ = self.rnn(input_feat)
        b, T, h = recurrent.size()
        # Flatten (batch, time) so the Linear runs over every time step at once.
        t_rec = recurrent.contiguous().view(b * T, h)
        output = self.embedding(t_rec)  # [T * b, nOut]
        output = output.view(b, T, -1)
        return output
| 1,498 | 31.586957 | 119 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/utilities/ManyHotEncoder.py | import numpy as np
import pandas as pd
from dcase_util.data import DecisionEncoder
class ManyHotEncoder:
    """
    Adapted after DecisionEncoder.find_contiguous_regions method in
    https://github.com/DCASE-REPO/dcase_util/blob/master/dcase_util/data/decisions.py
    Encode labels into numpy arrays where 1 correspond to presence of the class and 0 absence.
    Multiple 1 can appear on the same line, it is for multi label problem.
    Args:
        labels: list, the classes which will be encoded
        n_frames: int, (Default value = None) only useful for strong labels. The number of frames of a segment.
    Attributes:
        labels: list, the classes which will be encoded
        n_frames: int, only useful for strong labels. The number of frames of a segment.
    """
    def __init__(self, labels, n_frames=None):
        # Bug fix: `type(labels) in [np.ndarray, np.array]` compared against
        # np.array, which is a function (not a type) and could never match;
        # isinstance also covers ndarray subclasses.
        if isinstance(labels, np.ndarray):
            labels = labels.tolist()
        self.labels = labels
        self.n_frames = n_frames

    def encode_weak(self, labels):
        """ Encode a list of weak labels into a numpy array
        Args:
            labels: list, list of labels to encode (to a vector of 0 and 1)
        Returns:
            numpy.array
            A vector containing 1 for each label, and 0 everywhere else
        """
        # useful for tensor empty labels
        if type(labels) is str:
            if labels == "empty":
                y = np.zeros(len(self.labels)) - 1
                return y
        if type(labels) is pd.DataFrame:
            if labels.empty:
                labels = []
            elif "event_label" in labels.columns:
                labels = labels["event_label"]
        y = np.zeros(len(self.labels))
        for label in labels:
            if not pd.isna(label):
                i = self.labels.index(label)
                y[i] = 1
        return y

    def encode_strong_df(self, label_df):
        """Encode a list (or pandas Dataframe or Serie) of strong labels, they correspond to a given filename
        Args:
            label_df: pandas DataFrame or Series, contains filename, onset (in frames) and offset (in frames)
                If only filename (no onset offset) is specified, it will return the event on all the frames
                onset and offset should be in frames
        Returns:
            numpy.array
            Encoded labels, 1 where the label is present, 0 otherwise
        """
        assert self.n_frames is not None, "n_frames need to be specified when using strong encoder"
        if type(label_df) is str:
            if label_df == 'empty':
                y = np.zeros((self.n_frames, len(self.labels))) - 1
                return y
        y = np.zeros((self.n_frames, len(self.labels)))
        if type(label_df) is pd.DataFrame:
            if {"onset", "offset", "event_label"}.issubset(label_df.columns):
                for _, row in label_df.iterrows():
                    if not pd.isna(row["event_label"]):
                        i = self.labels.index(row["event_label"])
                        onset = int(row["onset"])
                        offset = int(row["offset"])
                        y[onset:offset, i] = 1  # means offset not included (hypothesis of overlapping frames, so ok)
        elif type(label_df) in [pd.Series, list, np.ndarray]:  # list of list or list of strings
            if type(label_df) is pd.Series:
                if {"onset", "offset", "event_label"}.issubset(label_df.index):  # means only one value
                    if not pd.isna(label_df["event_label"]):
                        i = self.labels.index(label_df["event_label"])
                        onset = int(label_df["onset"])
                        offset = int(label_df["offset"])
                        y[onset:offset, i] = 1
                    return y
            for event_label in label_df:
                # List of string, so weak labels to be encoded in strong
                if type(event_label) is str:
                    # Bug fix: `is not ""` compared identity, not equality
                    # (SyntaxWarning on modern Python); use != for value equality.
                    if event_label != "":
                        i = self.labels.index(event_label)
                        y[:, i] = 1
                # List of list, with [label, onset, offset]
                elif len(event_label) == 3:
                    # Bug fix: same identity-vs-equality issue as above.
                    if event_label[0] != "":
                        i = self.labels.index(event_label[0])
                        onset = int(event_label[1])
                        offset = int(event_label[2])
                        y[onset:offset, i] = 1
                else:
                    raise NotImplementedError("cannot encode strong, type mismatch: {}".format(type(event_label)))
        else:
            raise NotImplementedError("To encode_strong, type is pandas.Dataframe with onset, offset and event_label"
                                      "columns, or it is a list or pandas Series of event labels, "
                                      "type given: {}".format(type(label_df)))
        return y

    def decode_weak(self, labels):
        """ Decode the encoded weak labels
        Args:
            labels: numpy.array, the encoded labels to be decoded
        Returns:
            list
            Decoded labels, list of string
        """
        result_labels = []
        for i, value in enumerate(labels):
            if value == 1:
                result_labels.append(self.labels[i])
        return result_labels

    def decode_strong(self, labels):
        """ Decode the encoded strong labels
        Args:
            labels: numpy.array, the encoded labels to be decoded
        Returns:
            list
            Decoded labels, list of list: [[label, onset offset], ...]
        """
        result_labels = []
        for i, label_column in enumerate(labels.T):
            change_indices = DecisionEncoder().find_contiguous_regions(label_column)
            # append [label, onset, offset] in the result list
            for row in change_indices:
                result_labels.append([self.labels[i], row[0], row[1]])
        return result_labels

    def state_dict(self):
        """Serializable parameters needed to rebuild this encoder."""
        return {"labels": self.labels,
                "n_frames": self.n_frames}

    @classmethod
    def load_state_dict(cls, state_dict):
        """Rebuild an encoder from a state_dict produced by state_dict()."""
        labels = state_dict["labels"]
        n_frames = state_dict["n_frames"]
        return cls(labels, n_frames)
| 6,343 | 39.407643 | 117 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/utilities/utils.py | from __future__ import print_function
import glob
import warnings
import numpy as np
import pandas as pd
import soundfile
import os
import os.path as osp
import librosa
import torch
from desed.utils import create_folder
from torch import nn
import config as cfg
def median_smoothing(input_tensor, win_length):
    """Per-class median filtering along the time axis.

    Args:
        input_tensor: torch.Tensor of shape (n_frames, n_classes); presumably
            frame-level class probabilities -- confirm with callers.
        win_length: int window length; assumed odd so (win_length-1)//2 pads
            symmetrically -- TODO confirm.

    Returns:
        torch.Tensor (n_frames, n_classes) on GPU, median-smoothed per class.

    NOTE(review): hard requires CUDA (output allocated with .cuda()).
    """
    nFrms, nClass = input_tensor.shape[0], input_tensor.shape[1]
    pad_length = (win_length-1) // 2
    output_tensor = torch.zeros(nFrms, nClass).cuda()
    for cter in range(nClass):
        # Smooth one class track at a time.
        tensor1D = input_tensor[:,cter]
        # Zero-pad both ends so a centred window exists for every frame.
        indices = torch.nn.functional.pad(tensor1D, (pad_length,0), mode="constant", value=0.)
        indices = torch.nn.functional.pad(indices, (0,pad_length), mode="constant", value=0.)
        # Overwrite the left padding with copies of the first real frame (edge replication).
        # NOTE(review): the right edge keeps zero padding (asymmetric) -- confirm intended.
        indices[..., :pad_length] = torch.cat(pad_length*[indices[..., pad_length].unsqueeze(-1)], dim=-1)
        # Sliding windows of size win_length with stride 1, then the median of each window.
        roll = indices.unfold(-1, win_length, 1)
        values, _ = torch.median(roll, -1)
        output_tensor[:, cter] = values[:nFrms]
    return output_tensor
def read_audio(path, target_fs=None):
    """ Read a wav file
    Args:
        path: str, path of the audio file
        target_fs: int, (Default value = None) sampling rate of the returned audio file, if not specified, the sampling
            rate of the audio file is taken
    Returns:
        tuple
        (numpy.array, sampling rate), array containing the audio at the sampling rate given
    """
    audio, fs = soundfile.read(path)
    # Down-mix multi-channel audio to mono by averaging the channels.
    if audio.ndim > 1:
        audio = audio.mean(axis=1)
    # No resampling needed: either no target rate or already at the target rate.
    if target_fs is None or fs == target_fs:
        return audio, fs
    resampled = librosa.resample(audio, orig_sr=fs, target_sr=target_fs)
    return resampled, target_fs
def weights_init(m):
    """Glorot/Xavier-style initialisation for Conv2d, BatchNorm, GRU and Linear layers.

    Intended to be used with `module.apply(weights_init)`; layers of any other
    type are left untouched.

    Args:
        m: the (sub)module to initialise
    """
    layer_type = m.__class__.__name__
    if 'Conv2d' in layer_type:
        nn.init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        m.bias.data.fill_(0)
    elif 'BatchNorm' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif 'GRU' in layer_type:
        for weight in m.parameters():
            # Only weight matrices get the orthogonal init; bias vectors are skipped.
            if weight.dim() > 1:
                nn.init.orthogonal_(weight.data)
    elif 'Linear' in layer_type:
        m.weight.data.normal_(0, 0.01)
        m.bias.data.zero_()
def to_cuda_if_available(*args):
    """ Transfer object (Module, Tensor) to GPU if GPU available
    Args:
        args: torch object to put on cuda if available (needs to have object.cuda() defined)
    Returns:
        Objects on GPU if GPUs available
    """
    if torch.cuda.is_available():
        moved = [torch_obj.cuda() for torch_obj in args]
    else:
        moved = list(args)
    # A single argument is unwrapped; several arguments come back as a list.
    if len(moved) == 1:
        return moved[0]
    return moved
class SaveBest:
    """ Callback to get the best value and epoch
    Args:
        val_comp: str, (Default value = "inf") "inf" or "sup", inf when we store the lowest model, sup when we
            store the highest model
    Attributes:
        comp: str, the comparison mode passed in
        best_val: float, the best values of the model based on the criterion chosen
        best_epoch: int, the epoch when the model was the best
        current_epoch: int, the current epoch of the model
    """
    def __init__(self, val_comp="inf"):
        self.comp = val_comp
        if val_comp in ("inf", "lt", "desc"):
            self.best_val = np.inf
        elif val_comp in ("sup", "gt", "asc"):
            self.best_val = 0
        else:
            raise NotImplementedError("value comparison is only 'inf' or 'sup'")
        self.best_epoch = 0
        self.current_epoch = 0

    def apply(self, value):
        """ Apply the callback
        Args:
            value: float, the value of the metric followed
        Returns:
            bool, True when the model should be saved (first epoch or new best)
        """
        if self.comp == "inf":
            improved = value < self.best_val
        else:
            improved = self.comp == "sup" and value > self.best_val
        first_epoch = self.current_epoch == 0
        if improved:
            self.best_epoch = self.current_epoch
            self.best_val = value
        self.current_epoch += 1
        return first_epoch or improved
class JSD(nn.Module):
    """Jensen-Shannon-divergence-style consistency cost built on nn.KLDivLoss.

    NOTE(review): nn.KLDivLoss expects its first argument to be
    log-probabilities, but `apply` passes p and q straight through -- confirm
    callers pass log-probs, otherwise this is not a true JSD. The result is
    also negated relative to the usual definition.
    NOTE(review): hard requires CUDA (.cuda() in __init__); and since the
    method is named `apply` rather than `forward`, nn.Module's __call__ will
    not dispatch to it -- callers must invoke .apply(p, q) directly.
    """
    def __init__(self):
        super(JSD, self).__init__()
        self.kld = nn.KLDivLoss().cuda()
    def apply(self, p, q):
        # m is the midpoint distribution between p and q.
        m = 0.5*(p+q)
        return -0.5*(self.kld(p,m)+self.kld(q,m))
class Entropy(nn.Module):
    """Shannon entropy of a probability tensor, reduced along `dim`.

    Computes -sum(x * log(x)) over the given dimension; assumes x holds
    strictly positive probabilities (log(0) would produce -inf/nan).
    """
    def __init__(self):
        super(Entropy, self).__init__()

    def forward(self, x, dim):
        plogp = x * torch.log(x)
        return -1.0 * plogp.sum(dim)
class EarlyStopping:
    """ Callback to stop training if the metric have not improved during multiple epochs.
    Args:
        patience: int, number of epochs with no improvement before stopping the model
        val_comp: str, (Default value = "inf") "inf" or "sup", inf when we store the lowest model, sup when we
            store the highest model
        init_patience: int, minimum number of epochs before stopping is allowed
    Attributes:
        patience: int, number of epochs with no improvement before stopping the model
        val_comp: str, "inf" or "sup"
        best_val: float, the best values of the model based on the criterion chosen
        best_epoch: int, the epoch when the model was the best
        current_epoch: int, the current epoch of the model
    """
    def __init__(self, patience, val_comp="inf", init_patience=0):
        self.patience = patience
        self.first_early_wait = init_patience
        self.val_comp = val_comp
        if val_comp == "inf":
            self.best_val = np.inf
        elif val_comp == "sup":
            self.best_val = 0
        else:
            raise NotImplementedError("value comparison is only 'inf' or 'sup'")
        self.current_epoch = 0
        self.best_epoch = 0

    def apply(self, value):
        """ Apply the callback
        Args:
            value: the value of the metric followed
        Returns:
            bool, True when training should stop (patience exhausted)
        """
        improved = (self.val_comp == "inf" and value < self.best_val) or \
                   (self.val_comp == "sup" and value > self.best_val)
        if improved:
            self.best_val = value
            self.best_epoch = self.current_epoch
        elif self.current_epoch - self.best_epoch > self.patience and self.current_epoch > self.first_early_wait:
            # Patience exhausted: reset the epoch counter and signal the stop.
            self.current_epoch = 0
            return True
        self.current_epoch += 1
        return False
class AverageMeterSet:
    """Keyed collection of ``AverageMeter`` objects, created lazily on update."""
    def __init__(self):
        self.meters = {}
    def __getitem__(self, key):
        return self.meters[key]
    def update(self, name, value, n=1):
        """Record ``value`` (seen ``n`` times) under ``name``, creating the meter if needed."""
        self.meters.setdefault(name, AverageMeter()).update(value, n)
    def reset(self):
        """Zero out every meter while keeping the keys."""
        for meter in self.meters.values():
            meter.reset()
    def values(self, postfix=''):
        return {key + postfix: m.val for key, m in self.meters.items()}
    def averages(self, postfix='/avg'):
        return {key + postfix: m.avg for key, m in self.meters.items()}
    def sums(self, postfix='/sum'):
        return {key + postfix: m.sum for key, m in self.meters.items()}
    def counts(self, postfix='/count'):
        return {key + postfix: m.count for key, m in self.meters.items()}
    def __str__(self):
        pieces = []
        for key, meter in self.meters.items():
            # Switch to scientific notation for tiny values
            fmat = ".2E" if meter.val < 0.01 else ".4f"
            pieces.append("{} {:{format}} \t".format(key, meter.val, format=fmat))
        return "".join(pieces)
class AverageMeter:
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0
    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
    def __format__(self, format):
        # Format the running average with the caller-provided format spec
        return "{:{spec}}".format(self.avg, spec=format)
def generate_tsv_wav_durations(audio_dir, out_tsv):
    """ Generate a dataframe with filename and duration of the file
    Args:
        audio_dir: str, the path of the folder where audio files are (used by glob.glob)
        out_tsv: str, the path of the output tsv file (skipped when None)
    Returns:
        pd.DataFrame, the dataframe containing filenames and durations
    """
    rows = [
        [os.path.basename(wav), soundfile.info(wav).duration]
        for wav in glob.glob(os.path.join(audio_dir, "*.wav"))
    ]
    durations = pd.DataFrame(rows, columns=["filename", "duration"])
    if out_tsv is not None:
        durations.to_csv(out_tsv, sep="\t", index=False, float_format="%.1f")
    return durations
def generate_tsv_from_isolated_events(wav_folder, out_tsv=None):
    """ Generate list of separated wav files in a folder and export them in a tsv file
    Separated audio files considered are all wav files in 'subdirectories' of the 'wav_folder'
    Args:
        wav_folder: str, path of the folder containing subdirectories (one for each mixture separated)
        out_tsv: str, path of the csv in which to save the list of files
    Returns:
        pd.DataFrame, having only one column with the filename considered
    """
    # Reuse a previously generated tsv as a cache when it exists
    if out_tsv is not None and os.path.exists(out_tsv):
        source_sep_df = pd.read_csv(out_tsv, sep="\t")
    else:
        source_sep_df = pd.DataFrame()
        # One subdirectory per separated mixture
        list_dirs = [d for d in os.listdir(wav_folder) if osp.isdir(osp.join(wav_folder, d))]
        for dirname in list_dirs:
            list_isolated_files = []
            for directory, subdir, fnames in os.walk(osp.join(wav_folder, dirname)):
                for fname in fnames:
                    if osp.splitext(fname)[1] in [".wav"]:
                        # Get the level folders and keep it in the tsv
                        subfolder = directory.split(dirname + os.sep)[1:]
                        if len(subfolder) > 0:
                            subdirs = osp.join(*subfolder)
                        else:
                            subdirs = ""
                        # Append the subfolders and name in the list of files
                        list_isolated_files.append(osp.join(dirname, subdirs, fname))
                    else:
                        # Non-wav files are skipped with a warning rather than failing
                        warnings.warn(f"Not only wav audio files in the separated source folder,"
                                      f"{fname} not added to the .tsv file")
            # NOTE(review): DataFrame.append is deprecated/removed in pandas >= 2;
            # consider pd.concat when upgrading pandas.
            source_sep_df = source_sep_df.append(pd.DataFrame(list_isolated_files, columns=["filename"]))
        if out_tsv is not None:
            create_folder(os.path.dirname(out_tsv))
            source_sep_df.to_csv(out_tsv, sep="\t", index=False, float_format="%.3f")
    return source_sep_df
def meta_path_to_audio_dir(tsv_path):
    """Map a metadata .tsv path to its matching audio directory.

    Replaces the "metadata" path segment with "audio" and strips the extension.
    """
    converted = tsv_path.replace("metadata", "audio")
    return os.path.splitext(converted)[0]
def audio_dir_to_meta_path(audio_dir):
    """Inverse of ``meta_path_to_audio_dir``: audio directory -> metadata .tsv path."""
    return "{}.tsv".format(audio_dir.replace("audio", "metadata"))
def get_durations_df(gtruth_path, audio_dir=None):
    """Return a dataframe of wav durations for a ground-truth set, cached on disk.

    Args:
        gtruth_path: str, path of the ground-truth tsv; only its extension is reused
            for the cache file name.
        audio_dir: str, optional folder holding the wav files.
            NOTE(review): when None it is derived from cfg.synthetic, not from
            gtruth_path -- confirm this is intended for non-synthetic sets.
    Returns:
        pd.DataFrame with columns "filename" and "duration"
    """
    if audio_dir is None:
        audio_dir = meta_path_to_audio_dir(cfg.synthetic)
    path, ext = os.path.splitext(gtruth_path)
    # Duration cache in the working directory, recomputed only when missing
    path_durations_synth = "./validation_durations" + ext
    if not os.path.exists(path_durations_synth):
        durations_df = generate_tsv_wav_durations(audio_dir, path_durations_synth)
    else:
        durations_df = pd.read_csv(path_durations_synth, sep="\t")
    return durations_df
| 11,860 | 33.988201 | 119 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/utilities/Logger.py | import logging
import sys
import logging.config
def create_logger(logger_name, terminal_level=logging.INFO):
    """ Create a logger.
    Args:
        logger_name: str, name of the logger
        terminal_level: int or str, logging level in the terminal
            (e.g. logging.INFO or "info"); unknown strings map to NOTSET
    Returns:
        logging.Logger, the configured logger (a stdout handler is attached once)
    """
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
    })
    logger = logging.getLogger(logger_name)
    tool_formatter = logging.Formatter('%(levelname)s - %(name)s - %(message)s')
    if type(terminal_level) is str:
        lowered = terminal_level.lower()
        if lowered == "debug":
            resolved_level = logging.DEBUG
        elif lowered == "info":
            resolved_level = logging.INFO
        elif "warn" in lowered:
            # Matches both "warn" and "warning"
            resolved_level = logging.WARNING
        elif lowered == "error":
            resolved_level = logging.ERROR
        elif lowered == "critical":
            resolved_level = logging.CRITICAL
        else:
            resolved_level = logging.NOTSET
    else:
        resolved_level = terminal_level
    logger.setLevel(resolved_level)
    # Attach a stdout handler only when the logger has none yet (avoids duplicates)
    if not logger.handlers:
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setLevel(resolved_level)
        stdout_handler.set_name('stdout')
        stdout_handler.setFormatter(tool_formatter)
        logger.addHandler(stdout_handler)
    return logger
| 1,509 | 33.318182 | 80 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/utilities/ramps.py | import numpy as np
def exp_rampup(current, rampup_length):
    """Exponential rampup inspired by https://arxiv.org/abs/1610.02242
    Args:
        current: float, current step of the rampup
        rampup_length: float, length of the rampup
    Returns:
        float in (0, 1], reaching 1.0 once ``current >= rampup_length``
    """
    # A zero-length rampup means no warm-up at all: the weight is already 1
    if rampup_length == 0:
        return 1.0
    progress = np.clip(current, 0.0, rampup_length) / rampup_length
    remaining = 1.0 - progress
    return float(np.exp(-5.0 * remaining * remaining))
| 473 | 26.882353 | 70 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/utilities/Scaler.py | import time
import warnings
import numpy as np
import torch
import json
from utilities.Logger import create_logger
logger = create_logger(__name__)
class Scaler:
    """
    Incrementally computes mean/std statistics over one dataset and uses them to
    normalize batches. Statistics are averaged over the last dimension via mean().
    """
    def __init__(self):
        # Running statistics, filled by means()/calculate_scaler()
        self.mean_ = None
        self.mean_of_square_ = None
        self.std_ = None
    # compute the mean incrementaly
    def mean(self, data, axis=-1):
        # -1 means have at the end a mean vector of the last dimension
        # (reduce leading axes one by one until a 1-D vector remains)
        if axis == -1:
            mean = data
            while len(mean.shape) != 1:
                mean = np.mean(mean, axis=0, dtype=np.float64)
        else:
            mean = np.mean(data, axis=axis, dtype=np.float64)
        return mean
    # compute variance thanks to mean and mean of square: Var[X] = E[X^2] - E[X]^2
    def variance(self, mean, mean_of_square):
        return mean_of_square - mean**2
    def means(self, dataset):
        """
        Accumulate the mean and the mean of squares over a whole dataset.
        :param dataset: dataset, from DataLoad class, each sample is an (X, y) tuple.

        NOTE(review): per-batch means are averaged over the number of batches,
        which equals the global mean only when all batches have the same size.
        """
        logger.info('computing mean')
        start = time.time()
        shape = None
        counter = 0
        for sample in dataset:
            # Samples may be (X, y) pairs or bare X arrays/tensors
            if type(sample) in [tuple, list] and len(sample) == 2:
                batch_x, _ = sample
            else:
                batch_x = sample
            if type(batch_x) is torch.Tensor:
                batch_x_arr = batch_x.numpy()
            else:
                batch_x_arr = batch_x
            data_square = batch_x_arr ** 2
            counter += 1
            if shape is None:
                shape = batch_x_arr.shape
            else:
                if not batch_x_arr.shape == shape:
                    raise NotImplementedError("Not possible to add data with different shape in mean calculation yet")
            # assume first item will have shape info
            if self.mean_ is None:
                self.mean_ = self.mean(batch_x_arr, axis=-1)
            else:
                self.mean_ += self.mean(batch_x_arr, axis=-1)
            if self.mean_of_square_ is None:
                self.mean_of_square_ = self.mean(data_square, axis=-1)
            else:
                self.mean_of_square_ += self.mean(data_square, axis=-1)
        # Average the accumulated per-batch means over the number of batches
        self.mean_ /= counter
        self.mean_of_square_ /= counter
        # ### To be used if data different shape, but need to stop the iteration before.
        # rest = len(dataset) - i
        # if rest != 0:
        #     weight = rest / float(i + rest)
        #     X, y = dataset[-1]
        #     data_square = X ** 2
        #     mean = mean * (1 - weight) + self.mean(X, axis=-1) * weight
        #     mean_of_square = mean_of_square * (1 - weight) + self.mean(data_square, axis=-1) * weight
        logger.debug('time to compute means: ' + str(time.time() - start))
        return self
    def std(self, variance):
        # Standard deviation from the variance
        return np.sqrt(variance)
    def calculate_scaler(self, dataset):
        """Compute and store mean_ and std_ over the dataset; returns (mean_, std_)."""
        self.means(dataset)
        variance = self.variance(self.mean_, self.mean_of_square_)
        self.std_ = self.std(variance)
        return self.mean_, self.std_
    def normalize(self, batch):
        """Standardize a batch with the stored statistics; preserves the input type."""
        if type(batch) is torch.Tensor:
            batch_ = batch.numpy()
            batch_ = (batch_ - self.mean_) / self.std_
            return torch.Tensor(batch_)
        else:
            return (batch - self.mean_) / self.std_
    def state_dict(self):
        """Serializable statistics; std_ is recomputed on load from the two means."""
        if type(self.mean_) is not np.ndarray:
            raise NotImplementedError("Save scaler only implemented for numpy array means_")
        dict_save = {"mean_": self.mean_.tolist(),
                     "mean_of_square_": self.mean_of_square_.tolist()}
        return dict_save
    def save(self, path):
        """Dump state_dict() as JSON to ``path``."""
        dict_save = self.state_dict()
        with open(path, "w") as f:
            json.dump(dict_save, f)
    def load(self, path):
        """Load statistics previously written by save()."""
        with open(path, "r") as f:
            dict_save = json.load(f)
        self.load_state_dict(dict_save)
    def load_state_dict(self, state_dict):
        """Restore the statistics and recompute std_ from the stored means."""
        self.mean_ = np.array(state_dict["mean_"])
        self.mean_of_square_ = np.array(state_dict["mean_of_square_"])
        variance = self.variance(self.mean_, self.mean_of_square_)
        self.std_ = self.std(variance)
class ScalerPerAudio:
    """Normalize inputs one by one
    Args:
        normalization: str, in {"global", "per_band"}, scope of the statistics
        type_norm: str, in {"standard", "max", "min-max"}, normalization scheme
    """
    def __init__(self, normalization="global", type_norm="standard"):
        # Default changed from "mean" (which normalize() never handled and always
        # raised NotImplementedError) to "standard"; explicit callers are unaffected.
        self.normalization = normalization
        self.type_norm = type_norm
    def normalize(self, spectrogram):
        """ Apply the transformation on data
        Args:
            spectrogram: np.array or torch.Tensor, the data to be modified,
                assumed to have 3 dimensions (statistics come from spectrogram[0])
        Returns:
            np.array or torch.Tensor (same kind as the input)
            The transformed data
        """
        if type(spectrogram) is torch.Tensor:
            tensor = True
            spectrogram = spectrogram.numpy()
        else:
            tensor = False
        if self.normalization == "global":
            axis = None
        elif self.normalization == "per_band":
            axis = 0
        else:
            raise NotImplementedError("normalization is 'global' or 'per_band'")
        if self.type_norm == "standard":
            res_data = (spectrogram - spectrogram[0].mean(axis)) / (spectrogram[0].std(axis) + np.finfo(float).eps)
        elif self.type_norm == "max":
            res_data = spectrogram[0] / (np.abs(spectrogram[0].max(axis)) + np.finfo(float).eps)
        elif self.type_norm == "min-max":
            res_data = (spectrogram - spectrogram[0].min(axis)) / (spectrogram[0].max(axis) - spectrogram[0].min(axis)
                                                                   + np.finfo(float).eps)
        else:
            raise NotImplementedError("No other type_norm implemented except {'standard', 'max', 'min-max'}")
        if np.isnan(res_data).any():
            # Guard against division by ~0 producing NaN/inf
            res_data = np.nan_to_num(res_data, posinf=0, neginf=0)
            warnings.warn("Trying to divide by zeros while normalizing spectrogram, replacing nan by 0")
        if tensor:
            res_data = torch.Tensor(res_data)
        return res_data
    def state_dict(self):
        # Stateless transform: these four methods exist only for API
        # compatibility with Scaler.
        pass
    def save(self, path):
        pass
    def load(self, path):
        pass
    def load_state_dict(self, state_dict):
        pass
| 6,478 | 31.888325 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/utilities/Transforms.py | import warnings
import librosa
import random
import numpy as np
import torch
class Transform:
    """Base class for sample transforms.

    A sample is a ``(data, label)`` tuple, optionally wrapped as
    ``((data, label), index)``. Subclasses implement ``transform_data`` and set
    ``self.flag`` in their ``__init__``: when ``flag`` is True the transform also
    receives the label (``transform_data(data, target=label)``), as done by the
    Mixup and TemporalShifting subclasses in this module.
    """
    def transform_data(self, data):
        # Mandatory to be defined by subclasses
        raise NotImplementedError("Abstract object")
    def transform_label(self, label):
        # Do nothing, to be changed in subclasses if needed
        return label
    def _apply_transform(self, sample_no_index):
        # sample_no_index: (data, label) where data may itself be a tuple of inputs
        data, label = sample_no_index
        if type(data) is tuple: # meaning there is more than one data_input (could be duet, triplet...)
            data = list(data)
            if type(data[0]) is tuple:
                # Doubly-nested case: ((inputs...), label2) -- transform each inner input
                data2, label2 = data
                data2 = list(data2)
                for k in range(len(data2)):
                    data2[k] = self.transform_data(data2[k])
                data2 = tuple(data2)
                data = data2, label2
            else:
                # Simple tuple of inputs: transform each one independently
                for k in range(len(data)):
                    data[k] = self.transform_data(data[k])
                data = tuple(data)
        else:
            # Single input; flag controls whether the label is passed along
            if self.flag:
                data = self.transform_data(data, target = label)
            else:
                data = self.transform_data(data)
        label = self.transform_label(label)
        return data, label
    def __call__(self, sample):
        """ Apply the transformation
        Args:
            sample: tuple, a sample defined by a DataLoad class
        Returns:
            tuple
            The transformed tuple
        """
        # An int second element marks an indexed sample ((data, label), index)
        if type(sample[1]) is int:  # Means there is an index, may be another way to make it cleaner
            sample_data, index = sample
            sample_data = self._apply_transform(sample_data)
            sample = sample_data, index
        else:
            sample = self._apply_transform(sample)
        return sample
class GaussianNoise(Transform):
    """ Apply gaussian noise
    Args:
        mean: float, the mean of the gaussian distribution.
        std: float, scale parameter of the gaussian distribution
            (the noise is drawn with scale ``std ** 2``, matching the historical behavior).
    Attributes:
        mean: float, the mean of the gaussian distribution.
        std: float, scale parameter of the gaussian distribution.
    """
    def __init__(self, mean=0, std=0.5):
        # flag=False: transform_data does not consume the label (see
        # Transform._apply_transform). It was missing here, which made this
        # transform crash with AttributeError when called through Transform.__call__.
        self.flag = False
        self.mean = mean
        self.std = std
    def transform_data(self, data):
        """ Apply the transformation on data
        Args:
            data: np.array, the data to be modified
        Returns:
            np.array
            The transformed data
        """
        # Use the configured mean/std instead of the previously hard-coded
        # (0, 0.5 ** 2); the defaults reproduce the old behavior exactly.
        # np.abs keeps the noise strictly additive.
        return data + np.abs(np.random.normal(self.mean, self.std ** 2, data.shape))
class ApplyLog(Transform):
    """Turn an amplitude spectrogram into a log (dB) spectrogram."""
    def __init__(self):
        # This transform only touches the data, never the label
        self.flag = False
    def transform_data(self, data):
        """ Apply the transformation on data
        Args:
            data: np.array, amplitude spectrogram (time x freq)
        Returns:
            np.array
            The spectrogram converted to dB, same shape as the input
        """
        transposed_db = librosa.amplitude_to_db(data.T)
        return transposed_db.T
def pad_trunc_seq(x, max_len):
    """Pad or truncate a sequence data to a fixed length.
    The sequence should be on axis -2.
    Args:
        x: ndarray, input sequence data.
        max_len: integer, length of sequence to be padded or truncated.
    Returns:
        ndarray, Padded or truncated input sequence data.
    """
    length = x.shape[-2]
    if length > max_len:
        # Too long: keep only the first max_len steps on the time axis
        return x[..., :max_len, :]
    # Too short (or exact): zero-pad at the end of the time axis only
    pad_widths = [(0, 0)] * (x.ndim - 2) + [(0, max_len - length), (0, 0)]
    return np.pad(x, pad_widths, mode="constant")
class PadOrTrunc(Transform):
    """ Pad or truncate a sequence given a number of frames
    Args:
        nb_frames: int, the number of frames to match
        apply_to_label: bool, whether the label sequence is padded/truncated as well
    Attributes:
        nb_frames: int, the number of frames to match
    """
    def __init__(self, nb_frames, apply_to_label=False):
        self.flag = False
        self.nb_frames = nb_frames
        self.apply_to_label = apply_to_label
    def transform_label(self, label):
        """Pad/truncate the label only when requested at construction time."""
        if not self.apply_to_label:
            return label
        return pad_trunc_seq(label, self.nb_frames)
    def transform_data(self, data):
        """ Apply the transformation on data
        Args:
            data: np.array, the data to be modified
        Returns:
            np.array
            The transformed data
        """
        return pad_trunc_seq(data, self.nb_frames)
class AugmentGaussianNoise(Transform):
    """ Data augmentation returning both the clean input and a noisy copy.
    Either (mean, std) or snr must be given.
    Args:
        mean: float, mean of the Gaussian noise to add
        std: float, scale of the Gaussian noise (the noise is drawn with scale ``std ** 2``)
        snr: float, average snr to be used for data augmentation (alternative to std)
    Attributes:
        mean: float, mean of the Gaussian noise to add
        std: float, scale of the Gaussian noise
        snr: float, average snr used for data augmentation
    """
    def __init__(self, mean=0., std=None, snr=None):
        # flag=False: transform_data does not consume the label
        self.flag = False
        self.mean = mean
        self.std = std
        self.snr = snr
    @staticmethod
    def gaussian_noise(features, snr):
        """Apply gaussian noise on each point of the data
        Args:
            features: numpy.array, features to be modified
            snr: float, average snr to be used for data augmentation
        Returns:
            numpy.ndarray
            Modified features
        """
        # If using source separation, using only the first audio (the mixture) to compute the gaussian noise,
        # Otherwise it just removes the first axis if it was an extended one
        if len(features.shape) == 3:
            feat_used = features[0]
        else:
            feat_used = features
        std = np.sqrt(np.mean((feat_used ** 2) * (10 ** (-snr / 10)), axis=-2))
        try:
            noise = np.random.normal(0, std, features.shape)
        except Exception as e:
            warnings.warn(f"the computed noise did not work std: {std}, using 0.5 for std instead")
            noise = np.random.normal(0, 0.5, features.shape)
        return features + noise
    def transform_data(self, data):
        """ Apply the transformation on data
        Args:
            data: np.array, the data to be modified
        Returns:
            (np.array, np.array)
            (original data, noisy_data (data + noise))
        """
        if self.std is not None:
            # Use the configured (mean, std) instead of the previously hard-coded
            # (0, 0.5 ** 2); passing std=0.5 reproduces the old behavior exactly.
            noisy_data = data + np.abs(np.random.normal(self.mean, self.std ** 2, data.shape))
        elif self.snr is not None:
            noisy_data = self.gaussian_noise(data, self.snr)
        else:
            raise NotImplementedError("Only (mean, std) or snr can be given")
        return data, noisy_data
class ToTensor(Transform):
    """Convert ndarrays in sample to Tensors.
    Args:
        unsqueeze_axis: int, (Default value = None) add an dimension to the axis mentioned.
            Useful to add a channel axis to use CNN.
    Attributes:
        unsqueeze_axis: int, add an dimension to the axis mentioned.
            Useful to add a channel axis to use CNN.
    """
    def __init__(self, unsqueeze_axis=None):
        self.flag = False
        self.unsqueeze_axis = unsqueeze_axis
    def transform_data(self, data):
        """ Apply the transformation on data
        Args:
            data: np.array, the data to be modified
        Returns:
            torch.Tensor
            The transformed data
        """
        tensor = torch.from_numpy(data).float()
        if self.unsqueeze_axis is None:
            return tensor
        return tensor.unsqueeze(self.unsqueeze_axis)
    def transform_label(self, label):
        # float otherwise error in the downstream loss computation
        return torch.from_numpy(label).float()
class Normalize(Transform):
    """Normalize inputs
    Args:
        scaler: Scaler object, the scaler to be used to normalize the data
    Attributes:
        scaler : Scaler object, the scaler to be used to normalize the data
    """
    def __init__(self, scaler):
        self.flag = False
        self.scaler = scaler
    def transform_data(self, data):
        """ Apply the transformation on data
        Args:
            data: np.array, the data to be modified
        Returns:
            np.array
            The transformed data (delegated to the wrapped scaler)
        """
        return self.scaler.normalize(data)
class Mixup(Transform):
    """Mixup data augmentation: blend each sample with another random sample of the batch.
    Args:
        alpha: float, first parameter of the Beta distribution drawing the mixing weight
        beta: float, second parameter of the Beta distribution
        mixup_label_type: str, in {"soft", "hard"}, how the two labels are combined
    """
    def __init__(self, alpha=0.2, beta=0.2, mixup_label_type="soft"):
        # flag=True: transform_data also needs the target (see Transform._apply_transform)
        self.flag = True
        self.alpha = alpha
        self.beta = beta
        self.mixup_label_type = mixup_label_type
    def transform_data(self, data, target=None):
        """ Mix the batch with a random permutation of itself.
        Args:
            data: tensor, batch of features (first axis is the batch axis)
            target: tensor, optional batch of labels aligned with data
        Returns:
            ((data, mixed_data), mixed_target) when target is given,
            (data, mixed_data) otherwise
        """
        batch_size = data.shape[0]
        # Mixing coefficient drawn from Beta(alpha, beta)
        c = np.random.beta(self.alpha, self.beta)
        perm = torch.randperm(batch_size)
        mixed_data = c * data + (1 - c) * data[perm, :]
        if target is not None:
            if self.mixup_label_type == "soft":
                mixed_target = np.clip(
                    c * target + (1 - c) * target[perm, :], a_min=0, a_max=1)
            elif self.mixup_label_type == "hard":
                mixed_target = np.clip(target + target[perm, :], a_min=0, a_max=1)
            else:
                # Previously interpolated the bare name `mixup_label_type`, which
                # raised NameError instead of this NotImplementedError.
                raise NotImplementedError(
                    f"mixup_label_type: {self.mixup_label_type} not implemented. choice in "
                    f"{'soft', 'hard'}"
                )
            return (data, mixed_data), mixed_target
        else:
            return data, mixed_data
class TemporalShifting(Transform):
    """Randomly roll the features along the time axis, returning clean and shifted views.
    Args:
        net_pooling: int, temporal pooling factor of the network; label shifts happen
            at this coarser frame resolution
    """
    def __init__(self, net_pooling=4):
        self.flag = True
        self.net_pooling = net_pooling
    def transform_data(self, data, target=None):
        """Roll data (and optionally target) by a random number of frames.
        Args:
            data: np.array of shape (frames, bands)
            target: np.array, optional frame-level labels at the pooled resolution
        Returns:
            ((data, shifted), shifted_target) when target is given, else (data, shifted)
        """
        n_frames, n_bands = data.shape
        # Shift amount in feature frames, drawn from N(0, 40) and truncated to int
        shift_frames = int(random.gauss(0, 40))
        shifted = np.roll(data, shift_frames, axis=0)
        if target is None:
            return data, shifted
        # Labels live at the pooled frame rate; floor division keeps the sign
        # semantics of the original expression for negative shifts as well
        label_shift = shift_frames // self.net_pooling
        return (data, shifted), np.roll(target, label_shift, axis=0)
class CombineChannels(Transform):
    """ Combine channels when using source separation (to remove the channels with low intensity)
    Args:
        combine_on: str, in {"max", "min"}, the channel in which to combine the channels with the smallest energy
        n_channel_mix: int, the number of lowest energy channels to combine into another one
    """
    def __init__(self, combine_on="max", n_channel_mix=2):
        self.flag = False
        self.combine_on = combine_on
        self.n_channel_mix = n_channel_mix
    def transform_data(self, data):
        """ Apply the transformation on data
        Args:
            data: np.array, the data to be modified, assuming the first values are the mixture,
                and the other channels the sources
        Returns:
            np.array
            The transformed data
        """
        mix = data[:1]  # :1 is just to keep the first axis
        sources = data[1:]
        channels_en = (sources ** 2).sum(-1).sum(-1)  # Get the energy per channel
        indexes_sorted = channels_en.argsort()
        # Previously hard-coded to 2, silently ignoring the constructor argument;
        # the default value keeps the historical behavior.
        n_mix = self.n_channel_mix
        sources_to_add = sources[indexes_sorted[:n_mix]].sum(0)
        if self.combine_on == "min":
            # Fold the low-energy channels into the weakest remaining source
            sources[indexes_sorted[n_mix]] += sources_to_add
        elif self.combine_on == "max":
            # Fold the low-energy channels into the strongest source
            sources[indexes_sorted[-1]] += sources_to_add
        return np.concatenate((mix, sources[indexes_sorted[n_mix:]]))
def get_transforms(frames, scaler=None, add_axis=0, noise_dict_params=None, combine_channels_args=None):
    """Build the standard pipeline: optional augmentations, then log + pad + tensor (+ normalize)."""
    pipeline = []
    if combine_channels_args is not None:
        pipeline.append(CombineChannels(*combine_channels_args))
    if noise_dict_params is not None:
        pipeline.append(AugmentGaussianNoise(**noise_dict_params))
    pipeline.append(ApplyLog())
    pipeline.append(PadOrTrunc(nb_frames=frames))
    # add_axis=None keeps the tensor as-is; any int adds a (channel) axis there
    pipeline.append(ToTensor(unsqueeze_axis=add_axis))
    if scaler is not None:
        pipeline.append(Normalize(scaler=scaler))
    return Compose(pipeline)
def get_transforms_v2(frames, scaler=None, add_axis=0, noise_dict_params=None, mixup_dict_params=None, shift_dict_params=None, combine_channels_args=None):
    """Like get_transforms, with optional Mixup and temporal-shift augmentations."""
    pipeline = []
    if combine_channels_args is not None:
        pipeline.append(CombineChannels(*combine_channels_args))
    if noise_dict_params is not None:
        pipeline.append(AugmentGaussianNoise(**noise_dict_params))
    if mixup_dict_params is not None:
        pipeline.append(Mixup(**mixup_dict_params))
    if shift_dict_params is not None:
        pipeline.append(TemporalShifting(**shift_dict_params))
    # add_axis=None keeps the tensor as-is; any int adds a (channel) axis there
    pipeline.extend([ApplyLog(), PadOrTrunc(nb_frames=frames), ToTensor(unsqueeze_axis=add_axis)])
    if scaler is not None:
        pipeline.append(Normalize(scaler=scaler))
    return Compose(pipeline)
class Compose(object):
    """Composes several transforms together.
    Args:
        transforms: list of ``Transform`` objects, list of transforms to compose.
            Example of transform: ToTensor()
    """
    def __init__(self, transforms):
        self.transforms = transforms
    def add_transform(self, transform):
        """Return a new Compose with ``transform`` appended; this one is left untouched."""
        extended = self.transforms.copy()
        extended.append(transform)
        return Compose(extended)
    def __call__(self, audio):
        for transform in self.transforms:
            audio = transform(audio)
        return audio
    def __repr__(self):
        body = self.__class__.__name__ + '('
        for transform in self.transforms:
            body += '\n'
            body += '    {0}'.format(transform)
        body += '\n)'
        return body
| 13,603 | 30.710956 | 155 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/data_utils/Desed.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import functools
import glob
import multiprocessing
from contextlib import closing
import scipy.signal as sp
import numpy as np
import os
import os.path as osp
import librosa
import time
import pandas as pd
import desed
from tqdm import tqdm
import config as cfg
from utilities.Logger import create_logger
from utilities.utils import read_audio, meta_path_to_audio_dir
logger = create_logger(__name__, terminal_level=cfg.terminal_level)
class DESED:
"""DCASE 2020 task 4 dataset, uses DESED dataset
Data are organized in `audio/` and corresponding `metadata/` folders.
audio folder contains wav files, and metadata folder contains .tsv files.
The organisation should always be the same in the audio and metadata folders. (See example)
If there are multiple metadata files for a single audio files, add the name in the list of `merged_folders_name`.
(See validation folder example). Be careful, it works only for one level of folder.
tab separated value metadata files (.tsv) contains columns:
- filename (unlabeled data)
- filename event_labels (weakly labeled data)
- filename onset offset event_label (strongly labeled data)
Example:
- dataset
- metadata
- train
- synthetic20
- soundscapes.tsv (audio_dir associated: audio/train/synthetic20/soundscapes)
- unlabel_in_domain.tsv (audio_dir associated: audio/train/unlabel_in_domain)
- weak.tsv (audio_dir associated: audio/train/weak)
- validation
- validation.tsv (audio_dir associated: audio/validation) --> so audio_dir has to be declared
- test_dcase2018.tsv (audio_dir associated: audio/validation)
- eval_dcase2018.tsv (audio_dir associated: audio/validation)
-eval
- public.tsv (audio_dir associated: audio/eval/public)
- audio
- train
- synthetic20 (synthetic data generated for dcase 2020, you can create your own)
- soundscapes
- separated_sources (optional, only using source separation)
- unlabel_in_domain
- unlabel_in_domain_ss (optional, only using source separation)
- weak
- weak_ss (optional, only using source separation)
- validation
- validation_ss (optional, only using source separation)
Args:
base_feature_dir: str, optional, base directory to store the features
recompute_features: bool, optional, whether or not to recompute features
compute_log: bool, optional, whether or not saving the logarithm of the feature or not
(particularly useful to put False to apply some data augmentation)
Attributes:
base_feature_dir: str, base directory to store the features
recompute_features: bool, whether or not to recompute features
compute_log: bool, whether or not saving the logarithm of the feature or not
(particularly useful to put False to apply some data augmentation)
feature_dir : str, directory to store the features
"""
def __init__(self, base_feature_dir="features", recompute_features=False, compute_log=True):
# Parameters, they're kept if we need to reproduce the dataset
self.sample_rate = cfg.sample_rate
self.n_window = cfg.n_window
self.hop_size = cfg.hop_size
self.n_mels = cfg.n_mels
self.mel_min_max_freq = (cfg.mel_f_min, cfg.mel_f_max)
# Defined parameters
self.recompute_features = recompute_features
self.compute_log = compute_log
# Feature dir to not have the same name with different parameters
ext_freq = ''
if self.mel_min_max_freq != (0, self.sample_rate / 2):
ext_freq = f"_{'_'.join(self.mel_min_max_freq)}"
feature_dir = osp.join(base_feature_dir, f"sr{self.sample_rate}_win{self.n_window}_hop{self.hop_size}"
f"_mels{self.n_mels}{ext_freq}")
if not self.compute_log:
feature_dir += "_nolog"
self.feature_dir = osp.join(feature_dir, "features")
self.meta_feat_dir = osp.join(feature_dir, "metadata")
# create folder if not exist
os.makedirs(self.feature_dir, exist_ok=True)
os.makedirs(self.meta_feat_dir, exist_ok=True)
def state_dict(self):
""" get the important parameters to save for the class
Returns:
dict
"""
parameters = {
"feature_dir": self.feature_dir,
"meta_feat_dir": self.meta_feat_dir,
"compute_log": self.compute_log,
"sample_rate": self.sample_rate,
"n_window": self.n_window,
"hop_size": self.hop_size,
"n_mels": self.n_mels,
"mel_min_max_freq": self.mel_min_max_freq
}
return parameters
@classmethod
def load_state_dict(cls, state_dict):
""" load the dataset from previously saved parameters
Args:
state_dict: dict, parameter saved with state_dict function
Returns:
DESED class object with the right parameters
"""
desed_obj = cls()
desed_obj.feature_dir = state_dict["feature_dir"]
desed_obj.meta_feat_dir = state_dict["meta_feat_dir"]
desed_obj.compute_log = state_dict["compute_log"]
desed_obj.sample_rate = state_dict["sample_rate"]
desed_obj.n_window = state_dict["n_window"]
desed_obj.hop_size = state_dict["hop_size"]
desed_obj.n_mels = state_dict["n_mels"]
desed_obj.mel_min_max_freq = state_dict["mel_min_max_freq"]
return desed_obj
    def initialize_and_get_df(self, tsv_path, audio_dir=None, audio_dir_ss=None, pattern_ss=None,
                              ext_ss_feature_file="_ss", nb_files=None, download=False, keep_sources=None):
        """ Initialize the dataset, extract the features dataframes
        Args:
            tsv_path: str, tsv path in the initial dataset
            audio_dir: str, the path where to search the filename of the df
            audio_dir_ss: str, the path where to search the separated_sources
            pattern_ss: str, only when audio_dir_ss is not None, this should be defined. The pattern that's added
                after normal filenames to get associated separated sources (have been done during source separation)
            ext_ss_feature_file: str, only when audio_dir_ss is not None, what to add at the end of the feature files
            nb_files: int, optional, the number of file to take in the dataframe if taking a small part of the dataset.
            download: bool, optional, whether or not to download the data from the internet (youtube).
            keep_sources: list, if sound_separation is used, it indicates which source is kept to create the features
        Returns:
            pd.DataFrame
            The dataframe containing the right features and labels
        """
        # Check parameters
        if audio_dir_ss is not None:
            assert osp.exists(audio_dir_ss), f"the directory of separated sources: {audio_dir_ss} does not exist, " \
                                             f"cannot extract features from it"
            if pattern_ss is None:
                # Default folder-name suffix produced by the source separation step
                pattern_ss = "_events"
        if audio_dir is None:
            # Derive the audio directory from the metadata path convention
            audio_dir = meta_path_to_audio_dir(tsv_path)
        assert osp.exists(audio_dir), f"the directory {audio_dir} does not exist"
        # Path to save features, subdir, otherwise could have duplicate paths for synthetic data
        fdir = audio_dir if audio_dir_ss is None else audio_dir_ss
        fdir = fdir[:-1] if fdir.endswith(osp.sep) else fdir
        # Keep the two last path components to disambiguate feature locations
        subdir = osp.sep.join(fdir.split(osp.sep)[-2:])
        meta_feat_dir = osp.join(self.meta_feat_dir, subdir)
        feature_dir = osp.join(self.feature_dir, subdir)
        logger.debug(feature_dir)
        os.makedirs(meta_feat_dir, exist_ok=True)
        os.makedirs(feature_dir, exist_ok=True)
        df_meta = self.get_df_from_meta(tsv_path, nb_files, pattern_ss=pattern_ss)
        logger.info(f"{tsv_path} Total file number: {len(df_meta.filename.unique())}")
        # Download real data
        if download: # Get only one filename once
            filenames = df_meta.filename.drop_duplicates()
            self.download(filenames, audio_dir)
        # Meta filename
        ext_tsv_feature = ""
        if audio_dir_ss is not None:
            ext_tsv_feature = ext_ss_feature_file
        fname, ext = osp.splitext(osp.basename(tsv_path))
        feat_fname = fname + ext_tsv_feature + ext
        if nb_files is not None:
            feat_fname = f"{nb_files}_{feat_fname}"
        features_tsv = osp.join(meta_feat_dir, feat_fname)
        # if not osp.exists(features_tsv):
        t = time.time()
        logger.info(f"Getting features ...")
        df_features = self.extract_features_from_df(df_meta, audio_dir, feature_dir,
                                                    audio_dir_ss, pattern_ss,
                                                    ext_ss_feature_file, keep_sources)
        if len(df_features) != 0:
            #df_features.to_csv(features_tsv, sep="\t", index=False)
            logger.info(f"features created/retrieved in {time.time() - t:.2f}s, metadata: {features_tsv}")
        else:
            raise IndexError(f"Empty features DataFrames {features_tsv}")
        return df_features
def calculate_mel_spec(self, audio, compute_log=False):
"""
Calculate a mal spectrogram from raw audio waveform
Note: The parameters of the spectrograms are in the config.py file.
Args:
audio : numpy.array, raw waveform to compute the spectrogram
compute_log: bool, whether to get the output in dB (log scale) or not
Returns:
numpy.array
containing the mel spectrogram
"""
# Compute spectrogram
ham_win = np.hamming(self.n_window)
# preemphasis
audio = sp.lfilter([1, -0.97], [1], audio)
spec = librosa.stft(
audio,
n_fft=self.n_window,
hop_length=self.hop_size,
window=ham_win,
center=True,
pad_mode='reflect'
)
mel_spec = librosa.feature.melspectrogram(
S=np.abs(spec), # amplitude, for energy: spec**2 but don't forget to change amplitude_to_db.
sr=self.sample_rate,
n_mels=self.n_mels,
fmin=self.mel_min_max_freq[0], fmax=self.mel_min_max_freq[1],
htk=False, norm=None)
if compute_log:
mel_spec = librosa.amplitude_to_db(mel_spec) # 10 * log10(S**2 / ref), ref default is 1
mel_spec = mel_spec.T
mel_spec = mel_spec.astype(np.float32)
return mel_spec
def load_and_compute_mel_spec(self, wav_path):
(audio, _) = read_audio(wav_path, self.sample_rate)
if audio.shape[0] == 0:
raise IOError("File {wav_path} is corrupted!")
else:
t1 = time.time()
mel_spec = self.calculate_mel_spec(audio, self.compute_log)
logger.debug(f"compute features time: {time.time() - t1}")
return mel_spec
def _extract_features(self, wav_path, out_path):
if not osp.exists(out_path):
try:
mel_spec = self.load_and_compute_mel_spec(wav_path)
os.makedirs(osp.dirname(out_path), exist_ok=True)
np.save(out_path, mel_spec)
except IOError as e:
logger.error(e)
def _extract_features_ss(self, wav_path, wav_paths_ss, out_path):
try:
features = np.expand_dims(self.load_and_compute_mel_spec(wav_path), axis=0)
for wav_path_ss in wav_paths_ss:
sep_features = np.expand_dims(self.load_and_compute_mel_spec(wav_path_ss), axis=0)
features = np.concatenate((features, sep_features))
os.makedirs(osp.dirname(out_path), exist_ok=True)
np.save(out_path, features)
except IOError as e:
logger.error(e)
    def _extract_features_file(self, filename, audio_dir, feature_dir, audio_dir_ss=None, pattern_ss=None,
                               ext_ss_feature_file="_ss", keep_sources=None):
        """Extract (and cache) the features of a single file listed in the tsv.

        Args:
            filename: str, wav filename relative to audio_dir.
            audio_dir: str, directory containing the wav files.
            feature_dir: str, directory where the .npy features are written.
            audio_dir_ss: str, optional directory with separated-source wavs.
            pattern_ss: str, suffix appended to the basename to locate the
                per-mixture separated-sources folder.
            ext_ss_feature_file: str, suffix added to the feature filename
                when sound separation is used.
            keep_sources: list, source file stems to keep; None keeps all.

        Returns:
            tuple (filename, out_path); out_path is None when the wav is missing.
        """
        wav_path = osp.join(audio_dir, filename)
        if not osp.isfile(wav_path):
            logger.error("File %s is in the tsv file but the feature is not extracted because "
                         "file do not exist!" % wav_path)
            out_path = None
            # df_meta = df_meta.drop(df_meta[df_meta.filename == filename].index)
        else:
            if audio_dir_ss is None:
                # Plain mixture: one feature file per wav.
                out_filename = osp.join(osp.splitext(filename)[0] + ".npy")
                out_path = osp.join(feature_dir, out_filename)
                self._extract_features(wav_path, out_path)
            else:
                # To be changed if you have new separated sounds from the same mixture
                out_filename = osp.join(osp.splitext(filename)[0] + ext_ss_feature_file + ".npy")
                out_path = osp.join(feature_dir, out_filename)
                bname, ext = osp.splitext(filename)
                if keep_sources is None:
                    # Take every separated source found in the mixture's folder.
                    wav_paths_ss = glob.glob(osp.join(audio_dir_ss, bname + pattern_ss, "*" + ext))
                else:
                    wav_paths_ss = []
                    for s_ind in keep_sources:
                        audio_file = osp.join(audio_dir_ss, bname + pattern_ss, s_ind + ext)
                        assert osp.exists(audio_file), f"Audio file does not exists: {audio_file}"
                        wav_paths_ss.append(audio_file)
                if not osp.exists(out_path):
                    self._extract_features_ss(wav_path, wav_paths_ss, out_path)
        return filename, out_path
def extract_features_from_df(self, df_meta, audio_dir, feature_dir, audio_dir_ss=None, pattern_ss=None,
ext_ss_feature_file="_ss", keep_sources=None):
"""Extract log mel spectrogram features.
Args:
df_meta : pd.DataFrame, containing at least column "filename" with name of the wav to compute features
audio_dir: str, the path where to find the wav files specified by the dataframe
feature_dir: str, the path where to search and save the features.
audio_dir_ss: str, the path where to find the separated files (associated to the mixture)
pattern_ss: str, the pattern following the normal filename to match the folder to find separated sources
ext_ss_feature_file: str, only when audio_dir_ss is not None
keep_sources: list, the index of the sources to be kept if sound separation is used
Returns:
pd.DataFrame containing the initial meta + column with the "feature_filename"
"""
if bool(audio_dir_ss) != bool(pattern_ss):
raise NotImplementedError("if audio_dir_ss is not None, you must specify a pattern_ss")
df_features = pd.DataFrame()
fpaths = df_meta["filename"]
uniq_fpaths = fpaths.drop_duplicates().to_list()
extract_file_func = functools.partial(self._extract_features_file,
audio_dir=audio_dir,
feature_dir=feature_dir,
audio_dir_ss=audio_dir_ss,
pattern_ss=pattern_ss,
ext_ss_feature_file=ext_ss_feature_file,
keep_sources=keep_sources)
n_jobs = multiprocessing.cpu_count() - 1
logger.info(f"Using {n_jobs} cpus")
with closing(multiprocessing.Pool(n_jobs)) as p:
for filename, out_path in tqdm(p.imap_unordered(extract_file_func, uniq_fpaths, 200), total=len(uniq_fpaths)):
row_features = df_meta[df_meta.filename == filename]
row_features.loc[:, "feature_filename"] = out_path
df_features = df_features.append(row_features, ignore_index=True)
return df_features.reset_index(drop=True)
@staticmethod
def get_classes(list_dfs):
""" Get the different classes of the dataset
Returns:
A list containing the classes
"""
classes = []
for df in list_dfs:
if "event_label" in df.columns:
classes.extend(df["event_label"].dropna().unique()) # dropna avoid the issue between string and float
elif "event_labels" in df.columns:
classes.extend(df.event_labels.str.split(',', expand=True).unstack().dropna().unique())
return list(set(classes))
@staticmethod
def get_subpart_data(df, nb_files, pattern_ss=None):
"""Get a subpart of a dataframe (only the number of files specified)
Args:
df : pd.DataFrame, the dataframe to extract a subpart of it (nb of filenames)
nb_files: int, the number of file to take in the dataframe if taking a small part of the dataset.
pattern_ss: str, if nb_files is not None, the pattern is needed to get same ss than soundscapes
Returns:
pd.DataFrame containing the only the number of files specified
"""
column = "filename"
if not nb_files > len(df[column].unique()):
if pattern_ss is not None:
filenames = df[column].apply(lambda x: x.split(pattern_ss)[0])
filenames = filenames.drop_duplicates()
# sort_values and random_state are used to have the same filenames each time (also for normal and ss)
filenames_kept = filenames.sort_values().sample(nb_files, random_state=10)
df_kept = df[df[column].apply(lambda x: x.split(pattern_ss)[0]).isin(filenames_kept)].reset_index(
drop=True)
else:
filenames = df[column].drop_duplicates()
# sort_values and random_state are used to have the same filenames each time (also for normal and ss)
filenames_kept = filenames.sort_values().sample(nb_files, random_state=10)
df_kept = df[df[column].isin(filenames_kept)].reset_index(drop=True)
logger.debug(f"Taking subpart of the data, len : {nb_files}, df_len: {len(df)}")
else:
df_kept = df
return df_kept
@staticmethod
def get_df_from_meta(meta_name, nb_files=None, pattern_ss=None):
"""
Extract a pandas dataframe from a tsv file
Args:
meta_name : str, path of the tsv file to extract the df
nb_files: int, the number of file to take in the dataframe if taking a small part of the dataset.
pattern_ss: str, if nb_files is not None, the pattern is needed to get same ss than soundscapes
Returns:
dataframe
"""
df = pd.read_csv(meta_name, header=0, sep="\t")
if nb_files is not None:
df = DESED.get_subpart_data(df, nb_files, pattern_ss=pattern_ss)
return df
    @staticmethod
    def download(filenames, audio_dir, n_jobs=3, chunk_size=10):
        """
        Download files contained in a list of filenames
        Args:
            filenames: list or pd.Series, filenames of files to be downloaded ()
            audio_dir: str, the directory where the wav file should be downloaded (if not exist)
            chunk_size: int, (Default value = 10) number of files to download in a chunk
            n_jobs : int, (Default value = 3) number of parallel jobs
        """
        # Thin wrapper: the actual downloading logic lives in desed.download_real.
        desed.download_real.download(filenames, audio_dir, n_jobs=n_jobs, chunk_size=chunk_size)
| 20,210 | 46.332553 | 122 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/data_utils/DataLoad.py | import bisect
import numpy as np
import pandas as pd
import torch
import random
import warnings
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
from utilities.Logger import create_logger
import config as cfg
from utilities.Transforms import Compose
# Fix the torch and python RNG seeds at import time so that shuffling and
# sampling done by this module are reproducible across runs.
torch.manual_seed(0)
random.seed(0)
# Module-level logger configured from the project config.
logger = create_logger(__name__, terminal_level=cfg.terminal_level)
class DataLoadDf(Dataset):
    """ Class derived from pytorch DESED
    Prepare the data to be use in a batch mode
    Args:
        df: pandas.DataFrame, the dataframe containing the set information (feat_filenames, labels),
            it should contain these columns :
            "feature_filename"
            "feature_filename", "event_labels"
            "feature_filename", "onset", "offset", "event_label"
        encode_function: function(), function which encode labels
        transform: function(), (Default value = None), function to be applied to the sample (pytorch transformations)
        return_indexes: bool, (Default value = False) whether or not to return indexes when use __getitem__
    Attributes:
        df: pandas.DataFrame, the dataframe containing the set information (feat_filenames, labels, ...)
        encode_function: function(), function which encode labels
        transform : function(), function to be applied to the sample (pytorch transformations)
        return_indexes: bool, whether or not to return indexes when use __getitem__
    """
    def __init__(self, df, encode_function=None, transform=None, return_indexes=False, in_memory=False):
        self.df = df
        self.encode_function = encode_function
        self.transform = transform
        self.return_indexes = return_indexes
        # One entry per distinct feature file; this drives __len__ and indexing.
        self.feat_filenames = df.feature_filename.drop_duplicates()
        self.filenames = df.filename.drop_duplicates()
        # When in_memory is True, loaded feature arrays are cached in a dict.
        self.in_memory = in_memory
        if self.in_memory:
            self.features = {}
    def set_return_indexes(self, val):
        """ Set the value of self.return_indexes
        Args:
            val : bool, whether or not to return indexes when use __getitem__
        """
        self.return_indexes = val
    def get_feature_file_func(self, filename):
        """Get a feature file from a filename
        Args:
            filename: str, name of the file to get the feature
        Returns:
            numpy.array
            containing the features computed previously
        """
        if not self.in_memory:
            data = np.load(filename).astype(np.float32)
        else:
            # Lazy cache: load from disk on first access only.
            if self.features.get(filename) is None:
                data = np.load(filename).astype(np.float32)
                self.features[filename] = data
            else:
                data = self.features[filename]
        return data
    def __len__(self):
        """
        Returns:
            int
                Length of the object
        """
        length = len(self.feat_filenames)
        return length
    def get_sample(self, index):
        """From an index, get the features and the labels to create a sample
        Args:
            index: int, Index of the sample desired
        Returns:
            tuple
                Tuple containing the features and the labels (numpy.array, numpy.array)
        """
        features = self.get_feature_file_func(self.feat_filenames.iloc[index])
        # Squeeze a leading singleton axis (e.g. a 1-source stacked feature file).
        if len(features) == 1:
            features = features[0]
        # event_labels means weak labels, event_label means strong labels
        if "event_labels" in self.df.columns or {"onset", "offset", "event_label"}.issubset(self.df.columns):
            if "event_labels" in self.df.columns:
                label = self.df.iloc[index]["event_labels"]
                if pd.isna(label):
                    label = []
                if type(label) is str:
                    if label == "":
                        label = []
                    else:
                        # Weak labels are stored comma-separated in one cell.
                        label = label.split(",")
            else:
                # Strong labels: all (onset, offset, event_label) rows of this file.
                cols = ["onset", "offset", "event_label"]
                label = self.df[self.df.filename == self.filenames.iloc[index]][cols]
                if label.empty:
                    label = []
        else:
            label = "empty"  # trick to have -1 for unlabeled data and concat them with labeled
        if "filename" not in self.df.columns:
            raise NotImplementedError(
                "Dataframe to be encoded doesn't have specified columns: columns allowed: 'filename' for unlabeled;"
                "'filename', 'event_labels' for weak labels; 'filename' 'onset' 'offset' 'event_label' "
                "for strong labels, yours: {}".format(self.df.columns))
        if index == 0:
            logger.debug("label to encode: {}".format(label))
        if self.encode_function is not None:
            # labels are a list of string or list of list [[label, onset, offset]]
            y = self.encode_function(label)
        else:
            y = label
        sample = features, y
        return sample
    def __getitem__(self, index):
        """ Get a sample and transform it to be used in a ss_model, use the transformations
        Args:
            index : int, index of the sample desired
        Returns:
            tuple
                Tuple containing the features and the labels (numpy.array, numpy.array) or
                Tuple containing the features, the labels and the index (numpy.array, numpy.array, int)
        """
        sample = self.get_sample(index)
        if self.transform:
            sample = self.transform(sample)
        if self.return_indexes:
            sample = (sample, index)
        return sample
    def set_transform(self, transform):
        """Set the transformations used on a sample
        Args:
            transform: function(), the new transformations
        """
        self.transform = transform
    def add_transform(self, transform):
        """Return a new DataLoadDf whose Compose pipeline has *transform* appended.

        Raises:
            TypeError: if the current transform is not a Compose.
        """
        if type(self.transform) is not Compose:
            raise TypeError("To add transform, the transform should already be a compose of transforms")
        transforms = self.transform.add_transform(transform)
        return DataLoadDf(self.df, self.encode_function, transforms, self.return_indexes, self.in_memory)
class ConcatDataset(Dataset):
    """
    DESED to concatenate multiple datasets.
    Purpose: useful to assemble different existing datasets, possibly
    large-scale datasets as the concatenation operation is done in an
    on-the-fly manner.
    Args:
        datasets : sequence, list of datasets to be concatenated
    """
    @staticmethod
    def cumsum(sequence):
        # Running cumulative lengths, e.g. datasets of sizes 3,2,4 -> [3, 5, 9].
        r, s = [], 0
        for e in sequence:
            l = len(e)
            r.append(l + s)
            s += l
        return r
    @property
    def cluster_indices(self):
        # One contiguous index range per underlying dataset, in global indexing.
        cluster_ind = []
        prec = 0
        for size in self.cumulative_sizes:
            cluster_ind.append(range(prec, size))
            prec = size
        return cluster_ind
    def __init__(self, datasets):
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        self.cumulative_sizes = self.cumsum(self.datasets)
    def __len__(self):
        return self.cumulative_sizes[-1]
    def __getitem__(self, idx):
        # Binary search for the dataset containing the global index, then
        # translate to the dataset-local index.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]
    @property
    def cummulative_sizes(self):
        # Kept for backward compatibility with the old (misspelled) name.
        warnings.warn("cummulative_sizes attribute is renamed to "
                      "cumulative_sizes", DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
    @property
    def df(self):
        # Concatenation of the metadata dataframes of every child dataset.
        df = self.datasets[0].df
        for dataset in self.datasets[1:]:
            df = pd.concat([df, dataset.df], axis=0, ignore_index=True, sort=False)
        return df
class MultiStreamBatchSampler(Sampler):
    """Takes a dataset with cluster_indices property, cuts it into batch-sized chunks
    Drops the extra items, not fitting into exact batches
    Args:
        data_source : DESED, a DESED to sample from. Should have a cluster_indices property
        batch_size : int, a batch size that you would like to use later with Dataloader class
        shuffle : bool, whether to shuffle the data or not
    Attributes:
        data_source : DESED, a DESED to sample from. Should have a cluster_indices property
        batch_size : int, a batch size that you would like to use later with Dataloader class
        shuffle : bool, whether to shuffle the data or not
    """
    def __init__(self, data_source, batch_sizes, shuffle=True):
        super(MultiStreamBatchSampler, self).__init__(data_source)
        self.data_source = data_source
        # One batch size per stream (per underlying dataset in data_source).
        self.batch_sizes = batch_sizes
        l_bs = len(batch_sizes)
        nb_dataset = len(self.data_source.cluster_indices)
        assert l_bs == nb_dataset, "batch_sizes must be the same length as the number of datasets in " \
                                   "the source {} != {}".format(l_bs, nb_dataset)
        self.shuffle = shuffle
    def __iter__(self):
        indices = self.data_source.cluster_indices
        if self.shuffle:
            for i in range(len(self.batch_sizes)):
                indices[i] = np.random.permutation(indices[i])
        # Chunk each stream with its own batch size, then merge one chunk per
        # stream into a single flat batch (sum of tuples concatenates them).
        iterators = []
        for i in range(len(self.batch_sizes)):
            iterators.append(grouper(indices[i], self.batch_sizes[i]))
        return (sum(subbatch_ind, ()) for subbatch_ind in zip(*iterators))
    def __len__(self):
        # The shortest stream (in number of full batches) bounds the epoch length.
        val = np.inf
        for i in range(len(self.batch_sizes)):
            val = min(val, len(self.data_source.cluster_indices[i]) // self.batch_sizes[i])
        return val
def grouper(iterable, n):
    """Collect data into fixed-length chunks, dropping any leftover items.

    grouper('ABCDEFG', 3) --> ('A', 'B', 'C') ('D', 'E', 'F')
    """
    # n references to the *same* iterator: zip pulls n consecutive items for
    # every chunk and stops as soon as the shared iterator is exhausted.
    chunk_iters = (iter(iterable),) * n
    return zip(*chunk_iters)
| 10,066 | 35.607273 | 120 | py |
GalaxyDataset | GalaxyDataset-master/test.py | import numpy as np
# array = [[1, 2], 3]
#
# np.save("./test.npy", array)
#
# print(np.load("./test.npy", allow_pickle=True))
import yaml
import os
# Use a context manager so the file handle is always closed, and safe_load:
# bare yaml.load without an explicit Loader is deprecated and can execute
# arbitrary object constructors on untrusted input.
with open("./config.yaml") as f:
    y = yaml.safe_load(f)
print(y)
print(y["split_mode"])
# def readYaml(path, args):
# if not os.path.exists(path):
# return args
# f = open(path)
# config = yaml.load(f)
# args.node_num = int(config[0]["node_num"])
# args.isaverage_dataset_size = config[0]["isaverage_dataset_size"]
# args.dataset_size_list = config[0]["dataset_size_list"]
# args.split_mode = int(config[0]["split_mode"])
# args.node_label_num = config[0]["node_label_num"]
# args.isadd_label = config[0]["isadd_label"]
# args.add_label_rate = float(config[0]["add_label_rate"])
# args.isadd_error = config[0]["isadd_error"]
# args.add_error_rate = float(config[0]["add_error_rate"])
# return args
#
# readYaml("./config.yaml") | 925 | 27.9375 | 71 | py |
GalaxyDataset | GalaxyDataset-master/GalaxyDataset.py | # -*- coding: utf-8 -*-
import torch
import torch.utils.data as Data
import numpy as np
import argparse
import os
import random
import yaml
import downloadData
import fdata
import preprocess
import mnist_bias
# 1. download dataset 2. split dataset
def make_dataset():
    """Entry point: parse the configuration, download the dataset and split it.

    Command-line defaults can be overridden by ./config.yaml (see readYaml).
    Depending on split_mode the function either delegates to fdata/mnist_bias
    or downloads the data and dispatches to splitDataset.
    """
    parser = argparse.ArgumentParser('parameters')
    # dataset
    parser.add_argument('--dataset-mode', type=str, default="CIFAR10", help="dataset")
    # node num
    parser.add_argument('--node-num', type=int, default=4,
                        help="Number of node (default n=10) one node corresponding to one dataset")
    # small dataset config
    parser.add_argument('--isaverage-dataset-size', type=bool, default=True, help="if average splits dataset")
    parser.add_argument('--dataset-size-list', type=list, default=[5000, 3000, 2000, 3300],
                        help= "each small dataset size,if isaverage-dataset-size == True, list contain one element")
    # split mode
    parser.add_argument('--split-mode', type=int, default = 1,
                        help="dataset split: randomSplit(0), splitByLabels(1)")
    # each node - label kind
    parser.add_argument('--node-label-num', type=list, default=[4, 3, 2, 1],
                        help="each node consists of label kind, default each node has one kind of label")
    parser.add_argument('--isadd-label', type=bool, default=True,
                        help="whether add error dataset default=False")
    parser.add_argument('--add-label-rate', type=float, default=0.1,
                        help="if split-mode == 2 or 3, add same normal small dataset")
    parser.add_argument('--isadd-error', type=bool, default=True,
                        help="whether add error dataset default=False")
    parser.add_argument('--add-error-rate', type=float, default=0.01,
                        help="if split-mode == 3, add same error dataset")
    parser.add_argument('--isuse-yaml', type= bool, default= True,
                        help='isuse-yaml = True means using yaml file, false means using command line')
    parser.add_argument('--RandomResizedCrop', type=list, default=[0.2, 1.],
                        help='RandomResizedCrop')
    parser.add_argument('--GaussianBlur', type=list, default=[0.1, .2],
                        help='GaussianBlur')
    parser.add_argument('--RandomGrayscale', type=float, default=0.2,
                        help='GaussianBlur')
    parser.add_argument('--Normalize-mean', type=list, default=[0.4914, 0.4822, 0.4465],
                        help='Normalize-mean')
    parser.add_argument('--Normalize-std', type=list, default=[0.2023, 0.1994, 0.2010],
                        help='Normalize-std')
    # args.RandomResizedCrop = config["RandomResizedCrop"]
    # args.GaussianBlur = config["GaussianBlur"]
    # args.RandomGrayscale = config["RandomGrayscale"]
    # args.Normalize_mean = config["Normalize_mean"]
    # args.Normalize_std = config["Normalize_std"]
    args = parser.parse_args()
    # Yaml values (when enabled and present) override the command line.
    args = readYaml("./config.yaml", args)
    # valid parameters
    if args.dataset_mode != "CIFAR10" and args.dataset_mode != "MNIST":
        print("currently only for CIFAR10 and MNIST")
        return
    if len(args.dataset_size_list) < args.node_num:
        print("Error: the number of dataset smaller than node num")
        return
    if args.node_num != len(args.dataset_size_list) or args.node_num != len(args.node_label_num):
        print("Error: nodes num is not equal to the length of dataset_size_list or node_label_num ")
        return
    if args.split_mode == 3:
        # Contrastive-learning loader path; handled entirely by fdata.Loader.
        #file_path, batch_size , sub_num, dataset_ident = 'CIFAR10C' , download = False, train_transform = cifar_train_transforms(), test_transform = cifar_test_transforms(), use_cuda =True
        Xloader = fdata.Loader("./data", batch_size = 32, sub_num=args.sub_num, dataset_ident = 'CIFAR10C', download = False, train_transform=fdata.cifar_train_transforms(args), test_transform=fdata.cifar_test_transforms(), use_cuda=True)
        return
    if args.split_mode == 4:
        # Biased-MNIST path delegated to mnist_bias.
        mnist_bias.mnist_process(args.datasetpath)
        return
    train_loader, test_loader = downloadData.load_data(args)
    splitDataset(args, train_loader)
def readYaml(path, args):
    """Override the parsed command-line args with values from a yaml file.

    Args:
        path: str, path of the yaml config file.
        args: argparse.Namespace, the parsed command-line arguments.

    Returns:
        argparse.Namespace, args updated in place; returned unchanged when
        args.isuse_yaml is False or the file does not exist.
    """
    if args.isuse_yaml == False:
        return args
    if not os.path.exists(path):
        return args
    # Context manager closes the handle; safe_load replaces the deprecated
    # bare yaml.load, which allows arbitrary object construction.
    with open(path) as f:
        config = yaml.safe_load(f)
    args.dataset_mode = config["dataset_mode"]
    args.datasetpath = str(config["datasetpath"])
    args.node_num = int(config["node_num"])
    args.isaverage_dataset_size = config["isaverage_dataset_size"]
    args.dataset_size_list = config["dataset_size_list"]
    args.split_mode = int(config["split_mode"])
    args.node_label_num = config["node_label_num"]
    args.isadd_label = config["isadd_label"]
    args.add_label_rate = float(config["add_label_rate"])
    args.isadd_error = config["isadd_error"]
    args.add_error_rate = float(config["add_error_rate"])
    args.RandomResizedCrop = config["RandomResizedCrop"]
    args.GaussianBlur = config["GaussianBlur"]
    args.RandomGrayscale = config["RandomGrayscale"]
    args.Normalize_mean = config["Normalize_mean"]
    args.Normalize_std = config["Normalize_std"]
    args.sub_num = config["sub_num"]
    return args
def splitDataset(args, train_loader):
    """Dispatch to the requested split strategy and save the resulting subsets.

    The output directory under ./<dataset_mode>/ encodes the chosen strategy
    (random vs by-label, with/without extra labels and error labels).
    """
    # sub_datasets [
    #       [[imgs, label], [imgs, label]....],
    #       [[imgs, label], [imgs, label]....],
    # ]
    # randomSplit : 1. no error dataset 2. add error dataset
    # splitByLabel: 1. just  2. add other dataset, no error 3. add error no other 4. add both
    parent_path = "./" + args.dataset_mode
    if args.split_mode == 0:  # 1. Randomly split CIFAR10 into n small datasets
        if args.isadd_error == False:
            args.add_error_rate = 0.0
            sub_datasets = randomSplit(args, train_loader)
            savenpy(parent_path+"/randomSplit/", sub_datasets, args)
        else:
            temp_sub_datasets = randomSplit(args, train_loader)
            sub_datasets = addErrorDataset(args, temp_sub_datasets)
            savenpy(parent_path+"/randomSplitWithError/", sub_datasets, args)
    elif args.split_mode == 1:  # 2. Divide CIFAR10 into n small datasets according to dataset labels
        if args.isadd_label == False and args.isadd_error == False:
            # Rates zeroed so the saved filenames reflect "no additions".
            args.add_error_rate = 0.0
            args.add_label_rate = 0.0
            sub_datasets = splitByLabels(args, train_loader)
            savenpy(parent_path+"/splitByLabels/", sub_datasets, args)
        elif args.isadd_label == True and args.isadd_error == False:
            args.add_error_rate = 0.0
            # 3. Based on the 2nd method, each dataset adds 10% of the data taken from the other datasets
            sub_datasets = splitByLabelsAnddDataset(args, train_loader)
            savenpy(parent_path+"/splitByLabelsAnddDataset/", sub_datasets, args)
        elif args.isadd_label == False and args.isadd_error == True:
            args.add_label_rate = 0.0
            # 5. get dataset, each dataset adds some error label data to form a new dataset
            temp_sub_datasets = splitByLabels(args, train_loader)
            sub_datasets = addErrorDataset(args, temp_sub_datasets)
            savenpy(parent_path+"/splitByLabelsWithErrorDataset/", sub_datasets, args)
        else:
            temp_sub_datasets = splitByLabelsAnddDataset(args, train_loader)
            sub_datasets = addErrorDataset(args, temp_sub_datasets)
            savenpy(parent_path+"/splitByLabelsWithNormalAndErrorDataset/", sub_datasets, args)
# 1. Randomly split Dataset into n small datasets
def randomSplit(args, loader):
    """Split the loader's samples into node_num consecutive sub-datasets.

    When isaverage_dataset_size is True each node takes dataset_size_list[i]
    consecutive samples; otherwise the cut points are the running sums of
    dataset_size_list. Samples are stored as [imgs[0].numpy(), label[0].numpy()]
    pairs (the loader is expected to yield batches of size 1).

    Returns:
        list of per-node lists of [imgs, label] pairs.
    """
    args.add_label_rate = 0.0
    node_num = args.node_num
    sub_datasets = [[] for i in range(node_num)]
    dataset_size_list = args.dataset_size_list
    if args.isaverage_dataset_size == True:
        # Split evenly (translated from the original Chinese comment)
        temp_list = []
        node_index = 0
        num = 0
        print(loader.dataset)
        for step, (imgs, label) in enumerate(loader):
            temp_list.append([imgs[0].numpy(), label[0].numpy()])
            num += 1
            if (num % (dataset_size_list[node_index])) == 0 and num != 0:
                print("finish average spliting %d dataset" % node_index)
                # TODO(save one small dataset)
                sub_datasets[node_index] = temp_list
                node_index = node_index+1
                if node_index == node_num:
                    break
                temp_list = []
            if step == len(loader.dataset.data) -1:
                # Last sample reached before the node filled up: keep the remainder.
                print("finish left spliting %d dataset" % node_index)
                sub_datasets[node_index] = temp_list
    else:
        temp_list = []
        node_index = 0
        # temp_step is the cumulative cut point for the current node.
        temp_step = dataset_size_list[node_index]
        num = 0
        if args.dataset_mode == "CIFAR10":
            for step, (imgs, labels) in enumerate(loader):
                num +=1
                temp_list.append([imgs[0].numpy(), labels[0].numpy()])
                # temp_list.append([imgs.numpy(), labels.numpy()])
                if num == temp_step and num !=0:
                    print("finish spliting %d dataset" % node_index)
                    sub_datasets[node_index] = temp_list
                    node_index = node_index + 1
                    if node_index == node_num:
                        break
                    temp_step += dataset_size_list[node_index]
                    temp_list = []
                if step == len(loader.dataset.data) -1:
                    print("finish left spliting %d dataset" % node_index)
                    sub_datasets[node_index] = temp_list
        elif args.dataset_mode == "MNIST":
            # Same logic as the CIFAR10 branch but unpacking the batch tuple
            # as data[0]/data[1] instead of (imgs, labels).
            step = 0
            for (i, data) in enumerate(loader):
                step += 1
                num +=1
                temp_list.append([data[0].numpy(), data[1].numpy()])
                # temp_list.append([imgs.numpy(), labels.numpy()])
                if num == temp_step and num !=0:
                    print("finish spliting %d dataset" % node_index)
                    sub_datasets[node_index] = temp_list
                    node_index = node_index + 1
                    if node_index == node_num:
                        break
                    temp_step += dataset_size_list[node_index]
                    temp_list = []
                if i == len(loader.dataset.data) -1:
                    print("finish left spliting %d dataset" % node_index)
                    sub_datasets[node_index] = temp_list
    return sub_datasets
# 2. Divide CIFAR10 into n small datasets according to dataset labels
def splitByLabels(args, train_loader):
    """Split the data so that node i receives node_label_num[i] label classes.

    First buckets every sample by its label (10 classes assumed), then assigns
    consecutive label buckets to each node; when isaverage_dataset_size is True
    each node's bucket union is shuffled and truncated to dataset_size_list[i].

    Returns:
        list of per-node lists of [imgs, label] pairs.
    """
    sub_datasets = [[] for i in range(args.node_num)]
    temp_datasets = [[] for i in range(10)]
    # Bucket samples per label class; node_label_num[i] classes go to node i,
    # dataset_size_list[i] controls that node's data amount (translated from
    # the original mixed Chinese comment).
    node_index = 0
    for step, (imgs, label) in enumerate(train_loader):
        num_label = label.data.item()
        # CIFAR10 Dataset
        # imgs[0].numpy(): <class 'tuple'>: (3, 32, 32)   label[0].numpy() [x]  =>
        # temp_datasets [
        #       [[(3, 32, 32) , 0], [(3, 32, 32) , 0], ..],
        #       [[[(3, 32, 32) , 1], [(3, 32, 32) , 1], ..],
        #       ...
        # ]
        temp_datasets[num_label].append(
            [imgs[0].numpy(), label[0].numpy()])
        if step % 5000 == 0:
            print("split dataset step: ", step)
    # loop temp_datasets, add and contract
    # node_label_num [1, 2, 2, 5, 7]
    # NOTE(review): rs is assigned but never used below; it only advances the
    # global random state. Confirm before removing it.
    rs = random.sample(range(0, 10), 10)  # 0 - 9 random nums
    # according to nodes list, distribute label dataset
    all_label_kinds = len(temp_datasets)
    sum_x = 0
    for index, x in enumerate(args.node_label_num):
        temp_list = []
        if x > all_label_kinds:
            x = all_label_kinds
        for y in range(x):
            # temp_list only contain 10 kinds labels
            labels_index = (y + sum_x) % all_label_kinds
            temp_list.extend(temp_datasets[labels_index])
            print("node %d" % index, "| add label-%d dataset" % (labels_index))
        # if we need the part of data, shuffle, split
        if args.isaverage_dataset_size == True:
            random.shuffle(temp_list)
            temp_list = temp_list[:args.dataset_size_list[index]]
        sub_datasets[index] = temp_list
        sum_x += x
    return sub_datasets
# 3. Based on the 2nd method, each dataset adds n% of the data taken from the other datasets
def splitByLabelsAnddDataset(args, train_loader):
    """splitByLabels, then top up each node with add_label_rate extra samples.

    For each node, the first add_label_rate * len(node_dataset) samples yielded
    by train_loader are appended on top of the by-label split.
    """
    percent = args.add_label_rate
    # call splitByLabels
    sub_datasets = splitByLabels(args, train_loader)
    # add other data Attention other dataset
    add_rate_num = [int(percent*len(sub_datasets[i])) for i in range(args.node_num)]
    for i in range(args.node_num):
        # The loader is re-iterated from the start for every node.
        for step, (imgs, label) in enumerate(train_loader):
            if step < add_rate_num[i]:
                if step % 100 == 0:
                    print("node %d " % i, "| step:%d, adding other label dataset" % step)
                sub_datasets[i].append([imgs[0].numpy(), label[0].numpy()])
            else:
                break
    print("adding other data succeed!")
    return sub_datasets
# 4. each dataset adds some error label data
def addErrorDataset(args, array):
    """Append mislabeled copies of existing samples to each node's dataset.

    For each node, the first ``int(add_error_rate * len(dataset))`` samples are
    duplicated with a wrong label drawn uniformly from the other classes.

    Args:
        args: namespace providing node_num and add_error_rate.
        array: list of per-node datasets, each a list of [imgs, label] pairs.

    Returns:
        list, the same per-node datasets with the error samples appended.
    """
    error_ratio = args.add_error_rate
    add_error_nums = [int(error_ratio * len(array[i])) for i in range(args.node_num)]
    # add error data
    for i in range(args.node_num):
        for index in range(add_error_nums[i]):
            if index % 5 == 0:
                print("node %d" % i, "| step:%d, adding other error dataset" % index)
            # array [
            #       [[imgs, label], [imgs, label]....],
            #       [[imgs, label], [imgs, label]....],
            # ]
            real_label = array[i][index][1]
            # Bug fix: the original used range(0, 9), which excludes class 9,
            # so label 9 could never be drawn as a wrong label in a 10-class
            # dataset. range(10) covers all classes except the real one.
            error_label = random.choice([c for c in range(10) if c != real_label])
            array[i].append([array[i][index][0], error_label])
    print("adds some error label data succeed!")
    return array
# save each small list dataset file
def savenpy(path, array, args):
'''
loop array save each small list dataset file
:param path:
:param array:
:return:
'''
if not os.path.exists(path):
os.makedirs(path)
# array [[(3, 32, 32), x], [(3, 32, 32), x]]
# randomSplit_dataset size_target label_添加label_errorlabel
# label classes
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
for i in range(len(array)):
if len(array[i]) != 0:
filename = ''
if args.split_mode == 0:
filename = 'randomSplit'+'_node_'+str(i) + '_'+str(len(array[i]))+ '_' + "normal"
elif args.split_mode == 1:
if int(args.node_label_num[i]) != 1:
filename = 'SplitByLabels'+'_node_'+str(i) + '_' + str(len(array[i])) + '_' + classes[array[i][0][1]]+ "andMore"
else:
filename = 'SplitByLabels'+'_node_'+str(i) + '_' + str(len(array[i])) + '_' + classes[array[i][0][1]]
strings = path + filename +'_' + str(args.add_label_rate) + '_' + str(args.add_error_rate)+'.npy'
np.save(file=strings, arr=array[i])
print("index %d saved %s" % (i, strings))
print("save file succeed !")
def readnpy(path, batch_size=64, shuffle=True):
    """Load a saved [[imgs, label], ...] npy file into a DataLoader.

    Generalized: batch_size and shuffle, previously hard-coded to 64/True,
    are now parameters with the same defaults (backward compatible).

    Args:
        path: str, path of the .npy file produced by savenpy.
        batch_size: int, (Default value = 64) batch size of the loader.
        shuffle: bool, (Default value = True) whether the loader shuffles.

    Returns:
        torch.utils.data.DataLoader yielding (imgs, label) tensor batches.
    """
    # npy file: [[imgs, label], [imgs, label]...., [imgs, label]]
    # when allow_pickle=True, matrix needs same size
    np_array = np.load(path, allow_pickle=True)
    imgs = []
    label = []
    for index in range(len(np_array)):
        imgs.append(np_array[index][0])
        label.append(np_array[index][1])
    torch_dataset = Data.TensorDataset(torch.from_numpy(np.array(imgs)), torch.from_numpy(np.array(label)))
    dataloader = Data.DataLoader(
        torch_dataset,
        batch_size=batch_size,
        shuffle=shuffle
    )
    print(dataloader)
    return dataloader
# Script entry point: build and save the split datasets per the config.
if __name__ == "__main__":
    make_dataset()
    # preprocess.load_npy("./cifar10/splitByLabelsWithNormalAndErrorDataset/SplitByLabels_5555_dog_0.1_0.01.npy")
# readnpy("./cifar10/splitByLabelsWithNormalAndErrorDataset/SplitByLabels_3666_truck.npy") | 16,303 | 42.946092 | 239 | py |
GalaxyDataset | GalaxyDataset-master/downloadData.py | # -*- coding: utf-8 -*-
import argparse
import torch
from torchvision import datasets, transforms
# CIFAR-10,
# mean, [0.5, 0.5, 0.5]
# std, [0.5, 0.5, 0.5]
# CIFAR-100,
# mean, [0.5071, 0.4865, 0.4409]
# std, [0.2673, 0.2564, 0.2762]
def load_data(args):
    """Build the train/test DataLoaders for the requested dataset.

    Supported args.dataset_mode values: "CIFAR10", "CIFAR100", "MNIST".
    For any other value both loaders are returned as empty lists.

    Args:
        args: namespace with dataset_mode; args.batch_size is forced to 1
            here so downstream splitting code can treat each yielded batch
            as a single sample.

    Returns:
        tuple (train_loader, test_loader).
    """
    # batch_size is deliberately forced to 1 (per-sample iteration downstream).
    args.batch_size = 1
    train_loader = []
    test_loader = []
    if args.dataset_mode == "CIFAR10":
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        # NOTE(review): num_workers differs between branches (0/1 here, 2
        # elsewhere) — looks accidental rather than deliberate; confirm.
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('data', train=True, download=True, transform=transform_train),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=0
        )
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('data', train=False, transform=transform_test),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=1
        )
    elif args.dataset_mode == "CIFAR100":
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762)),
        ])
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('data', train=True, download=True, transform=transform_train),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=2
        )
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('data', train=False, transform=transform_test),
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=2
        )
    elif args.dataset_mode == "MNIST":
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('data/newMNIST', train=True, download=True, transform=transform_train),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=2
        )
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('data/newMNIST', train=False, transform=transform_test),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=2
        )
    return train_loader, test_loader
class argsment:
    """Minimal stand-in for the argparse namespace consumed by load_data."""
    # basic class-level defaults (comment translated from the original Chinese)
    batch_size = 1
    dataset_mode = "MNIST"
    # constructor
    def __init__(self, batch, mode):
        # Bug fix: the original lines ended with trailing commas, silently
        # turning both attributes into 1-tuples (batch_size == (batch,)),
        # which breaks any arithmetic or string comparison on them.
        self.batch_size = batch
        self.dataset_mode = mode
    # method
    def getBatchSize(self):
        print(self.batch_size)
# download data
# Script entry point: download the requested dataset once and build loaders.
# Note: load_data overwrites args.batch_size with 1 regardless of input.
if __name__ == "__main__":
    parser = argparse.ArgumentParser('parameters')
    # dataset
    parser.add_argument('--dataset-mode', type=str, default="CIFAR100", help="dataset")
    args = parser.parse_args()
    print(args.dataset_mode)
    train_loader, test_loader = load_data(args)
print(train_loader) | 3,725 | 30.310924 | 98 | py |
GalaxyDataset | GalaxyDataset-master/fdata.py | from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
import torch as t
import numpy as np
import random
from PIL import ImageFilter
from PIL import Image
class GaussianBlur(object):
    """Pillow transform that blurs an image with a randomly drawn sigma."""

    def __init__(self, sigma=(.1, 2.)):
        # Tuple default instead of the original mutable list default
        # (shared-mutable-default pitfall); sigma[0]/sigma[1] indexing is
        # unchanged for callers, and callers may still pass a list.
        self.sigma = sigma

    def __call__(self, x):
        """Return *x* blurred with radius ~ Uniform(sigma[0], sigma[1])."""
        radius = random.uniform(self.sigma[0], self.sigma[1])
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
# Xargs.RandomResizedCrop
def cifar_train_transforms(Xargs):
    """Build the SimCLR-style training augmentation pipeline for CIFAR.

    All augmentation parameters (crop scale range, blur sigma range,
    grayscale probability, normalization stats) come from ``Xargs``.
    """
    crop_lo, crop_hi = Xargs.RandomResizedCrop[0], Xargs.RandomResizedCrop[1]
    steps = [
        transforms.RandomResizedCrop(32, scale=(crop_lo, crop_hi)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomApply([GaussianBlur(Xargs.GaussianBlur)], p=0.5),
        transforms.RandomGrayscale(Xargs.RandomGrayscale),
        transforms.ToTensor(),
        transforms.Normalize(Xargs.Normalize_mean, Xargs.Normalize_std),
    ]
    return transforms.Compose(steps)
# def cifar_train_transforms():
# all_transforms = transforms.Compose([
# transforms.RandomResizedCrop(32, scale=(0.2, 1.)),
# transforms.RandomHorizontalFlip(),
# transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
# transforms.RandomGrayscale(p=0.2),
# transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
# ])
# return all_transforms
def cifar_test_transforms():
    """Evaluation pipeline: tensor conversion plus CIFAR-10 channel normalization."""
    mean = [0.4914, 0.4822, 0.4465]
    std = [0.2023, 0.1994, 0.2010]
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
class CIFAR10C(datasets.CIFAR10):
    """CIFAR-10 variant returning two independently augmented views of each
    sample (for contrastive learning) plus the class label."""

    def __init__(self, *args, **kwargs):
        super(CIFAR10C, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        # return a PIL Image
        img = Image.fromarray(img)
        if self.transform is not None:
            xi = self.transform(img)
            xj = self.transform(img)
        else:
            # Fix: previously xi/xj were undefined here, raising NameError
            # whenever the dataset was built without a transform.
            xi = xj = img
        return xi, xj, target
# Maps a dataset identifier to its dataset class; 'CIFAR10C' yields two
# augmented views per sample, 'CIFAR10' is the stock torchvision dataset.
loader_map = {
    'CIFAR10C': CIFAR10C,
    'CIFAR10': datasets.CIFAR10
}
# Number of target classes per dataset identifier.
num_class = {
    'CIFAR10C': 10,
    'CIFAR10': 10
}
class Loader(object):
    """Builds per-subset training DataLoaders plus a single test DataLoader.

    The training set is split into ``sub_num`` contiguous index ranges of size
    ``50000 // (sub_num + 1)``; note the last (sub_num+1)-th share of the data
    is left unused — presumably intentional (held out), TODO confirm.
    """
    def __init__(self, file_path, batch_size , sub_num, train_transform, test_transform, dataset_ident = 'CIFAR10C' , download = False, use_cuda =True):
        # Resolve the dataset class from its identifier and build train/test sets.
        train_dataset,test_dataset = self.get_dataset_train(loader_map[dataset_ident], file_path, download,
                                            train_transform, test_transform)
        # NOTE(review): hard-codes the CIFAR training-set size (50000).
        subsize = int(50000 / (sub_num +1 ))
        subsets_range = [range(i * subsize ,(i+1)*subsize ) for i in range(sub_num)]
        subsets = [self.get_fix_part(train_dataset,i) for i in subsets_range]
        # pin_memory speeds host-to-GPU copies; only meaningful with CUDA.
        kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
        self.train_loaders = [DataLoader(i, batch_size=batch_size, shuffle=True, **kwargs) for i in subsets]
        self.test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
        #tmp_batch = self.train_loader.__iter__().__next__()[0]
        #self.img_shape = list(tmp_batch.size())[1:]
        #self.num_class = num_class[dataset_ident]
    @staticmethod
    def get_dataset_train(dataset, file_path, download, train_transform, test_transform):
        # Training and Validation datasets
        train_dataset = dataset(file_path, train=True, download=download,
                                transform=train_transform)
        test_dataset = dataset(file_path, train=False, download=download,
                               transform=test_transform)
        return train_dataset,test_dataset
    def get_fix_part(self,trainset,datarange):
        # Wrap a fixed index range of the training set as a torch Subset.
        return t.utils.data.Subset(trainset,datarange)
| 3,878 | 33.945946 | 154 | py |
GalaxyDataset | GalaxyDataset-master/autoencoder.py | # Numpy
import numpy as np
# Torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# Torchvision
import torchvision
import torchvision.transforms as transforms
# Matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
# OS
import os
import argparse
EPOCH = 100  # number of full passes over the training set
# Set random seed for reproducibility
# SEED = 87
# np.random.seed(SEED)
# torch.manual_seed(SEED)
# if torch.cuda.is_available():
# torch.cuda.manual_seed(SEED)
# Restrict this process to GPU index 2; must be set before CUDA initializes.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
def print_model(encoder, decoder):
    """Pretty-print the encoder and decoder modules between section banners."""
    banner = "=" * 14
    print(f"{banner} Encoder {banner}")
    print(encoder)
    print(f"{banner} Decoder {banner}")
    print(decoder)
    print("")
# vgg net — pretrained feature extractors used as a perceptual comparison
# network in main(); pretrained=True downloads ImageNet weights on first use.
vgg19 = torchvision.models.vgg19(pretrained=True)
vgg19_bn = torchvision.models.vgg19_bn(pretrained=True)
if torch.cuda.is_available():
    vgg19 = vgg19.cuda()
    vgg19_bn = vgg19_bn.cuda()
# Custom loss: mean(exp(-|x1 - x2|)) over all elements — equals 1 when the
# inputs coincide and decays toward 0 as they diverge.
class VGG_loss(nn.Module):
    def __init__(self):
        super(VGG_loss, self).__init__()

    def forward(self, x1, x2):
        """Return the scalar mean of exp(-|x1 - x2|)."""
        gap = (x1 - x2).abs()
        return torch.exp(gap.neg()).mean()


loss_vgg = VGG_loss()
def create_model():
    """Instantiate the Autoencoder, print its layout, and move it to the GPU
    when one is available."""
    model = Autoencoder()
    print_model(model.encoder, model.decoder)
    if torch.cuda.is_available():
        model = model.cuda()
        print("Model moved to GPU in order to speed up training.")
    return model
def get_torch_vars(x):
    """Wrap a tensor in a Variable, first moving it to the GPU if available."""
    device_x = x.cuda() if torch.cuda.is_available() else x
    return Variable(device_x)
def imshow(img):
    """Render a CHW image tensor, saving it to ./imgs/result.png and showing it."""
    arr = img.cpu().numpy()
    plt.axis('off')
    # matplotlib expects HWC ordering, so move channels last.
    plt.imshow(np.transpose(arr, (1, 2, 0)))
    if not os.path.exists('./imgs'):
        os.mkdir('./imgs')
    plt.savefig("./imgs/result.png")
    plt.show()
class Autoencoder(nn.Module):
    """Three-stage convolutional autoencoder for 3x32x32 images.

    Encoder: 3x32x32 -> 12x16x16 -> 24x8x8 -> 48x4x4.
    Decoder mirrors the encoder; the final Sigmoid keeps outputs in [0, 1].
    """

    def __init__(self):
        super(Autoencoder, self).__init__()
        enc_layers = [
            nn.Conv2d(3, 12, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(12, 24, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(24, 48, 4, stride=2, padding=1),
            nn.ReLU(),
        ]
        dec_layers = [
            nn.ConvTranspose2d(48, 24, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(24, 12, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(12, 3, 4, stride=2, padding=1),
            nn.Sigmoid(),
        ]
        self.encoder = nn.Sequential(*enc_layers)
        self.decoder = nn.Sequential(*dec_layers)

    def forward(self, x):
        """Return (latent code, reconstruction) for a batch of images."""
        z = self.encoder(x)
        return z, self.decoder(z)
def main():
    """Train (or, with --valid, only evaluate) the conv autoencoder on CIFAR-10."""
    parser = argparse.ArgumentParser(description="Train Autoencoder")
    # parser.add_argument("--valid", action="store_true", default=False,
    # help="Perform validation only.")
    # NOTE(review): argparse type=bool is a footgun — any non-empty string
    # (including "False") parses as True; the commented store_true form above
    # is the conventional fix.
    parser.add_argument("--valid", type=bool, default=False,
                        help="Perform validation only.")
    args = parser.parse_args()
    # Create model
    autoencoder = create_model()
    # Load data
    transform = transforms.Compose(
        [transforms.RandomCrop(32, padding=4),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
         ])
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=16,
                                              shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=16,
                                             shuffle=False, num_workers=2)
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    if args.valid:
        print("Loading checkpoint...")
        autoencoder.load_state_dict(torch.load("./weights/autoencoder.pkl"))
        dataiter = iter(testloader)
        # NOTE(review): .next() was removed in newer PyTorch; use next(dataiter).
        images, labels = dataiter.next()
        print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(16)))
        imshow(torchvision.utils.make_grid(images))
        # NOTE(review): unconditional .cuda() — validation crashes on CPU-only hosts.
        images = Variable(images.cuda())
        decoded_imgs = autoencoder(images)[1]
        imshow(torchvision.utils.make_grid(decoded_imgs.data))
        exit(0)
    # Define an optimizer and criterion
    # NOTE(review): CrossEntropyLoss expects class logits and integer targets,
    # but below it is applied to image reconstructions vs. input images —
    # MSELoss (or BCELoss, given the Sigmoid output) was likely intended.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(autoencoder.parameters())
    for epoch in range(EPOCH):
        running_loss = 0.0
        x_list = []
        y_list = []
        for i, (inputs, _) in enumerate(trainloader, 0):
            inputs_x = get_torch_vars(inputs)
            inputs_y = get_torch_vars(inputs)
            # Buffer batches in pairs; an optimizer step runs every 2nd batch.
            x_list.append(inputs_x)
            y_list.append(inputs_y)
            if len(x_list) != 2:
                continue
            # ============ Forward ============
            encoded_1, outputs_1 = autoencoder(x_list[0])
            encoded_2, outputs_2 = autoencoder(x_list[1])
            loss1 = criterion(outputs_1, y_list[0])
            loss2 = criterion(outputs_2, y_list[1])
            vgg19_bn.eval()
            # loss3 rewards dissimilar VGG features between the two batches.
            x_list_0 = vgg19_bn(x_list[0])
            x_list_1 = vgg19_bn(x_list[1])
            loss3 = loss_vgg(x_list_0, x_list_1)
            loss = loss1 + loss2 + loss3
            # ============ Backward ============
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            x_list = [] # reset the pair buffer
            y_list = [] # reset the pair buffer
            # ============ Logging ============
            running_loss += loss.data
            if i % 2000 == 1999:
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    print('Finished Training')
    print('Saving Model...')
    if not os.path.exists('./weights'):
        os.mkdir('./weights')
    torch.save(autoencoder.state_dict(), "./weights/autoencoder.pkl")
# Script entry point: train the autoencoder end to end.
if __name__ == '__main__':
    main()
GalaxyDataset | GalaxyDataset-master/NEI.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.utils.data as Data
from preprocess import load_npy
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# Torchvision
import torchvision
import torchvision.transforms as transforms
class Autoencoder(nn.Module):
    """Convolutional autoencoder mapping 3x32x32 images to a 48x4x4 latent
    code and back; the final Sigmoid keeps reconstructions in [0, 1]."""

    def __init__(self):
        super(Autoencoder, self).__init__()
        # Channel progression shared by both paths.
        channels = [3, 12, 24, 48]
        enc = []
        for c_in, c_out in zip(channels, channels[1:]):
            enc += [nn.Conv2d(c_in, c_out, 4, stride=2, padding=1), nn.ReLU()]
        self.encoder = nn.Sequential(*enc)
        rev = channels[::-1]
        dec = []
        for c_in, c_out in zip(rev, rev[1:]):
            dec += [nn.ConvTranspose2d(c_in, c_out, 4, stride=2, padding=1), nn.ReLU()]
        # The last activation is a Sigmoid, not a ReLU.
        dec[-1] = nn.Sigmoid()
        self.decoder = nn.Sequential(*dec)

    def forward(self, x):
        """Return (latent code, reconstruction)."""
        latent = self.encoder(x)
        recon = self.decoder(latent)
        return latent, recon
def get_torch_vars(x):
    """Move x to the GPU when CUDA is available, then wrap it in a Variable."""
    if torch.cuda.is_available():
        return Variable(x.cuda())
    return Variable(x)
EPOCH = 100  # number of full passes over the training set
LR = 0.005   # Adam learning rate for the autoencoder
def train_AEnet(dataset):
    """Train the Autoencoder on CIFAR-10 and return the trained model.

    NOTE(review): the ``dataset`` argument is never used — CIFAR-10 is always
    downloaded and trained on; confirm whether training on ``dataset`` was
    intended by the caller (computing_NI passes its own loaders).
    """
    # Load data
    transform = transforms.Compose(
        [transforms.RandomCrop(32, padding=4),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
         ])
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=16,
                                              shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=16,
                                             shuffle=False, num_workers=2)
    autoencoder = Autoencoder()
    optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
    # NOTE(review): CrossEntropyLoss expects class logits and integer targets;
    # here it is applied to an image reconstruction vs. the input image —
    # MSELoss (or BCELoss, given the Sigmoid output) was likely intended.
    loss_func = nn.CrossEntropyLoss()
    for epoch in range(EPOCH):
        for step, (x, b_label) in enumerate(trainloader):
            # Input and reconstruction target are the same batch.
            inputs_x = get_torch_vars(x)
            inputs_y = get_torch_vars(x)
            encoded, decoded = autoencoder(inputs_x)
            loss = loss_func(decoded, inputs_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step % 100 == 0:
                print("Epoch: ", epoch, "| step: ", step, "| train loss: %.4f" % loss.data.numpy())
    return autoencoder
def computing_NI(train_dataset, test_dataset, nums_classes):
    """Compute a scalar "NI" domain-gap score between train and test features.

    NOTE(review): several points to confirm against the intended NI formula:
    - ``train_dataset.concat(test_encoded)`` is not a Dataset/Tensor API;
      ``torch.cat([train_encoded, test_encoded])`` was probably intended.
    - Operator precedence: ``a - b / c`` divides only ``b`` by the normalizer;
      the usual normalized-difference form is ``(a - b) / c``.
    - ``nums_classes`` is unused.
    """
    # 1. feature extraction  2. first moment  3. L2 norm
    autoencoder = train_AEnet(train_dataset)
    train_encoded, _ = autoencoder(train_dataset)
    test_encoded, _ = autoencoder(test_dataset)
    normalize_data = F.normalize(train_dataset.concat(test_encoded), p=2, dim=1)
    NI = torch.norm(torch.mean(train_encoded) - torch.mean(test_encoded) / (normalize_data), p=2)
    return NI
if __name__ == "__main__":
# 1. 读取数据 2. 创建公式 3. 使用公式
train_loader, test_loader = load_npy("./cifar10/splitByLabelsWithNormalAndErrorDataset/SplitByLabels_3666_truck.npy")
NI = computing_NI(train_loader, test_loader, 10)
pass | 3,661 | 35.62 | 121 | py |
GalaxyDataset | GalaxyDataset-master/mnist_bias.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
import random, os, time, argparse, pickle
def mnist_image_raw2bias(image_raw, label, background, digit, id_1, id_2):
    """Colorize a 28x28 grayscale MNIST image into a color-biased RGB tensor.

    Zero (background) pixels get one of eight pure corner colors of the RGB
    cube; nonzero (digit) pixels get a half-intensity center color shifted by
    the pixel's own brightness. ``id_1``/``id_2`` are accepted for API
    compatibility with the (commented-out) image-saving path but unused.
    Returns ``(CHW float tensor, label, background, digit)``.
    """
    # Eight base colors: cube corners (bit pattern of the index) for the
    # background, and their half-intensity centers for the digit strokes.
    corners = []
    centers = []
    for code in range(8):
        bits = [code // 4, (code // 2) % 2, code % 2]
        corners.append(bits)
        centers.append([(bit + 0.5) / 2 for bit in bits])

    pixels = []
    for row in image_raw:
        for value in row:
            if value == 0:
                pixels.append(corners[background])
            else:
                # Brightness-dependent shift in [-0.25, 0.25].
                shift = ((value - 0.5) / 2).numpy().tolist()
                base = centers[digit]
                pixels.append([base[0] + shift, base[1] + shift, base[2] + shift])

    data = torch.FloatTensor(pixels).reshape([28, 28, 3]).permute(2, 0, 1)
    return (data, label, background, digit)
def mnist_process(path):
    """Build the color-biased MNIST training set and pickle it to disk.

    Each digit class is cut into runs of ``len(class)//56`` samples; every run
    is assigned a (background, digit) color pair, so color correlates with
    class identity. Returns the full list of colorized samples.
    """
    mnist_raw = datasets.MNIST(path, train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                       ]))
    mnist_list = []
    mnist_bias = []
    for i in range(10):
        mnist_list.append([])
    # Bucket raw samples by label. NOTE(review): ([image_raw], label) relies
    # on the image tensor having shape (1, 28, 28) so the channel unpacks.
    for ([image_raw], label) in mnist_raw:
        mnist_list[label].append(([image_raw], label))
    for i in range(10):
        l = len(mnist_list[i])
        # NOTE(review): num == 0 (class smaller than 56 samples) would make
        # j % num raise ZeroDivisionError — assumed never to happen for MNIST.
        num = l // 56
        background_color = 0
        digit_color = 0
        for j in range(l):
            ([image_raw], label) = mnist_list[i][j]
            if j % num == 0:
                # Advance to the next color pair every `num` samples, skipping
                # combinations where digit and background would coincide and
                # clamping the background at its last value (7).
                digit_color += 1
                cnt = 0
                if background_color == digit_color:
                    digit_color += 1
                if digit_color == 8:
                    digit_color = 0
                    background_color += 1
                    if background_color == 8:
                        background_color = 7
                cnt += 1
            mnist_bias.append(mnist_image_raw2bias(image_raw, label, background_color, digit_color, cnt, j))
            print(i, j)
    print(len(mnist_bias))
    # NOTE(review): file handle is never closed — a `with open(...)` block
    # would guarantee the pickle is flushed.
    f = open(path+'/'+'mnist_bias_train.pkl', 'wb')
    pickle.dump(mnist_bias, f)
    return mnist_bias
| 2,547 | 33.432432 | 139 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.