repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
DDoS
DDoS-master/train_DDoS.py
import argparse import logging import math import os import random import statistics import sys import numpy as np import torch import torch.autograd.profiler as profiler import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchio as tio from torch.cuda.amp import GradScaler, autocast from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm import wandb from models import * from models.ReconResNet import ResNet from models.ShuffleUNet.net import ShuffleUNet from models.ThisNewNet import ThisNewNet from utils.data import * from utils.datasets_dyn import SRDataset from utils.pLoss.perceptual_loss import PerceptualLoss from utils.utilities import getSSIM, tensorboard_images __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" modelIDs = { 0: "UNET", 1: "SRCNN", 2: "SRCNNv2", 3: "SRCNNv3", 4: "UNETvSeg", 5: "UNETvSegDS", 6: "DenseNet", 7: "UNETSRCNN", 8: "SRCNNUNET", 9: "ReconResNet", 10: "ShuffleUNet", 11: "UNETMSS", } lossIDs = { 0: "pLoss", 1: "MAE", 2: "MultiSSIM", 3: "SSIM3D" } def parseARGS(): ap = argparse.ArgumentParser() ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).") ap.add_argument("--seed", default=2020, type=int, help="Seed") # ap.add_argument("-ds", "--dataset", default=r'/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/CHAOSwoT2Dyn/newSet/', help="Path to Dataset Folder.") ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2Dyn/newSet/', help="Path to Dataset Folder.") ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.") ap.add_argument("-s", "--scalefact", default='(1,1,1)', 
help="Scaling Factor. For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].") ap.add_argument("-uf", "--usfolder", default='usTrain', help="Undersampled Folder.") ap.add_argument("-hf", "--hrfolder", default='hrTrain', help="HighRes (Fully-sampled) Folder.") ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.") ap.add_argument("-ms", "--modelsuffix", default='full', help="Any Suffix To Add with the Model Name.") ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.") ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.") ap.add_argument("-cp", "--chkpoint", default=None, help="Checkpoint (of the current training) to Load.") ap.add_argument("-cpft", "--chkpointft", default=None, help="(To be used for Fine-Tuning) Checkpoint to Load for Fine-Tuning.") ap.add_argument("-c", "--cuda", type=bool, default=False, help="Use CUDA.") ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.") ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.") ap.add_argument("-v", "--val", type=bool, default=True, help="Do Validation.") ap.add_argument("-vp", "--valdsper", type=float, default=0.3, help="Percentage of the DS to be used for Validation.") ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.") ap.add_argument("-ep", "--epochs", type=int, default=100, help="Total Number of Epochs. To use Number of Iterations, set it to None") ap.add_argument("-it", "--iterations", type=int, default=1e6, help="Total Number of Iterations. To be used if number of Epochs is None") ap.add_argument("-lr", "--lr", type=float, default=1e-4, help="Total Number of Epochs.") ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. 
Set it to None if not desired.") ap.add_argument("-pst", "--patchstride", default='(12,12,6)', help="Stride of patches, to be used during validation") ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.") ap.add_argument("-sf", "--savefreq", type=int, default=1, help="saving Frequency.") ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).") ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs)) ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.") ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]") ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.") ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.") ap.add_argument("-inc", "--inchannel", type=int, default=2, help="Number of Channels in the Data.") ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.") ap.add_argument("-mslvl", "--msslevel", type=int, default=1, help="(Only for Model ID 11) Depth of the Model.") ap.add_argument("-msltn", "--msslatent", type=int, default=0, help="(Only for Model ID 11) Use the latent as one of the MSS level.") ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.") ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=1, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.") ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. 
Will only be used if Patch Size is None.") ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.") ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs)) ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.") ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.") ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired") ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).") ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.") ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.") ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.") ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Rescale intensities beteen 0 and 1") ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2") ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. 
Default 1") #WnB related params ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not") ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project") ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity") ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group") ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID") ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None") ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients") return ap.parse_args() args = parseARGS() # os.environ["TMPDIR"] = "/scratch/schatter/tmp" # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu torch.set_num_threads(1) random.seed(args.seed) os.environ['PYTHONHASHSEED'] = str(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if __name__ == "__main__" : args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(','))) args.homepath = os.path.expanduser("~/Documents") if args.patchsize: args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(','))) if args.patchstride: args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(','))) if args.inshape: args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(','))) args.modelname = args.usfolder + "_" + modelIDs[args.modelid] + args.modelsuffix if args.modelid == 0 or args.modelid == 6 or args.modelid == 11: args.modelname += "do" + str(args.dropprob) + "dp" + str(args.mdepth) if args.modelid == 0 or args.modelid == 9 or args.modelid == 11: args.modelname += 
args.upmode if args.batchnorm: args.modelname += "BN" if args.modelid == 11: args.modelname += "MSS"+str(args.msslevel) args.modelname += "Latent" if args.msslatent else "NoLatent" args.modelname += args.mssup args.modelname += "InterpB4" if args.mssinterpb4 else "NoInterpB4" trainID = args.modelname + '_' + args.us + '_' + lossIDs[args.lossid] if args.lossid == 0: trainID += args.plosstyp + 'lvl' + str(args.plosslvl) if args.finetune: trainID += "_FT_lrdec" + str(args.lrdecrate) if args.fteprt: trainID += "_eprt" + str(args.fteprt) else: trainID += "_itrt" + str(args.ftitrt) print("Training: "+trainID) if args.modelid == 2: SRCNN3D = SRCNN3Dv2 elif args.modelid == 3: SRCNN3D = SRCNN3Dv3 if args.medianloss: loss_reducer = statistics.median else: loss_reducer = statistics.mean dir_path = args.dataset + args.usfolder+ '/' + args.us + '/' label_dir_path = args.dataset + args.hrfolder + '/' log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', trainID) save_path = os.path.join(args.dataset, args.outfolder, trainID) device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") tb_writer = SummaryWriter(log_dir = log_path) os.makedirs(save_path, exist_ok=True) logname = os.path.join(args.homepath, 'log_'+trainID+'.txt') logging.basicConfig(filename=logname, filemode='a', format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG) transforms = [] if not args.patchsize: transforms.append(tio.transforms.CropOrPad(target_shape=args.inshape)) trainDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed patch_size_us=None, pre_interpolate=args.preint, 
norm_data=args.prenorm, pre_load=False, pad_patch=False) #TODO implement patch_size_us if required - patch_size//scaling_factor model_scale_factor=tuple(np.roll(args.scalefact,shift=1)) if args.val: train_size = int((1-args.valdsper) * len(trainDS)) val_size = len(trainDS) - train_size trainDS, valDS = torch.utils.data.random_split(trainDS, [train_size, val_size]) else: valDS = None if bool(args.patchsize): args.inshape = args.patchsize train_loader = DataLoader(dataset=trainDS, batch_size=args.batchsize,shuffle=True, num_workers=args.nworkers, pin_memory=True) val_loader = None if not args.val else DataLoader(dataset=valDS,batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True) if args.modelid == 0: model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob)) elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3): sys.exit("SRCNN3D is not ready for different numbers of input and output channel") model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures) elif (args.modelid == 4) or (args.modelid == 5): model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures) elif args.modelid == 6: model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob) elif (args.modelid == 7) or (args.modelid == 8): model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False, loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp) elif args.modelid == 9: model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, 
starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True) elif args.modelid == 10: model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel) elif args.modelid == 11: model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4) else: sys.exit("Invalid Model ID") if args.modelid == 5: IsDeepSup = True else: IsDeepSup = False if args.profile: dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape) with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof: model(dummy) prof.export_chrome_trace(os.path.join(save_path, 'model_trace')) args.lr = args.lr/args.lrdecrate optimizer = optim.Adam(params=filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr) model.to(device) if args.lossid == 0: if args.outchannel != 1: sys.exit("Perceptual Loss used here only works for 1 channel images") loss_func = PerceptualLoss(device=device, loss_model="unet3Dds", resize=None, loss_type=args.plosstyp, n_level=args.plosslvl) elif args.lossid == 1: loss_func = nn.L1Loss(reduction='mean') elif args.lossid == 2: loss_func = MultiSSIM(data_range=1, n_channels=args.outchannel, reduction='mean').to(device) elif args.lossid == 3: loss_func = SSIM(data_range=1, channel=args.outchannel, spatial_dims=3).to(device) else: sys.exit("Invalid Loss ID") if (args.lossid == 0 and args.plosstyp == "L1") or (args.lossid == 1): IsNegLoss = False else: IsNegLoss = True if (args.modelid == 7) or (args.modelid == 8): model.loss_func = 
loss_func scaler = GradScaler(enabled=args.amp) if args.chkpoint: chk = torch.load(args.chkpoint, map_location=device) elif args.finetune: if args.chkpointft: chk = torch.load(args.chkpointft, map_location=device) else: sys.exit("Finetune can't be performed if chkpointft not supplied") else: chk = None start_epoch = 0 best_loss = float('-inf') if IsNegLoss else float('inf') if chk is not None: model.load_state_dict(chk['state_dict']) optimizer.load_state_dict(chk['optimizer']) scaler.load_state_dict(chk['AMPScaler']) best_loss = chk['best_loss'] start_epoch = chk['epoch'] + 1 iterations = chk['iterations'] main_train_epcoh = (chk['main_train_epoch'] + 1) if 'main_train_epoch' in chk else start_epoch #only be used for finetune if args.finetune: if args.fteprt: args.epochs = int((main_train_epcoh*(1+args.fteprt))) else: args.iterations = int(iterations*args.ftitrt) n_ft_ep = int(args.iterations // len(train_loader)) args.epochs = main_train_epcoh + n_ft_ep if args.epochs is None: args.epochs = int(args.iterations // len(train_loader) + 1) if start_epoch >= args.epochs: logging.error('Training should atleast be for one epoch. 
Adjusting to perform 1 epoch training') args.epochs = start_epoch+1 if not args.wnbactive: os.environ["WANDB_MODE"] = "dryrun" with wandb.init(project=args.wnbproject, entity=args.wnbentity, group=args.wnbgroup, config=args, name=args.wnbprefix+trainID, id=args.wnbprefix+trainID, resume=True) as WnBRun: wandb.watch(model, log=args.wnbmodellog, log_freq=args.wnbmodelfreq) logging.info('Training Epochs: from {0} to {1}'.format(start_epoch, args.epochs-1)) for epoch in range(start_epoch, args.epochs): #Train model.train() runningLoss = [] train_loss = [] print('Epoch '+ str(epoch)+ ': Train') for i, (images, gt) in enumerate(tqdm(train_loader)): images = images.to(device) gt = gt.to(device) with autocast(enabled=args.amp): if type(model) is SRCNN3D: output1, output2 = model(images) loss1 = loss_func(output1, gt) loss2 = loss_func(output2, gt) loss = loss2 + loss1 elif type(model) is UNetVSeg: if IsDeepSup: sys.exit("Not Implimented yet") else: out, _, _ = model(images) loss = loss_func(out, gt) elif type(model) is ThisNewNet: out, loss = model(images, gt=gt) elif type(model) is UNetMSS: out, mssout = model(images) loss = loss_func(out, gt) for mss in range(len(mssout)): loss += model.mss_coeff[mss] * loss_func(mssout[mss], gt) else: out = model(images) loss = loss_func(out, gt) if IsNegLoss: loss = -loss optimizer.zero_grad() scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4) train_loss.append(loss) runningLoss.append(loss) logging.info('[%d/%d][%d/%d] Train Loss: %.4f' % ((epoch+1), args.epochs, i, len(train_loader), loss)) del gt, out, loss torch.cuda.empty_cache() if i % args.logfreq == 0: niter = epoch*len(train_loader)+i tb_writer.add_scalar('Train/Loss', loss_reducer(runningLoss), niter) wandb.log({"Epoch":epoch, "TrainLoss":loss_reducer(runningLoss)})#, step=niter) # tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'train') runningLoss = [] if 
args.finetune or (epoch % args.savefreq == 0): checkpoint = { 'epoch': epoch, 'iterations': (epoch+1)*len(train_loader), 'best_loss': best_loss, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'AMPScaler': scaler.state_dict() } torch.save(checkpoint, os.path.join(save_path, trainID+".pth.tar")) if args.modelid != 9 and args.modelid != 6: torch.onnx.export(model, images, trainID+".onnx", input_names=["HRPrevTP+LRCurrTP"], output_names=["SuperResolvedCurrTP"]) wandb.save(trainID+".onnx") del images tb_writer.add_scalar('Train/EpochLoss', loss_reducer(train_loss), epoch) wandb.log({"TrainEpochLoss":loss_reducer(train_loss)})#, step=epoch) torch.cuda.empty_cache() #Validate if val_loader: model.eval() with torch.no_grad(): runningLoss = [] val_loss = [] runningAcc = [] val_acc = [] print('Epoch '+ str(epoch)+ ': Val') for i, (images, gt) in enumerate(tqdm(val_loader)): images = images.to(device) gt = gt.to(device) with autocast(enabled=args.amp): if type(model) is SRCNN3D: output1, output2 = model(images) loss1 = loss_func(output1, gt) loss2 = loss_func(output2, gt) loss = loss2 + loss1 elif type(model) is UNetVSeg: if IsDeepSup: sys.exit("Not Implimented yet") else: out, _, _ = model(images) loss = loss_func(out, gt) elif type(model) is ThisNewNet: out, loss = model(images, gt=gt) else: out = model(images) loss = loss_func(out, gt) ssim = getSSIM(gt.detach().cpu().numpy(), out.detach().cpu().numpy(), data_range=1) loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4) val_loss.append(loss) runningLoss.append(loss) val_acc.append(ssim) runningAcc.append(ssim) logging.info('[%d/%d][%d/%d] Val Loss: %.4f' % ((epoch+1), args.epochs, i, len(val_loader), loss)) del gt, out, loss torch.cuda.empty_cache() #For tensorboard if i % args.logfreq == 0: niter = epoch*len(val_loader)+i tb_writer.add_scalar('Val/Loss', loss_reducer(runningLoss), niter) wandb.log({"Epoch":epoch, "ValLoss":loss_reducer(runningLoss)})#, step=niter) 
tb_writer.add_scalar('Val/SSIM', loss_reducer(runningAcc), niter) wandb.log({"Epoch":epoch, "ValSSIM":loss_reducer(runningAcc)})#, step=niter) # tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'val') runningLoss = [] runningAcc = [] if (loss_reducer(val_loss) < best_loss and not IsNegLoss) or (loss_reducer(val_loss) > best_loss and IsNegLoss): best_loss = loss_reducer(val_loss) WnBRun.summary["best_loss"] = best_loss checkpoint = { 'epoch': epoch, 'best_loss': best_loss, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'AMPScaler': scaler.state_dict() } torch.save(checkpoint, os.path.join(save_path, trainID+"_best.pth.tar")) if args.modelid != 9 and args.modelid != 6: torch.onnx.export(model, images, trainID+"_best.onnx", input_names=["HRPrevTP+LRCurrTP"], output_names=["SuperResolvedCurrTP"]) wandb.save(trainID+"_best.onnx") del images tb_writer.add_scalar('Val/EpochLoss', loss_reducer(val_loss), epoch) wandb.log({"ValEpochLoss":loss_reducer(val_loss)})#, step=epoch) tb_writer.add_scalar('Val/EpochSSIM', loss_reducer(val_acc), epoch) wandb.log({"ValEpochSSIM":loss_reducer(val_acc)})#, step=epoch) torch.cuda.empty_cache()
26,365
53.929167
230
py
DDoS
DDoS-master/models/unet3DMSS.py
# Adapted from https://discuss.pytorch.org/t/unet-implementation/426 import torch from torch import nn import torch.nn.functional as F import torchcomplex.nn.functional as cF __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" class UNetMSS(nn.Module): """ Implementation of U-Net: Convolutional Networks for Biomedical Image Segmentation (Ronneberger et al., 2015) https://arxiv.org/abs/1505.04597 Using the default arguments will yield the exact version used in the original paper Args: in_channels (int): number of input channels n_classes (int): number of output channels depth (int): depth of the network wf (int): number of filters in the first layer is 2**wf padding (bool): if True, apply padding such that the input shape is the same as the output. This may introduce artifacts batch_norm (bool): Use BatchNorm after layers with an activation function up_mode (str): one of 'upconv' or 'upsample'. 'upconv' will use transposed convolutions for learned upsampling. 'upsample' will use bilinear upsampling. 
""" def __init__(self, in_channels=1, n_classes=1, depth=3, wf=6, padding=True, batch_norm=False, up_mode='upconv', dropout=False, mss_level=2, mss_fromlatent=True, mss_up="trilinear", mss_interpb4=False): super(UNetMSS, self).__init__() assert up_mode in ('upconv', 'upsample') self.padding = padding self.depth = depth self.dropout = nn.Dropout3d() if dropout else nn.Sequential() prev_channels = in_channels self.down_path = nn.ModuleList() up_out_features = [] for i in range(depth): self.down_path.append(UNetConvBlock(prev_channels, 2**(wf+i), padding, batch_norm)) prev_channels = 2**(wf+i) if mss_fromlatent: mss_features = [prev_channels] else: mss_features = [] self.up_path = nn.ModuleList() for i in reversed(range(depth - 1)): self.up_path.append(UNetUpBlock(prev_channels, 2**(wf+i), up_mode, padding, batch_norm)) prev_channels = 2**(wf+i) up_out_features.append(prev_channels) self.last = nn.Conv3d(prev_channels, n_classes, kernel_size=1) mss_features += up_out_features[len(up_out_features)-1-mss_level if not mss_fromlatent else len(up_out_features)-1-mss_level+1:-1] self.mss_level = mss_level self.mss_up = mss_up self.mss_fromlatent = mss_fromlatent self.mss_interpb4 = mss_interpb4 self.mss_convs = nn.ModuleList() for i in range(self.mss_level): self.mss_convs.append(nn.Conv3d(mss_features[i], n_classes, kernel_size=1)) if self.mss_level == 1: self.mss_coeff = [0.5] else: lmbda = [] for i in range(self.mss_level-1, -1, -1): lmbda.append(2**i) self.mss_coeff = [] fact = 1.0 / sum(lmbda) for i in range(self.mss_level-1): self.mss_coeff.append(fact*lmbda[i]) self.mss_coeff.append(1.0 - sum(self.mss_coeff)) self.mss_coeff.reverse() def forward(self, x): blocks = [] for i, down in enumerate(self.down_path): x = down(x) if i != len(self.down_path)-1: blocks.append(x) x = F.avg_pool3d(x, 2) x = self.dropout(x) if self.mss_fromlatent: mss = [x] else: mss = [] for i, up in enumerate(self.up_path): x = up(x, blocks[-i-1]) if self.training and ((len(self.up_path)-1-i <= 
self.mss_level) and not(i+1 == len(self.up_path))): mss.append(x) if self.training: for i in range(len(mss)): if not self.mss_interpb4: mss[i] = F.interpolate(self.mss_convs[i](mss[i]), size=x.shape[2:], mode=self.mss_up) else: mss[i] = self.mss_convs[i](F.interpolate(mss[i], size=x.shape[2:], mode=self.mss_up)) return self.last(x), mss else: return self.last(x) class UNetConvBlock(nn.Module): def __init__(self, in_size, out_size, padding, batch_norm): super(UNetConvBlock, self).__init__() block = [] block.append(nn.Conv3d(in_size, out_size, kernel_size=3, padding=int(padding))) block.append(nn.ReLU()) if batch_norm: block.append(nn.BatchNorm3d(out_size)) block.append(nn.Conv3d(out_size, out_size, kernel_size=3, padding=int(padding))) block.append(nn.ReLU()) if batch_norm: block.append(nn.BatchNorm3d(out_size)) self.block = nn.Sequential(*block) def forward(self, x): out = self.block(x) return out class UNetUpBlock(nn.Module): def __init__(self, in_size, out_size, up_mode, padding, batch_norm): super(UNetUpBlock, self).__init__() if up_mode == 'upconv': self.up = nn.ConvTranspose3d(in_size, out_size, kernel_size=2, stride=2) elif up_mode == 'upsample': self.up = nn.Sequential(nn.Upsample(mode='trilinear', scale_factor=2), nn.Conv3d(in_size, out_size, kernel_size=1)) self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm) def center_crop(self, layer, target_size): _, _, layer_depth, layer_height, layer_width = layer.size() diff_z = (layer_depth - target_size[0]) // 2 diff_y = (layer_height - target_size[1]) // 2 diff_x = (layer_width - target_size[2]) // 2 return layer[:, :, diff_z:(diff_z + target_size[0]), diff_y:(diff_y + target_size[1]), diff_x:(diff_x + target_size[2])] # _, _, layer_height, layer_width = layer.size() #for 2D data # diff_y = (layer_height - target_size[0]) // 2 # diff_x = (layer_width - target_size[1]) // 2 # return layer[:, :, diff_y:(diff_y + target_size[0]), diff_x:(diff_x + target_size[1])] def forward(self, x, bridge): up = 
self.up(x) # bridge = self.center_crop(bridge, up.shape[2:]) #sending shape ignoring 2 digit, so target size start with 0,1,2 up = F.interpolate(up, size=bridge.shape[2:], mode='trilinear') out = torch.cat([up, bridge], 1) out = self.conv_block(out) return out
7,232
38.961326
128
py
DDoS
DDoS-master/models/SRCNN3Dv3.py
import numpy as np import torch import torch.nn as nn __author__ = "Soumick Chatterjee, Geetha Doddapaneni Gopinath" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Geetha Doddapaneni Gopinath"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Under Testing" class SRCNN3Dv3(nn.Module): def __init__(self,n_channels=1,scale_factor=2,num_features=32,kernel_size=3,stride=1): super(SRCNN3Dv3, self).__init__() if type(scale_factor) is int: self.scale_factor=(scale_factor,scale_factor,scale_factor) else: self.scale_factor=scale_factor self.n_channels=n_channels self.conv_1 = nn.Sequential(nn.Conv3d(n_channels, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_2 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_3 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_4 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_5 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_6s = nn.ModuleList() self.conv_6s_post = nn.ModuleList() for i in range(len(self.scale_factor)): if self.scale_factor[i] > 1: out_features = np.prod(self.scale_factor[i:]) self.conv_6s.append(nn.Sequential(nn.Conv3d(num_features, out_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=out_features), 
nn.ReLU(inplace=True))) self.conv_6s_post.append(nn.Sequential(nn.Conv3d(out_features // self.scale_factor[i], num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True))) self.conv_7 = nn.Sequential(nn.Conv3d(n_channels, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_8 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_9 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_10 = nn.Sequential(nn.Conv3d(num_features, n_channels, kernel_size, padding=kernel_size // 2), nn.Sigmoid()) def forward(self, image): output_1 = self.conv_1(image) output_2 = self.conv_2(output_1) output_3a = self.conv_3(output_2) output_3 = torch.add(output_1, output_3a) #torch.mul(output_1, 1) = output_1 #Note for Geetha output_4 = self.conv_4(output_3) output_5a = self.conv_5(output_4) output_5 = torch.add(output_1, output_5a) output_6 = output_5 mod_ind = 0 for i in range(len(self.scale_factor)): if self.scale_factor[i] > 1: output_6 = self.conv_6s[mod_ind](output_6) suffled_size = list(output_6.shape) suffled_size[1] //= self.scale_factor[i] suffled_size[2+i] *= self.scale_factor[i] output_6 = output_6.view(suffled_size) if i+1 < len(self.scale_factor): output_6 = self.conv_6s_post[mod_ind](output_6) mod_ind += 1 output_7 = self.conv_7(output_6) output_8 = self.conv_8(output_7) output_9a = self.conv_9(output_8) output_9 = torch.add(output_7, output_9a) output = self.conv_10(output_9) # Final Loss return output_6, output if __name__ == "__main__": tensor = torch.rand((2, 1, 24, 16, 16)).cuda() model = SRCNN3Dv2(scale_factor=(2,4,3)).cuda() model(tensor) # model = 
SRCNN3D(1,num_features=64,scale_factor=(2,1,1)).cuda() # from torchsummary import summary # summary(model, input_size=(1, 32, 32, 32))
5,034
53.728261
165
py
DDoS
DDoS-master/models/densenet.py
# Source: https://github.com/kenshohara/3D-ResNets-PyTorch/blob/master/models/densenet.py # Paper Ref: https://arxiv.org/abs/2004.04968 from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Untested" class _DenseLayer(nn.Sequential): def __init__(self, num_input_features, growth_rate, bn_size, drop_rate): super().__init__() self.add_module('norm1', nn.BatchNorm3d(num_input_features)) self.add_module('relu1', nn.ReLU(inplace=True)) self.add_module( 'conv1', nn.Conv3d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)) self.add_module('norm2', nn.BatchNorm3d(bn_size * growth_rate)) self.add_module('relu2', nn.ReLU(inplace=True)) self.add_module( 'conv2', nn.Conv3d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)) self.drop_rate = drop_rate def forward(self, x): new_features = super().forward(x) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return torch.cat([x, new_features], 1) class _DenseBlock(nn.Sequential): def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate): super().__init__() for i in range(num_layers): layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate) self.add_module('denselayer{}'.format(i + 1), layer) class _Transition(nn.Sequential): def __init__(self, num_input_features, num_output_features, no_pool=True): super().__init__() self.add_module('norm', nn.BatchNorm3d(num_input_features)) self.add_module('relu', nn.ReLU(inplace=True)) self.add_module( 'conv', nn.Conv3d(num_input_features, num_output_features, 
kernel_size=1, stride=1, bias=False)) if not no_pool: self.add_module('pool', nn.AvgPool3d(kernel_size=2, stride=2)) class DenseNet(nn.Module): """Densenet-BC model class Args: growth_rate (int) - how many filters to add each layer (k in paper) block_config (list of 4 ints) - how many layers in each pooling block num_init_features (int) - the number of filters to learn in the first convolution layer bn_size (int) - multiplicative factor for number of bottle neck layers (i.e. bn_size * k features in the bottleneck layer) drop_rate (float) - dropout rate after each dense layer num_classes (int) - number of classification classes """ def __init__(self, n_input_channels=3, conv1_kernel=7, conv1_stride=1, no_pool=True, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000): super().__init__() # First convolution self.model = [('conv1', nn.Conv3d(n_input_channels, num_init_features, kernel_size=conv1_kernel, stride=conv1_stride, padding=conv1_kernel // 2, bias=False)), ('norm1', nn.BatchNorm3d(num_init_features)), ('relu1', nn.ReLU(inplace=True))] if not no_pool: self.model.append( ('pool1', nn.MaxPool3d(kernel_size=3, stride=2, padding=1))) self.model = nn.Sequential(OrderedDict(self.model)) # Each denseblock num_features = num_init_features for i, num_layers in enumerate(block_config): block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate) self.model.add_module('denseblock{}'.format(i + 1), block) num_features = num_features + num_layers * growth_rate if i != len(block_config) - 1: trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2, no_pool=no_pool) self.model.add_module('transition{}'.format(i + 1), trans) num_features = num_features // 2 # Final batch norm self.model.add_module('norm5', nn.BatchNorm3d(num_features)) # Final fully connected layer self.model.add_module('finconv', 
nn.Conv3d(num_features, num_classes, kernel_size=1, stride=1, padding=0)) for m in self.modules(): if isinstance(m, nn.Conv3d): m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out') elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() for m in self.modules(): if isinstance(m, nn.Conv3d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm3d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def forward(self, x): return self.model(x) def generate_model(model_depth, **kwargs): assert model_depth in [121, 169, 201, 264] if model_depth == 121: model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), **kwargs) elif model_depth == 169: model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), **kwargs) elif model_depth == 201: model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32), **kwargs) elif model_depth == 264: model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 64, 48), **kwargs) return model
7,339
37.631579
114
py
DDoS
DDoS-master/models/SRCNN3D.py
import numpy as np
import torch
import torch.nn as nn

__author__ = "Soumick Chatterjee, Geetha Doddapaneni Gopinath"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Geetha Doddapaneni Gopinath"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Under Testing"


class SRCNN3D(nn.Module):
    """3D SRCNN-style super-resolution network.

    Stage 1 (conv_1..conv_6) runs at the input resolution and emits
    prod(scale_factor) activation maps, which are folded by a plain
    ``view`` into an upscaled volume; stage 2 (conv_7..conv_10) refines
    that volume and squashes it through a sigmoid.  ``forward`` returns
    both the folded intermediate and the refined output.
    """

    def __init__(self, n_channels=1, scale_factor=2, num_features=32,
                 kernel_size=3, stride=1):
        super(SRCNN3D, self).__init__()
        if type(scale_factor) is int:
            self.scale_factor = (scale_factor, scale_factor, scale_factor)
        else:
            self.scale_factor = scale_factor
        self.n_channels = n_channels
        # One activation map per upscaled voxel of the view below.
        # NOTE(review): the view-based fold appears to assume
        # n_channels == 1 — confirm before multi-channel use.
        activation_maps = np.prod(self.scale_factor)
        pad = kernel_size // 2

        def stage1_block(in_f, out_f):
            # Conv -> BN -> ReLU, honouring the user-supplied stride.
            return nn.Sequential(
                nn.Conv3d(in_f, out_f, kernel_size, stride, padding=pad),
                nn.BatchNorm3d(num_features=out_f),
                nn.ReLU(inplace=True))

        def stage2_block(in_f, out_f):
            # Conv -> BN -> ReLU at the default stride of 1.
            return nn.Sequential(
                nn.Conv3d(in_f, out_f, kernel_size, padding=pad),
                nn.BatchNorm3d(num_features=out_f),
                nn.ReLU(inplace=True))

        self.conv_1 = stage1_block(n_channels, num_features)
        self.conv_2 = stage1_block(num_features, num_features)
        self.conv_3 = stage1_block(num_features, num_features)
        self.conv_4 = stage1_block(num_features, num_features)
        self.conv_5 = stage1_block(num_features, num_features)
        self.conv_6 = stage1_block(num_features, activation_maps)
        self.conv_7 = stage2_block(n_channels, num_features)
        self.conv_8 = stage2_block(num_features, num_features)
        self.conv_9 = stage2_block(num_features, num_features)
        self.conv_10 = nn.Sequential(
            nn.Conv3d(num_features, n_channels, kernel_size, padding=pad),
            nn.Sigmoid())

    def forward(self, image):
        # Low-resolution feature extraction with two residual skips back
        # to the first feature map.
        feat = self.conv_1(image)
        feat_a = torch.add(feat, self.conv_3(self.conv_2(feat)))
        feat_b = torch.add(feat, self.conv_5(self.conv_4(feat_a)))
        maps = self.conv_6(feat_b)

        # Fold the activation maps into an upscaled single-channel volume.
        up_size = tuple(np.multiply(maps.shape[2:], self.scale_factor))
        upscaled = maps.view(maps.shape[0], self.n_channels, *up_size)

        # High-resolution refinement with one residual skip.
        hr = self.conv_7(upscaled)
        hr_sum = torch.add(hr, self.conv_9(self.conv_8(hr)))
        refined = self.conv_10(hr_sum)
        return upscaled, refined


if __name__ == "__main__":
    tensor = torch.rand((2, 1, 24, 16, 16)).cuda()
    model = SRCNN3D(scale_factor=(2, 1, 3)).cuda()
    model(tensor)
4,427
56.506494
125
py
DDoS
DDoS-master/models/brokenconv.py
import numpy as np import torch import torch.nn as nn __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Under Testing" class BrokenConvNd(nn.Module): def __init__(self, div_dim, learn_alpha=False, conv_layer=nn.Conv2d, **kwargs): super(BrokenConvNd, self).__init__() self.div_dim = div_dim self.n_conv = np.multiply(*div_dim) self.convs = nn.ModuleList() for _ in range(self.n_conv): self.convs.append(conv_layer(**kwargs)) if learn_alpha: self.alphas = nn.Parameter(data=torch.rand(self.n_conv)) else: self.alphas = [1]*self.n_conv def _split_tensorlist(self, tensor_list, split_size_or_sections, dim): split_tensor_list = [] for t in tensor_list: split_tensor_list += list(torch.split(t, split_size_or_sections=split_size_or_sections, dim=dim+2)) return split_tensor_list def _chunk_tensorlist(self, tensor_list, n_chunks, dim): split_tensor_list = [] for t in tensor_list: split_tensor_list += list(torch.chunk(t, chunks=n_chunks, dim=dim+2)) return split_tensor_list def _cat_tensorlist(self, split_tensor_list, n_split, dim): tensor_list = [] for i in range(0,len(split_tensor_list),n_split): tensor_list.append(torch.cat(split_tensor_list[i:i+n_split], dim=dim+2)) return tensor_list def forward(self, x): # dim = x.shape[2:] # dim_size = np.divide(dim, self.div_dim).astype(np.int) x = [x] for d in range(len(self.div_dim)): # x = self._split_tensorlist(x, split_size_or_sections=int(dim_size[d]), dim=d) x = self._chunk_tensorlist(x, n_chunks=int(self.div_dim[d]), dim=d) res = [] for i in range(self.n_conv): res.append(self.alphas[i] * self.convs[i](x[i])) for d in range(len(self.div_dim)-1,-1,-1): res = self._cat_tensorlist(res, n_split=int(self.div_dim[d]), dim=d) return res[0]
2,300
37.35
111
py
DDoS
DDoS-master/models/SRCNN3Dv2.py
import numpy as np
import torch
import torch.nn as nn

__author__ = "Soumick Chatterjee, Geetha Doddapaneni Gopinath"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Geetha Doddapaneni Gopinath"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Under Testing"


class SRCNN3Dv2(nn.Module):
    """3D SRCNN variant that upscales axis by axis.

    For every axis with a scale factor greater than 1, a dedicated conv in
    ``conv_6s`` produces just enough channels to be reshaped
    (channel-to-space, via ``view``) into that axis.  A refinement stage
    (conv_7..conv_10) then cleans up the upscaled volume.  ``forward``
    returns both the reshaped intermediate and the refined output.
    """

    def __init__(self, n_channels=1, scale_factor=2, num_features=32,
                 kernel_size=3, stride=1):
        super(SRCNN3Dv2, self).__init__()
        if type(scale_factor) is int:
            self.scale_factor = (scale_factor, scale_factor, scale_factor)
        else:
            self.scale_factor = scale_factor
        self.n_channels = n_channels
        pad = kernel_size // 2

        def strided_block(in_f, out_f):
            # Conv -> BN -> ReLU, honouring the user-supplied stride.
            return nn.Sequential(
                nn.Conv3d(in_f, out_f, kernel_size, stride, padding=pad),
                nn.BatchNorm3d(num_features=out_f),
                nn.ReLU(inplace=True))

        def plain_block(in_f, out_f):
            # Conv -> BN -> ReLU at the default stride of 1.
            return nn.Sequential(
                nn.Conv3d(in_f, out_f, kernel_size, padding=pad),
                nn.BatchNorm3d(num_features=out_f),
                nn.ReLU(inplace=True))

        self.conv_1 = strided_block(n_channels, num_features)
        self.conv_2 = strided_block(num_features, num_features)
        self.conv_3 = strided_block(num_features, num_features)
        self.conv_4 = strided_block(num_features, num_features)
        self.conv_5 = strided_block(num_features, num_features)

        # One conv per axis that actually gets upscaled; each emits the
        # channel count needed for the remaining axes' shuffles.
        self.conv_6s = nn.ModuleList()
        in_features = num_features
        for i, factor in enumerate(self.scale_factor):
            if factor > 1:
                out_features = np.prod(self.scale_factor[i:])
                self.conv_6s.append(strided_block(in_features, out_features))
                in_features = out_features // factor

        self.conv_7 = plain_block(n_channels, num_features)
        self.conv_8 = plain_block(num_features, num_features)
        self.conv_9 = plain_block(num_features, num_features)
        self.conv_10 = nn.Sequential(
            nn.Conv3d(num_features, n_channels, kernel_size, padding=pad),
            nn.Sigmoid())

    def forward(self, image):
        # Feature extraction with two residual skips back to conv_1's output.
        feat = self.conv_1(image)
        feat_a = torch.add(feat, self.conv_3(self.conv_2(feat)))
        shuffled = torch.add(feat, self.conv_5(self.conv_4(feat_a)))

        # Axis-by-axis channel-to-space reshuffling.
        mod_ind = 0
        for i, factor in enumerate(self.scale_factor):
            if factor > 1:
                shuffled = self.conv_6s[mod_ind](shuffled)
                mod_ind += 1
                new_shape = list(shuffled.shape)
                new_shape[1] //= factor        # fewer channels ...
                new_shape[2 + i] *= factor     # ... traded for a longer axis i
                shuffled = shuffled.view(new_shape)

        # High-resolution refinement with one residual skip.
        hr = self.conv_7(shuffled)
        hr_sum = torch.add(hr, self.conv_9(self.conv_8(hr)))
        return shuffled, self.conv_10(hr_sum)
4,709
51.921348
135
py
DDoS
DDoS-master/models/__init__.py
from models.unet3D import UNet from models.unet3DMSS import UNetMSS from models.SRCNN3D import SRCNN3D from models.SRCNN3Dv2 import SRCNN3Dv2 from models.SRCNN3Dv3 import SRCNN3Dv3 from models.unet3DvSeg_DeepSup import U_Net_DeepSup as UNetVSeg from models.densenet import generate_model as DenseNet from models.ThisNewNet import ThisNewNet from models.ReconResNet import ResNet from models.ShuffleUNet.net import ShuffleUNet
425
41.6
63
py
DDoS
DDoS-master/models/unet3D_DeepSup.py
# from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.utils.data

__author__ = "Kartik Prabhu, Mahantesh Pattadkal, and Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Kartik Prabhu", "Mahantesh Pattadkal", "Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"


class conv_block(nn.Module):
    """Two stacked Conv3d -> BatchNorm3d -> ReLU units."""

    def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True):
        super(conv_block, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(in_channels=out_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True))

    def forward(self, x):
        return self.conv(x)


class up_conv(nn.Module):
    """Upsample by factor 2, then Conv3d -> BatchNorm3d -> ReLU."""

    def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True):
        super(up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True))

    def forward(self, x):
        return self.up(x)


class U_Net_DeepSup(nn.Module):
    """3D U-Net with deep supervision.

    Input: [batch, channel, depth, height, width].  Spatial dims must be
    divisible by 16 (four 2x max-pools).  Returns a list of three outputs:
    the full-resolution prediction plus the two 1-channel deep-supervision
    heads taken from the coarser decoder levels.

    Paper: https://arxiv.org/abs/1505.04597
    """

    def __init__(self, in_ch=1, out_ch=1, n1=64):
        super(U_Net_DeepSup, self).__init__()

        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]  # e.g. 64,128,256,512,1024

        self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        # Deep-supervision heads at the two middle decoder levels.
        self.Conv_d3 = conv_block(filters[1], 1)
        self.Conv_d4 = conv_block(filters[2], 1)

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # Encoder: conv at full resolution, then four pool+conv stages.
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        # Decoder with skip-concatenations from the encoder.
        dec5 = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec4 = self.Up_conv4(torch.cat((enc3, self.Up4(dec5)), dim=1))
        d4_out = self.Conv_d4(dec4)  # deep-supervision head at 1/4 resolution
        dec3 = self.Up_conv3(torch.cat((enc2, self.Up3(dec4)), dim=1))
        d3_out = self.Conv_d3(dec3)  # deep-supervision head at 1/2 resolution
        dec2 = self.Up_conv2(torch.cat((enc1, self.Up2(dec3)), dim=1))

        out = self.Conv(dec2)
        return [out, d3_out, d4_out]
5,263
29.783626
110
py
DDoS
DDoS-master/models/ThisNewNet.py
import math import torch.nn as nn from models import * __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Under Testing" class ThisNewNet(nn.Module): def __init__(self, scale_factor, loss_func=None, in_channels=1, n_classes=1, depth=3, batch_norm=False, up_mode="upsample", dropout=0.0, num_features=64, sliceup_first=False, loss_slice_count=2, loss_inplane=True): super(ThisNewNet, self).__init__() self.in_plane_upsampler = UNet(in_channels=in_channels, n_classes=n_classes, depth=depth, wf=round(math.log(num_features,2)), batch_norm=batch_norm, up_mode=up_mode, dropout=dropout) self.slice_upsampler = SRCNN3D(n_channels=in_channels, scale_factor=scale_factor, num_features=num_features) self.sliceup_first = sliceup_first self.loss_func = loss_func self.scale_factor = scale_factor self.loss_slice_count = loss_slice_count self.loss_inplane = loss_inplane def forward(self, images, gt=None): if self.sliceup_first: _, up_images = self.slice_upsampler(images) output = self.in_plane_upsampler(up_images) else: up_images = self.in_plane_upsampler(images) aux_out, output = self.slice_upsampler(up_images) if gt is None or self.loss_func is None: return output else: if self.sliceup_first: loss = self.loss_func(output, gt) else: in_plane_loss = self.loss_func(up_images, gt[:,:,::self.scale_factor[0],...]) #unet loss slice_aux_loss = self.loss_func(aux_out, gt) #aux srcnn loss slice_main_loss = self.loss_func(output, gt) #srcnn loss loss = slice_main_loss if self.loss_inplane: loss += in_plane_loss if self.loss_slice_count > 1: loss += slice_aux_loss # loss = in_plane_loss + slice_aux_loss + slice_main_loss return output, loss
2,283
46.583333
218
py
DDoS
DDoS-master/models/unet3D.py
# Adapted from https://discuss.pytorch.org/t/unet-implementation/426
import torch
from torch import nn
import torch.nn.functional as F

__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"


class UNet(nn.Module):
    """3D U-Net (Ronneberger et al., 2015, https://arxiv.org/abs/1505.04597).

    Args:
        in_channels (int): number of input channels
        n_classes (int): number of output channels
        depth (int): number of resolution levels
        wf (int): the first level learns 2**wf filters, doubling per level
        padding (bool): pad convolutions so output size equals input size
            (may introduce border artifacts)
        batch_norm (bool): add BatchNorm after the activated convolutions
        up_mode (str): 'upconv' (learned transposed convolution) or
            'upsample' (trilinear upsampling + 1x1 convolution)
        dropout (bool): apply Dropout3d on the downsampled feature maps
    """

    def __init__(self, in_channels=1, n_classes=1, depth=3, wf=6, padding=True,
                 batch_norm=False, up_mode='upconv', dropout=False):
        super(UNet, self).__init__()
        assert up_mode in ('upconv', 'upsample')
        self.padding = padding
        self.depth = depth
        # nn.Sequential() is an identity when dropout is disabled.
        self.dropout = nn.Dropout3d() if dropout else nn.Sequential()

        self.down_path = nn.ModuleList()
        n_feat = in_channels
        for level in range(depth):
            self.down_path.append(
                UNetConvBlock(n_feat, 2 ** (wf + level), padding, batch_norm))
            n_feat = 2 ** (wf + level)

        self.up_path = nn.ModuleList()
        for level in reversed(range(depth - 1)):
            self.up_path.append(
                UNetUpBlock(n_feat, 2 ** (wf + level), up_mode, padding, batch_norm))
            n_feat = 2 ** (wf + level)

        self.last = nn.Conv3d(n_feat, n_classes, kernel_size=1)

    def forward(self, x):
        skips = []
        for i, down in enumerate(self.down_path):
            x = down(x)
            if i != len(self.down_path) - 1:
                skips.append(x)
                x = F.avg_pool3d(x, 2)
                # No-op unless dropout was enabled at construction time.
                x = self.dropout(x)
        for i, up in enumerate(self.up_path):
            x = up(x, skips[-i - 1])
        return self.last(x)


class UNetConvBlock(nn.Module):
    """Two 3x3x3 convolutions, each followed by ReLU (and optional BatchNorm)."""

    def __init__(self, in_size, out_size, padding, batch_norm):
        super(UNetConvBlock, self).__init__()
        layers = [nn.Conv3d(in_size, out_size, kernel_size=3, padding=int(padding)),
                  nn.ReLU()]
        if batch_norm:
            layers.append(nn.BatchNorm3d(out_size))
        layers += [nn.Conv3d(out_size, out_size, kernel_size=3, padding=int(padding)),
                   nn.ReLU()]
        if batch_norm:
            layers.append(nn.BatchNorm3d(out_size))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)


class UNetUpBlock(nn.Module):
    """Upsampling step followed by skip-concatenation and a conv block."""

    def __init__(self, in_size, out_size, up_mode, padding, batch_norm):
        super(UNetUpBlock, self).__init__()
        if up_mode == 'upconv':
            self.up = nn.ConvTranspose3d(in_size, out_size, kernel_size=2, stride=2)
        elif up_mode == 'upsample':
            self.up = nn.Sequential(nn.Upsample(mode='trilinear', scale_factor=2),
                                    nn.Conv3d(in_size, out_size, kernel_size=1))
        self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm)

    def center_crop(self, layer, target_size):
        """Spatially centre-crop ``layer`` to ``target_size`` (kept for reference; unused)."""
        _, _, layer_depth, layer_height, layer_width = layer.size()
        diff_z = (layer_depth - target_size[0]) // 2
        diff_y = (layer_height - target_size[1]) // 2
        diff_x = (layer_width - target_size[2]) // 2
        return layer[:, :,
                     diff_z:(diff_z + target_size[0]),
                     diff_y:(diff_y + target_size[1]),
                     diff_x:(diff_x + target_size[2])]

    def forward(self, x, bridge):
        up = self.up(x)
        # Instead of cropping the bridge, resize the upsampled tensor to match it.
        up = F.interpolate(up, size=bridge.shape[2:], mode='trilinear')
        return self.conv_block(torch.cat([up, bridge], 1))
5,245
38.443609
128
py
DDoS
DDoS-master/models/ReconResNet.py
#!/usr/bin/env python

import torch.nn as nn
from tricorder.torch.transforms import Interpolator

__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Soumick Chatterjee & OvGU:ESF:MEMoRIAL"
__credits__ = ["Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Under Testing"

# NOTE(review): the building-block classes below use module-level names
# (layer_conv, layer_convtrans, layer_norm, layer_drop, layer_pad, act_relu)
# that ResNet.__init__ injects via globals().update(...).  They are only
# usable after a ResNet has been constructed, and constructing a 2D and a 3D
# ResNet in the same process makes the most recent construction win.


class ResidualBlock(nn.Module):
    """pad-conv-norm-act-drop-pad-conv-norm, added back onto the input."""

    def __init__(self, in_features, drop_prob=0.2):
        super(ResidualBlock, self).__init__()
        conv_block = [layer_pad(1),
                      layer_conv(in_features, in_features, 3),
                      layer_norm(in_features),
                      act_relu(),
                      layer_drop(p=drop_prob, inplace=True),
                      layer_pad(1),
                      layer_conv(in_features, in_features, 3),
                      layer_norm(in_features)]
        self.conv_block = nn.Sequential(*conv_block)

    def forward(self, x):
        return x + self.conv_block(x)


class DownsamplingBlock(nn.Module):
    """Stride-2 conv -> norm -> act, halving spatial size."""

    def __init__(self, in_features, out_features):
        super(DownsamplingBlock, self).__init__()
        conv_block = [layer_conv(in_features, out_features, 3, stride=2, padding=1),
                      layer_norm(out_features),
                      act_relu()]
        self.conv_block = nn.Sequential(*conv_block)

    def forward(self, x):
        return self.conv_block(x)


class UpsamplingBlock(nn.Module):
    """Doubles spatial size, either by transposed conv or by interpolation.

    In "convtrans" mode with ``post_interp_convtrans`` set, a trailing 1x1
    conv fixes shape mismatches left by the transposed convolution (odd
    input sizes); otherwise interpolation to the requested shape is used.
    """

    def __init__(self, in_features, out_features, mode="convtrans",
                 interpolator=None, post_interp_convtrans=False):
        super(UpsamplingBlock, self).__init__()
        self.interpolator = interpolator
        self.mode = mode
        self.post_interp_convtrans = post_interp_convtrans
        if self.post_interp_convtrans:
            self.post_conv = layer_conv(out_features, out_features, 1)

        if mode == "convtrans":
            conv_block = [layer_convtrans(in_features, out_features, 3,
                                          stride=2, padding=1, output_padding=1)]
        else:
            conv_block = [layer_pad(1),
                          layer_conv(in_features, out_features, 3)]
        conv_block += [layer_norm(out_features), act_relu()]
        self.conv_block = nn.Sequential(*conv_block)

    def forward(self, x, out_shape=None):
        if self.mode == "convtrans":
            if self.post_interp_convtrans:
                x = self.conv_block(x)
                if x.shape[2:] != out_shape:
                    # Resize and clean up when the transposed conv overshot.
                    return self.post_conv(self.interpolator(x, out_shape))
                else:
                    return x
            else:
                return self.conv_block(x)
        else:
            return self.conv_block(self.interpolator(x, out_shape))


class ResNet(nn.Module):
    """Residual CNN for image reconstruction, in 2D or 3D.

    ``res_blocks`` defaults to 14 as that gives a number of trainable
    parameters close to the number of possible pixel values in a 256x256
    image.  ``forwardV`` (0..5) selects one of several progressively more
    skip-connected forward variants.
    """

    def __init__(self, in_channels=1, out_channels=1, res_blocks=14,
                 starting_nfeatures=64, updown_blocks=2, is_relu_leaky=True,
                 do_batchnorm=False, res_drop_prob=0.2, is_replicatepad=0,
                 out_act="sigmoid", forwardV=0, upinterp_algo='convtrans',
                 post_interp_convtrans=False, is3D=False):
        super(ResNet, self).__init__()

        # Select 2D or 3D primitives and publish them as module-level
        # globals for the helper blocks above.
        layers = {}
        if is3D:
            layers["layer_conv"] = nn.Conv3d
            layers["layer_convtrans"] = nn.ConvTranspose3d
            layers["layer_norm"] = nn.BatchNorm3d if do_batchnorm else nn.InstanceNorm3d
            layers["layer_drop"] = nn.Dropout3d
            if is_replicatepad == 0:
                layers["layer_pad"] = nn.ReflectionPad3d
            elif is_replicatepad == 1:
                layers["layer_pad"] = nn.ReplicationPad3d
            layers["interp_mode"] = 'trilinear'
        else:
            layers["layer_conv"] = nn.Conv2d
            layers["layer_convtrans"] = nn.ConvTranspose2d
            layers["layer_norm"] = nn.BatchNorm2d if do_batchnorm else nn.InstanceNorm2d
            layers["layer_drop"] = nn.Dropout2d
            if is_replicatepad == 0:
                layers["layer_pad"] = nn.ReflectionPad2d
            elif is_replicatepad == 1:
                layers["layer_pad"] = nn.ReplicationPad2d
            layers["interp_mode"] = 'bilinear'
        layers["act_relu"] = nn.PReLU if is_relu_leaky else nn.ReLU
        globals().update(layers)

        self.forwardV = forwardV
        self.upinterp_algo = upinterp_algo

        interpolator = Interpolator(
            mode=layers["interp_mode"] if self.upinterp_algo == "convtrans" else self.upinterp_algo)

        # Initial convolution block
        intialConv = [layer_pad(3),
                      layer_conv(in_channels, starting_nfeatures, 7),
                      layer_norm(starting_nfeatures),
                      act_relu()]

        # Downsampling (shapes are stashed during forward for the upsample path)
        downsam = []
        in_features = starting_nfeatures
        out_features = in_features * 2
        for _ in range(updown_blocks):
            downsam.append(DownsamplingBlock(in_features, out_features))
            in_features = out_features
            out_features = in_features * 2

        # Residual trunk
        resblocks = [ResidualBlock(in_features, res_drop_prob)
                     for _ in range(res_blocks)]

        # Upsampling, mirroring the downsampling path
        upsam = []
        out_features = in_features // 2
        for _ in range(updown_blocks):
            upsam.append(UpsamplingBlock(in_features, out_features,
                                         self.upinterp_algo, interpolator,
                                         post_interp_convtrans))
            in_features = out_features
            out_features = in_features // 2

        # Output layer with selectable activation
        finalconv = [layer_pad(3),
                     layer_conv(starting_nfeatures, out_channels, 7)]
        if out_act == "sigmoid":
            finalconv.append(nn.Sigmoid())
        elif out_act == "relu":
            finalconv.append(act_relu())
        elif out_act == "tanh":
            finalconv.append(nn.Tanh())

        self.intialConv = nn.Sequential(*intialConv)
        self.downsam = nn.ModuleList(downsam)
        self.resblocks = nn.Sequential(*resblocks)
        self.upsam = nn.ModuleList(upsam)
        self.finalconv = nn.Sequential(*finalconv)

        # Bind the requested forward variant (unknown values keep the
        # default nn.Module.forward, as in the original if/elif chain).
        _forwards = {0: self.forwardV0, 1: self.forwardV1, 2: self.forwardV2,
                     3: self.forwardV3, 4: self.forwardV4, 5: self.forwardV5}
        if self.forwardV in _forwards:
            self.forward = _forwards[self.forwardV]

    def forwardV0(self, x):
        """v0: Original version — encode, residual trunk, decode."""
        x = self.intialConv(x)
        shapes = []
        for downblock in self.downsam:
            shapes.append(x.shape[2:])
            x = downblock(x)
        x = self.resblocks(x)
        for i, upblock in enumerate(self.upsam):
            x = upblock(x, shapes[-1 - i])
        return self.finalconv(x)

    def forwardV1(self, x):
        """v1: input is added to the final output."""
        out = self.intialConv(x)
        shapes = []
        for downblock in self.downsam:
            shapes.append(out.shape[2:])
            out = downblock(out)
        out = self.resblocks(out)
        for i, upblock in enumerate(self.upsam):
            out = upblock(out, shapes[-1 - i])
        return x + self.finalconv(out)

    def forwardV2(self, x):
        """v2: v1 + input of the residual trunk added back to its output."""
        out = self.intialConv(x)
        shapes = []
        for downblock in self.downsam:
            shapes.append(out.shape[2:])
            out = downblock(out)
        out = out + self.resblocks(out)
        for i, upblock in enumerate(self.upsam):
            out = upblock(out, shapes[-1 - i])
        return x + self.finalconv(out)

    def forwardV3(self, x):
        """v3: v2 + input of the initial conv added back to its output."""
        out = x + self.intialConv(x)
        shapes = []
        for downblock in self.downsam:
            shapes.append(out.shape[2:])
            out = downblock(out)
        out = out + self.resblocks(out)
        for i, upblock in enumerate(self.upsam):
            out = upblock(out, shapes[-1 - i])
        return x + self.finalconv(out)

    def forwardV4(self, x):
        """v4: v3 + initial-conv output added back before the final conv."""
        iniconv = x + self.intialConv(x)
        shapes = []
        if len(self.downsam) > 0:
            for i, downblock in enumerate(self.downsam):
                if i == 0:
                    shapes.append(iniconv.shape[2:])
                    out = downblock(iniconv)
                else:
                    shapes.append(out.shape[2:])
                    out = downblock(out)
        else:
            out = iniconv
        out = out + self.resblocks(out)
        for i, upblock in enumerate(self.upsam):
            out = upblock(out, shapes[-1 - i])
        out = iniconv + out
        return x + self.finalconv(out)

    def forwardV5(self, x):
        """v5: v4 + pairing of individual down blocks with individual up blocks."""
        outs = [x + self.intialConv(x)]
        shapes = []
        for i, downblock in enumerate(self.downsam):
            shapes.append(outs[-1].shape[2:])
            outs.append(downblock(outs[-1]))
        outs[-1] = outs[-1] + self.resblocks(outs[-1])
        for i, upblock in enumerate(self.upsam):
            outs[-1] = upblock(outs[-1], shapes[-1 - i])
            # Merge the upsampled result into the matching down-level output:
            # pop the top of the stack and add it onto the level below.
            outs[-1] = outs[-2] + outs.pop()
        return x + self.finalconv(outs.pop())
9,909
36.537879
257
py
DDoS
DDoS-master/models/unet3DvSeg_DeepSup.py
# from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.utils.data

__author__ = "Kartik Prabhu, Mahantesh Pattadkal, and Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Kartik Prabhu", "Mahantesh Pattadkal", "Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"


class conv_block(nn.Module):
    """Two stacked Conv3d -> BatchNorm3d -> ReLU units."""

    def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True):
        super(conv_block, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(in_channels=out_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True))

    def forward(self, x):
        return self.conv(x)


class up_conv(nn.Module):
    """Upsample by factor 2, then Conv3d -> BatchNorm3d -> ReLU."""

    def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True):
        super(up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True))

    def forward(self, x):
        return self.up(x)


class U_Net_DeepSup(nn.Module):
    """3D U-Net with deep supervision.

    Input: [batch, channel, depth, height, width].  Spatial dims must be
    divisible by 16 (four 2x max-pools).  Returns a list of three outputs:
    the full-resolution prediction plus the two 1-channel deep-supervision
    heads taken from the coarser decoder levels.

    Paper: https://arxiv.org/abs/1505.04597
    """

    def __init__(self, in_ch=1, out_ch=1, n1=64):
        super(U_Net_DeepSup, self).__init__()

        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]  # e.g. 64,128,256,512,1024

        self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        # Deep-supervision heads at the two middle decoder levels.
        self.Conv_d3 = conv_block(filters[1], 1)
        self.Conv_d4 = conv_block(filters[2], 1)

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # Encoder: conv at full resolution, then four pool+conv stages.
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        # Decoder with skip-concatenations from the encoder.
        dec5 = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec4 = self.Up_conv4(torch.cat((enc3, self.Up4(dec5)), dim=1))
        d4_out = self.Conv_d4(dec4)  # deep-supervision head at 1/4 resolution
        dec3 = self.Up_conv3(torch.cat((enc2, self.Up3(dec4)), dim=1))
        d3_out = self.Conv_d3(dec3)  # deep-supervision head at 1/2 resolution
        dec2 = self.Up_conv2(torch.cat((enc1, self.Up2(dec3)), dim=1))

        out = self.Conv(dec2)
        return [out, d3_out, d4_out]
5,263
29.783626
110
py
DDoS
DDoS-master/models/srVAE/srVAE.py
from functools import partial import numpy as np import torch import torch.nn as nn from torchvision import transforms from .backbone.densenet16x32 import * from .priors.realnvp import RealNVP # --------- Utility functions --------- def get_shape(z_dim): """ Given the dimentionality of the latent space, re-shape it to an appropriate 3-D tensor. """ d = 8 if (z_dim%d==0) and (z_dim // (d*d) > 0): # cx8x8 H = W = d C = z_dim // (d*d) return (C, H, W) raise "Latent space can not mapped to a 3-D tensor. \ Please choose another dimentionality (power of 2)." # ----- Two Staged VAE ----- class srVAE(nn.Module): """ Super-Resolution Variational Auto-Encoder (srVAE). A Two Staged Visual Processing Variational AutoEncoder. Author: Ioannis Gatopoulos. """ def __init__(self, x_shape, y_shape=(3, 16, 16), u_dim=args.u_dim, z_dim=args.z_dim, prior=args.prior, device="cuda"): super().__init__() self.device = device self.x_shape = x_shape self.y_shape = (x_shape[0], y_shape[1], y_shape[2]) self.u_shape = get_shape(u_dim) self.z_shape = get_shape(z_dim) # q(y|x): deterministic "compressed" transformation self.compressed_transform = transforms.Compose([ transforms.ToPILImage(), transforms.Resize((self.y_shape[1], self.y_shape[2])), transforms.ToTensor() ]) # p(u) self.p_u = RealNVP(self.u_shape) # q(u | y) self.q_u = q_u(self.u_shape, self.y_shape) # p(z | y) self.p_z = p_z(self.z_shape, (self.y_shape, self.u_shape)) # q(z | x) self.q_z = q_z(self.z_shape, self.x_shape) # p(y | u) self.p_y = p_y(self.y_shape, self.u_shape) # p(x | y, z) self.p_x = p_x(self.x_shape, (self.y_shape, self.z_shape)) # likelihood distribution self.recon_loss = partial(dmol_loss) self.sample_distribution = partial(sample_from_dmol) def compressed_transoformation(self, input): y = [] for x in input: y.append(self.compressed_transform(x.cpu())) return torch.stack(y).to(self.device) def initialize(self, dataloader): """ Data dependent init for weight normalization (Automatically done during the first forward 
pass). """ with torch.no_grad(): x, _ = next(iter(dataloader)) x = x.to(self.device) output = self.forward(x) self.calculate_elbo(x, output) return @staticmethod def reparameterize(z_mean, z_log_var): """ z ~ N(z| z_mu, z_logvar) """ epsilon = torch.randn_like(z_mean) return z_mean + torch.exp(0.5*z_log_var)*epsilon @torch.no_grad() def generate(self, n_samples=20): # u ~ p(u) u = self.p_u.sample(self.u_shape, n_samples=n_samples, device=self.device).to(self.device) # p(y|u) y_logits = self.p_y(u) y_hat = self.sample_distribution(y_logits, nc=self.y_shape[0]) # z ~ p(z|y, u) z_p_mean, z_p_logvar = self.p_z((y_hat, u)) z_p = self.reparameterize(z_p_mean, z_p_logvar) # x ~ p(x|y,z) x_logits = self.p_x((y_hat, z_p)) x_hat = self.sample_distribution(x_logits, nc=self.x_shape[0]) return x_hat, y_hat @torch.no_grad() def reconstruct(self, x, **kwargs): outputs = self.forward(x) y_hat = self.sample_distribution(outputs.get('y_logits'), nc=self.y_shape[0]) x_hat = self.sample_distribution(outputs.get('x_logits'), nc=self.x_shape[0]) return outputs.get('y'), y_hat, x_hat @torch.no_grad() def super_resolution(self, y): # u ~ q(u| y) u_q_mean, u_q_logvar = self.q_u(y) u_q = self.reparameterize(u_q_mean, u_q_logvar) # z ~ p(z|y) z_p_mean, z_p_logvar = self.p_z((y, u_q)) z_p = self.reparameterize(z_p_mean, z_p_logvar) # x ~ p(x|y,z) x_logits = self.p_x((y, z_p)) x_hat = self.sample_distribution(x_logits) return x_hat def calculate_elbo(self, x, outputs, **kwargs): # unpack variables y, x_logits, y_logits = outputs.get('y'), outputs.get('x_logits'), outputs.get('y_logits') u_q, u_q_mean, u_q_logvar = outputs.get('u_q'), outputs.get('u_q_mean'), outputs.get('u_q_logvar') z_q, z_q_mean, z_q_logvar = outputs.get('z_q'), outputs.get('z_q_mean'), outputs.get('z_q_logvar') z_p_mean, z_p_logvar = outputs.get('z_p_mean'), outputs.get('z_p_logvar') # Reconstraction loss RE_x = self.recon_loss(x, x_logits, nc=self.x_shape[0]) RE_y = self.recon_loss(y, y_logits, nc=self.y_shape[0]) # 
Regularization loss log_p_u = self.p_u.log_p(u_q, dim=1) log_q_u = log_normal_diag(u_q, u_q_mean, u_q_logvar) KL_u = log_q_u - log_p_u log_p_z = log_normal_diag(z_q, z_p_mean, z_p_logvar) log_q_z = log_normal_diag(z_q, z_q_mean, z_q_logvar) KL_z = log_q_z - log_p_z # Total lower bound loss nelbo = - (RE_x + RE_y - KL_u - KL_z).mean() diagnostics = { "bpd" : (nelbo.item()) / (np.prod(x.shape[1:]) * np.log(2.)), "nelbo" : nelbo.item(), "RE" : - (RE_x + RE_y).mean().item(), "RE_x" : - RE_x.mean().item(), "RE_y" : - RE_y.mean().item(), "KL" : (KL_z + KL_u).mean().item(), "KL_u" : KL_u.mean().item(), "KL_z" : KL_z.mean().item(), } return nelbo, diagnostics def forward(self, x, **kwargs): """ Forward pass through the inference and the generative model. """ # y ~ f(x) (determinist) y = self.compressed_transoformation(x) # u ~ q(u| y) u_q_mean, u_q_logvar = self.q_u(y) u_q = self.reparameterize(u_q_mean, u_q_logvar) # z ~ q(z| x, y) z_q_mean, z_q_logvar = self.q_z(x) z_q = self.reparameterize(z_q_mean, z_q_logvar) # x ~ p(x| y, z) x_logits = self.p_x((y, z_q)) # y ~ p(y| u) y_logits = self.p_y(u_q) # z ~ p(z| x) z_p_mean, z_p_logvar = self.p_z((y, u_q)) return { 'u_q_mean' : u_q_mean, 'u_q_logvar' : u_q_logvar, 'u_q' : u_q, 'z_q_mean' : z_q_mean, 'z_q_logvar' : z_q_logvar, 'z_q' : z_q, 'z_p_mean' : z_p_mean, 'z_p_logvar' : z_p_logvar, 'y' : y, 'y_logits' : y_logits, 'x_logits' : x_logits } if __name__ == "__main__": pass
6,789
29.3125
122
py
DDoS
DDoS-master/models/srVAE/__init__.py
from .srVAE import srVAE
25
12
24
py
DDoS
DDoS-master/models/srVAE/backbone/densenet16x32.py
import torch import torch.nn as nn import torch.nn.functional as F from src.modules.nn_layers import * from src.modules.distributions import n_embenddings from src.utils.args import args class q_u(nn.Module): """ Encoder q(u|y) """ def __init__(self, output_shape, input_shape): super().__init__() nc_in = input_shape[0] nc_out = 2 * output_shape[0] self.core_nn = nn.Sequential( DenselyEncoder( in_channels=nc_in, out_channels=nc_out, growth_rate=64, steps=3, scale_factor=1) ) def forward(self, input): mu, logvar = self.core_nn(input).chunk(2, 1) return mu, F.hardtanh(logvar, min_val=-7, max_val=7.) class p_y(nn.Module): """ Dencoder p(y|u) """ def __init__(self, output_shape, input_shape): super().__init__() nc_in = input_shape[0] nc_out = n_embenddings(output_shape[0]) self.core_nn = nn.Sequential( DenselyDecoder( in_channels=nc_in, out_channels=nc_out, growth_rate=128, steps=4, scale_factor=1) ) def forward(self, input): logits = self.core_nn(input) return logits class q_z(nn.Module): """ Encoder q(z|x) """ def __init__(self, output_shape, input_shape): super().__init__() nc_in = input_shape[0] nc_out = 2 * output_shape[0] self.core_nn = nn.Sequential( DenselyEncoder( in_channels=nc_in, out_channels=nc_out, growth_rate=16, steps=4, scale_factor=2) ) def forward(self, input): mu, logvar = self.core_nn(input).chunk(2, 1) return mu, F.hardtanh(logvar, min_val=-7, max_val=7.) 
class p_z(nn.Module): """ Encoder p(z| y, u) """ def __init__(self, output_shape, input_shape): super().__init__() nc_y_in, nc_u_in = input_shape[0][0], input_shape[1][0] nc_out = 2 * output_shape[0] self.y_nn = nn.Sequential( DenselyEncoder( in_channels=nc_y_in, out_channels=nc_out//2, growth_rate=32, steps=5, scale_factor=1), nn.ELU(inplace=True) ) self.u_nn = nn.Sequential( DenselyNetwork( in_channels=nc_u_in, out_channels=nc_out//2, growth_rate=64, steps=3, blocks=3, act=True) ) self.core_nn = nn.Sequential( DenselyNetwork( in_channels=nc_out, out_channels=nc_out, growth_rate=64, steps=3, blocks=3, act=None) ) def forward(self, input): y, u = input[0], input[1] y_out = self.y_nn(y) u_out = self.u_nn(u) joint = torch.cat((y_out, u_out), 1) mu, logvar = self.core_nn(joint).chunk(2, 1) return mu, F.hardtanh(logvar, min_val=-7, max_val=7.) class p_x(nn.Module): """ p(x| y, z) """ def __init__(self, output_shape, input_shape): super().__init__() nc_y_in, nc_z_in = input_shape[0][0], input_shape[1][0] nc_out = n_embenddings(output_shape[0]) self.z_nn = nn.Sequential( DenselyDecoder( in_channels=nc_z_in, out_channels=nc_out, growth_rate=64, steps=8, scale_factor=2) ) self.core_nn = nn.Sequential( DenselyNetwork( in_channels=nc_out + 3, out_channels=nc_out, growth_rate=64, steps=5, blocks=3, act=None) ) def forward(self, input): y, z = input[0], input[1] y_out = F.interpolate(y, size=[32, 32], align_corners=False, mode='bilinear') z_out = self.z_nn(z) joint = torch.cat((y_out, z_out), 1) logits = self.core_nn(joint) return logits if __name__ == "__main__": pass
4,289
23.94186
85
py
DDoS
DDoS-master/models/srVAE/priors/mog.py
import numpy as np import torch import torch.nn as nn from torch.autograd import Variable from .prior import Prior from src.modules.nn_layers import * from src.modules.distributions import * from src.utils import args # Modified vertion of: https://github.com/divymurli/VAEs class MixtureOfGaussians(Prior): def __init__(self, z_shape, num_mixtures=1000): super().__init__() self.z_shape = z_shape self.z_dim = np.prod(z_shape) self.k = num_mixtures # Mixture of Gaussians prior self.z_pre = torch.nn.Parameter(torch.randn(1, 2 * self.k, self.z_dim).to(args.device) / np.sqrt(self.k * self.z_dim)) # Uniform weighting self.pi = torch.nn.Parameter(torch.ones(self.k).to(args.device) / self.k, requires_grad=False) def sample_gaussian(self, m, v): """ Element-wise application reparameterization trick to sample from Gaussian """ sample = torch.randn(m.shape).to(args.device) z = m + (v**0.5)*sample return z def log_sum_exp(self, x, dim=0): """ Compute the log(sum(exp(x), dim)) in a numerically stable manner """ max_x = torch.max(x, dim)[0] new_x = x - max_x.unsqueeze(dim).expand_as(x) return max_x + (new_x.exp().sum(dim)).log() def log_mean_exp(self, x, dim): """ Compute the log(mean(exp(x), dim)) in a numerically stable manner """ return self.log_sum_exp(x, dim) - np.log(x.size(dim)) def log_normal(self, x, m, v): """ Computes the elem-wise log probability of a Gaussian and then sum over the last dim. Basically we're assuming all dims are batch dims except for the last dim. """ const = -0.5 * x.size(-1) * torch.log(2*torch.tensor(np.pi)) log_det = -0.5 * torch.sum(torch.log(v), dim = -1) log_exp = -0.5 * torch.sum((x - m)**2/v, dim = -1) log_prob = const + log_det + log_exp return log_prob def log_normal_mixture(self, z, m, v): """ Computes log probability of a uniformly-weighted Gaussian mixture. 
""" z = z.view(z.shape[0], 1, -1) log_probs = self.log_normal(z, m, v) log_prob = self.log_mean_exp(log_probs, 1) return log_prob def gaussian_parameters(self, h, dim=-1): m, h = torch.split(h, h.size(dim) // 2, dim=dim) v = F.softplus(h) + 1e-8 return m, v def sample(self, n_samples=1, **kwargs): idx = torch.distributions.categorical.Categorical(self.pi).sample((n_samples,)) m, v = self.gaussian_parameters(self.z_pre.squeeze(0), dim=0) m, v = m[idx], v[idx] z_samples = self.sample_gaussian(m, v) return z_samples.view(z_samples.shape[0], *self.z_shape) def log_p(self, z, **kwargs): return self.forward(z) def forward(self, z, dim=None, **kwargs): """ Computes the mixture of Gaussian prior """ m, v = self.gaussian_parameters(self.z_pre, dim=1) log_p_z = self.log_normal_mixture(z, m, v) return log_p_z def __str__(self): return "MixtureOfGaussians" if __name__ == "__main__": pass
3,267
32.010101
94
py
DDoS
DDoS-master/models/srVAE/priors/prior.py
import torch import torch.nn as nn class Prior(nn.Module): def __init__(self): super().__init__() def sample(self, **kwargs): raise NotImplementedError def log_p(self, input, **kwargs): return self.forward(z) def forward(self, input, **kwargs): raise NotImplementedError def __str__(self): raise NotImplementedError if __name__ == "__main__": pass
420
16.541667
39
py
DDoS
DDoS-master/models/srVAE/priors/__init__.py
from .prior import Prior from .realnvp import RealNVP from .mog import MixtureOfGaussians from .standard_normal import StandardNormal
134
26
43
py
DDoS
DDoS-master/models/srVAE/priors/standard_normal.py
import math import torch class StandardNormal: def __init__(self, z_shape): self.z_shape = z_shape def sample(self, n_samples=1, **kwargs): return torch.randn((n_samples, *self.z_shape)) def log_p(self, z, **kwargs): return self.forward(z) def forward(self, z, **kwargs): """ Outputs the log p(z). """ log_probs = z.pow(2) + math.log(math.pi * 2.) log_probs = -0.5 * log_probs.view(z.size(0), -1).sum(dim=1) return log_probs def __call__(self, z, **kwargs): return self.forward(z, **kwargs) def __str__(self): return "StandardNormal" if __name__ == "__main__": pass
681
21
67
py
DDoS
DDoS-master/models/srVAE/priors/realnvp/__init__.py
from .model import RealNVP
27
13
26
py
DDoS
DDoS-master/models/srVAE/priors/realnvp/distributions/mog.py
import numpy as np import torch import torch.nn as nn from torch.autograd import Variable from src.modules.nn_layers import * from src.modules.distributions import * from src.utils import args class MixtureOfGaussians(nn.Module): def __init__(self, z_shape, num_mixtures=10): super().__init__() self.z_shape = z_shape self.z_dim = np.prod(z_shape) self.k = num_mixtures # Mixture of Gaussians prior self.z_pre = torch.nn.Parameter(torch.randn(1, 2 * self.k, self.z_dim).to(args.device) / np.sqrt(self.k * self.z_dim)) # Uniform weighting self.pi = torch.nn.Parameter(torch.ones(self.k).to(args.device) / self.k, requires_grad=False) def sample_gaussian(self, m, v): """ Element-wise application reparameterization trick to sample from Gaussian """ sample = torch.randn(m.shape).to(args.device) z = m + (v**0.5)*sample return z def log_sum_exp(self, x, dim=0): """ Compute the log(sum(exp(x), dim)) in a numerically stable manner """ max_x = torch.max(x, dim)[0] new_x = x - max_x.unsqueeze(dim).expand_as(x) return max_x + (new_x.exp().sum(dim)).log() def log_mean_exp(self, x, dim): """ Compute the log(mean(exp(x), dim)) in a numerically stable manner """ return self.log_sum_exp(x, dim) - np.log(x.size(dim)) def log_normal(self, x, m, v): """ Computes the elem-wise log probability of a Gaussian and then sum over the last dim. Basically we're assuming all dims are batch dims except for the last dim. """ const = -0.5 * x.size(-1) * torch.log(2*torch.tensor(np.pi)) log_det = -0.5 * torch.sum(torch.log(v), dim = -1) log_exp = -0.5 * torch.sum((x - m)**2/v, dim = -1) log_prob = const + log_det + log_exp return log_prob def log_normal_mixture(self, z, m, v): """ Computes log probability of a uniformly-weighted Gaussian mixture. 
""" z = z.view(z.shape[0], 1, -1) log_probs = self.log_normal(z, m, v) log_prob = self.log_mean_exp(log_probs, 1) return log_prob def gaussian_parameters(self, h, dim=-1): m, h = torch.split(h, h.size(dim) // 2, dim=dim) v = F.softplus(h) + 1e-8 return m, v def sample(self, n_samples=1, **kwargs): idx = torch.distributions.categorical.Categorical(self.pi).sample((n_samples,)) m, v = self.gaussian_parameters(self.z_pre.squeeze(0), dim=0) m, v = m[idx], v[idx] z_samples = self.sample_gaussian(m, v) return z_samples.view(z_samples.shape[0], *self.z_shape) def log_p(self, z, **kwargs): return self.forward(z) def forward(self, z, dim=None, **kwargs): """ Computes the mixture of Gaussian prior """ m, v = self.gaussian_parameters(self.z_pre, dim=1) log_p_z = self.log_normal_mixture(z, m, v) return log_p_z def __str__(self): return "MixtureOfGaussians" if __name__ == "__main__": pass
3,185
32.536842
94
py
DDoS
DDoS-master/models/srVAE/priors/realnvp/distributions/__init__.py
from .mog import MixtureOfGaussians from .standard_normal import StandardNormal
80
26
43
py
DDoS
DDoS-master/models/srVAE/priors/realnvp/distributions/standard_normal.py
import math import torch import torch.nn as nn class StandardNormal: """ Isotropic Standard Normal distribution. """ def __init__(self, z_shape): self.z_shape = z_shape def sample(self, n_samples=1, **kwargs): return torch.randn((n_samples, *self.z_shape)) def log_p(self, z, **kwargs): return self.forward(z) def forward(self, z, **kwargs): """ Outputs the log p(z). """ log_probs = z.pow(2) + math.log(math.pi * 2.) log_probs = -0.5 * log_probs.view(z.size(0), -1).sum(dim=1) return log_probs def __call__(self, z, **kwargs): return self.forward(z, **kwargs) def __str__(self): return "StandardNormal" if __name__ == "__main__": pass
764
20.857143
67
py
DDoS
DDoS-master/models/srVAE/priors/realnvp/util/array_util.py
import torch import torch.nn.functional as F def squeeze_2x2(x, reverse=False, alt_order=False): """For each spatial position, a sub-volume of shape `1x1x(N^2 * C)`, reshape into a sub-volume of shape `NxNxC`, where `N = block_size`. Adapted from: https://github.com/tensorflow/models/blob/master/research/real_nvp/real_nvp_utils.py See Also: - TensorFlow nn.depth_to_space: https://www.tensorflow.org/api_docs/python/tf/nn/depth_to_space - Figure 3 of RealNVP paper: https://arxiv.org/abs/1605.08803 Args: x (torch.Tensor): Input tensor of shape (B, C, H, W). reverse (bool): Whether to do a reverse squeeze (unsqueeze). alt_order (bool): Whether to use alternate ordering. """ block_size = 2 if alt_order: n, c, h, w = x.size() if reverse: if c % 4 != 0: raise ValueError('Number of channels must be divisible by 4, got {}.'.format(c)) c //= 4 else: if h % 2 != 0: raise ValueError('Height must be divisible by 2, got {}.'.format(h)) if w % 2 != 0: raise ValueError('Width must be divisible by 4, got {}.'.format(w)) # Defines permutation of input channels (shape is (4, 1, 2, 2)). 
squeeze_matrix = torch.tensor([[[[1., 0.], [0., 0.]]], [[[0., 0.], [0., 1.]]], [[[0., 1.], [0., 0.]]], [[[0., 0.], [1., 0.]]]], dtype=x.dtype, device=x.device) perm_weight = torch.zeros((4 * c, c, 2, 2), dtype=x.dtype, device=x.device) for c_idx in range(c): slice_0 = slice(c_idx * 4, (c_idx + 1) * 4) slice_1 = slice(c_idx, c_idx + 1) perm_weight[slice_0, slice_1, :, :] = squeeze_matrix shuffle_channels = torch.tensor([c_idx * 4 for c_idx in range(c)] + [c_idx * 4 + 1 for c_idx in range(c)] + [c_idx * 4 + 2 for c_idx in range(c)] + [c_idx * 4 + 3 for c_idx in range(c)]) perm_weight = perm_weight[shuffle_channels, :, :, :] if reverse: x = F.conv_transpose2d(x, perm_weight, stride=2) else: x = F.conv2d(x, perm_weight, stride=2) else: b, c, h, w = x.size() x = x.permute(0, 2, 3, 1) if reverse: if c % 4 != 0: raise ValueError('Number of channels {} is not divisible by 4'.format(c)) x = x.view(b, h, w, c // 4, 2, 2) x = x.permute(0, 1, 4, 2, 5, 3) x = x.contiguous().view(b, 2 * h, 2 * w, c // 4) else: if h % 2 != 0 or w % 2 != 0: raise ValueError('Expected even spatial dims HxW, got {}x{}'.format(h, w)) x = x.view(b, h // 2, 2, w // 2, 2, c) x = x.permute(0, 1, 3, 5, 2, 4) x = x.contiguous().view(b, h // 2, w // 2, c * 4) x = x.permute(0, 3, 1, 2) return x def checkerboard_mask(height, width, reverse=False, dtype=torch.float32, device=None, requires_grad=False): """Get a checkerboard mask, such that no two entries adjacent entries have the same value. In non-reversed mask, top-left entry is 0. Args: height (int): Number of rows in the mask. width (int): Number of columns in the mask. reverse (bool): If True, reverse the mask (i.e., make top-left entry 1). Useful for alternating masks in RealNVP. dtype (torch.dtype): Data type of the tensor. device (torch.device): Device on which to construct the tensor. requires_grad (bool): Whether the tensor requires gradient. Returns: mask (torch.tensor): Checkerboard mask of shape (1, 1, height, width). 
""" checkerboard = [[((i % 2) + j) % 2 for j in range(width)] for i in range(height)] mask = torch.tensor(checkerboard, dtype=dtype, device=device, requires_grad=requires_grad) if reverse: mask = 1 - mask # Reshape to (1, 1, height, width) for broadcasting with tensors of shape (B, C, H, W) mask = mask.view(1, 1, height, width) return mask
4,369
40.226415
103
py
DDoS
DDoS-master/models/srVAE/priors/realnvp/util/norm_util.py
import functools import torch import torch.nn as nn def get_norm_layer(norm_type='instance'): if norm_type == 'batch': return functools.partial(nn.BatchNorm2d, affine=True) elif norm_type == 'instance': return functools.partial(nn.InstanceNorm2d, affine=False) else: raise NotImplementedError('Invalid normalization type: {}'.format(norm_type)) def get_param_groups(net, weight_decay, norm_suffix='weight_g', verbose=False): """Get two parameter groups from `net`: One named "normalized" which will override the optimizer with `weight_decay`, and one named "unnormalized" which will inherit all hyperparameters from the optimizer. Args: net (torch.nn.Module): Network to get parameters from weight_decay (float): Weight decay to apply to normalized weights. norm_suffix (str): Suffix to select weights that should be normalized. For WeightNorm, using 'weight_g' normalizes the scale variables. verbose (bool): Print out number of normalized and unnormalized parameters. """ norm_params = [] unnorm_params = [] for n, p in net.named_parameters(): if n.endswith(norm_suffix): norm_params.append(p) else: unnorm_params.append(p) param_groups = [{'name': 'normalized', 'params': norm_params, 'weight_decay': weight_decay}, {'name': 'unnormalized', 'params': unnorm_params}] if verbose: print('{} normalized parameters'.format(len(norm_params))) print('{} unnormalized parameters'.format(len(unnorm_params))) return param_groups class WNConv2d(nn.Module): """Weight-normalized 2d convolution. Args: in_channels (int): Number of channels in the input. out_channels (int): Number of channels in the output. kernel_size (int): Side length of each convolutional kernel. padding (int): Padding to add on edges of input. bias (bool): Use bias in the convolution operation. 
""" def __init__(self, in_channels, out_channels, kernel_size, padding, bias=True): super(WNConv2d, self).__init__() self.conv = nn.utils.weight_norm( nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias)) def forward(self, x): x = self.conv(x) return x class BatchNormStats2d(nn.Module): """Compute BatchNorm2d normalization statistics: `mean` and `var`. Useful for keeping track of sum of log-determinant of Jacobians in flow models. Args: num_features (int): Number of features in the input (i.e., `C` in `(N, C, H, W)`). eps (float): Added to the denominator for numerical stability. decay (float): The value used for the running_mean and running_var computation. Different from conventional momentum, see `nn.BatchNorm2d` for more. """ def __init__(self, num_features, eps=1e-5, decay=0.1): super(BatchNormStats2d, self).__init__() self.eps = eps self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.decay = decay def forward(self, x, training): # Get mean and variance per channel if training: channels = x.transpose(0, 1).contiguous().view(x.size(1), -1) used_mean, used_var = channels.mean(-1), channels.var(-1) curr_mean, curr_var = used_mean, used_var # Update variables self.running_mean = self.running_mean - self.decay * (self.running_mean - curr_mean) self.running_var = self.running_var - self.decay * (self.running_var - curr_var) else: used_mean = self.running_mean used_var = self.running_var used_var += self.eps # Reshape to (N, C, H, W) used_mean = used_mean.view(1, x.size(1), 1, 1).expand_as(x) used_var = used_var.view(1, x.size(1), 1, 1).expand_as(x) return used_mean, used_var
4,052
37.971154
96
py
DDoS
DDoS-master/models/srVAE/priors/realnvp/util/__init__.py
from .array_util import squeeze_2x2, checkerboard_mask from .norm_util import get_norm_layer, get_param_groups, WNConv2d
121
39.666667
65
py
DDoS
DDoS-master/models/srVAE/priors/realnvp/model/real_nvp.py
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from .coupling_layer import CouplingLayer, MaskType from ..util import squeeze_2x2 from ..distributions import StandardNormal # Modified vertion of: https://github.com/chrischute/real-nvp class RealNVP(nn.Module): """RealNVP Model Codebase from Chris Chute: https://github.com/chrischute/real-nvp Based on the paper: "Density estimation using Real NVP" by Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio (https://arxiv.org/abs/1605.08803). Args: num_scales (int): Number of scales in the RealNVP model. in_channels (int): Number of channels in the input. mid_channels (int): Number of channels in the intermediate layers. num_blocks (int): Number of residual blocks in the s and t network of `Coupling` layers. """ def __init__(self, input_shape, mid_channels=64, num_blocks=5, num_scales=2, prior='std_normal'): super().__init__() self.flows = _RealNVP(0, num_scales, input_shape[0], mid_channels, num_blocks) # self.nbits = 8. if prior=='std_normal': self.prior = StandardNormal(input_shape) elif prior=='mog': self.prior = MixtureOfGaussians(input_shape) @torch.no_grad() def sample(self, z_shape, n_samples, device, **kwargs): """Sample from RealNVP model. Args: z_shape (tuple): n_samples (int): Number of samples to generate. device (torch.device): Device to use. """ z = self.prior.sample(n_samples).to(device) x, _ = self.forward(z, reverse=True) return x def log_p(self, x, **kwargs): """ returns the log likelihood. """ z, sldj = self.forward(x, reverse=False) ll = (self.prior.log_p(z) + sldj) # prior_ll = -0.5 * (z ** 2 + np.log(2 * np.pi)) # prior_ll = prior_ll.flatten(1).sum(-1) - np.log(2**self.nbits) * np.prod(z.size()[1:]) # ll = prior_ll + sldj # ll = ll.mean() return ll def forward(self, x, reverse=False): sldj = None if not reverse: sldj = 0 # we do not quintize ! # quintize ! 
# x = (x * (2**self.nbits - 1) + torch.rand_like(x)) / (2**self.nbits) x, sldj = self.flows(x, sldj, reverse) return x, sldj class _RealNVP(nn.Module): """Recursive builder for a `RealNVP` model. Each `_RealNVPBuilder` corresponds to a single scale in `RealNVP`, and the constructor is recursively called to build a full `RealNVP` model. Args: scale_idx (int): Index of current scale. num_scales (int): Number of scales in the RealNVP model. in_channels (int): Number of channels in the input. mid_channels (int): Number of channels in the intermediate layers. num_blocks (int): Number of residual blocks in the s and t network of `Coupling` layers. """ def __init__(self, scale_idx, num_scales, in_channels, mid_channels, num_blocks): super(_RealNVP, self).__init__() self.is_last_block = scale_idx == num_scales - 1 self.in_couplings = nn.ModuleList([ CouplingLayer(in_channels, mid_channels, num_blocks, MaskType.CHECKERBOARD, reverse_mask=False), CouplingLayer(in_channels, mid_channels, num_blocks, MaskType.CHECKERBOARD, reverse_mask=True), CouplingLayer(in_channels, mid_channels, num_blocks, MaskType.CHECKERBOARD, reverse_mask=False) ]) if self.is_last_block: self.in_couplings.append( CouplingLayer(in_channels, mid_channels, num_blocks, MaskType.CHECKERBOARD, reverse_mask=True)) else: self.out_couplings = nn.ModuleList([ CouplingLayer(4 * in_channels, 2 * mid_channels, num_blocks, MaskType.CHANNEL_WISE, reverse_mask=False), CouplingLayer(4 * in_channels, 2 * mid_channels, num_blocks, MaskType.CHANNEL_WISE, reverse_mask=True), CouplingLayer(4 * in_channels, 2 * mid_channels, num_blocks, MaskType.CHANNEL_WISE, reverse_mask=False) ]) self.next_block = _RealNVP(scale_idx + 1, num_scales, 2 * in_channels, 2 * mid_channels, num_blocks) def forward(self, x, sldj, reverse=False): if reverse: if not self.is_last_block: # Re-squeeze -> split -> next block x = squeeze_2x2(x, reverse=False, alt_order=True) x, x_split = x.chunk(2, dim=1) x, sldj = self.next_block(x, sldj, reverse) x = 
torch.cat((x, x_split), dim=1) x = squeeze_2x2(x, reverse=True, alt_order=True) # Squeeze -> 3x coupling (channel-wise) x = squeeze_2x2(x, reverse=False) for coupling in reversed(self.out_couplings): x, sldj = coupling(x, sldj, reverse) x = squeeze_2x2(x, reverse=True) for coupling in reversed(self.in_couplings): x, sldj = coupling(x, sldj, reverse) else: for coupling in self.in_couplings: x, sldj = coupling(x, sldj, reverse) if not self.is_last_block: # Squeeze -> 3x coupling (channel-wise) x = squeeze_2x2(x, reverse=False) for coupling in self.out_couplings: x, sldj = coupling(x, sldj, reverse) x = squeeze_2x2(x, reverse=True) # Re-squeeze -> split -> next block x = squeeze_2x2(x, reverse=False, alt_order=True) x, x_split = x.chunk(2, dim=1) x, sldj = self.next_block(x, sldj, reverse) x = torch.cat((x, x_split), dim=1) x = squeeze_2x2(x, reverse=True, alt_order=True) return x, sldj
5,949
37.636364
120
py
DDoS
DDoS-master/models/srVAE/priors/realnvp/model/coupling_layer.py
import torch import torch.nn as nn from enum import IntEnum from ..util import checkerboard_mask from src.modules.nn_layers import * class MaskType(IntEnum): CHECKERBOARD = 0 CHANNEL_WISE = 1 class CouplingLayer(nn.Module): """Coupling layer in RealNVP. Args: in_channels (int): Number of channels in the input. mid_channels (int): Number of channels in the `s` and `t` network. num_blocks (int): Number of residual blocks in the `s` and `t` network. mask_type (MaskType): One of `MaskType.CHECKERBOARD` or `MaskType.CHANNEL_WISE`. reverse_mask (bool): Whether to reverse the mask. Useful for alternating masks. """ def __init__(self, in_channels, mid_channels, num_blocks, mask_type, reverse_mask): super(CouplingLayer, self).__init__() # Save mask info self.mask_type = mask_type self.reverse_mask = reverse_mask if self.mask_type == MaskType.CHANNEL_WISE: in_channels //= 2 # Build scale and translate network growth_rate, steps = 64, 5 self.st_net = nn.Sequential( DenseNetLayer(inplanes=in_channels, growth_rate=growth_rate, steps=steps), Conv2d(in_channels + growth_rate*steps, 2*in_channels, kernel_size=3, stride=1, padding=1) ) # Learnable scale for s self.rescale = nn.utils.weight_norm(Rescale(in_channels)) def forward(self, x, sldj=None, reverse=True): if self.mask_type == MaskType.CHECKERBOARD: # Checkerboard mask b = checkerboard_mask(x.size(2), x.size(3), self.reverse_mask, device=x.device) x_b = x * b st = self.st_net(x_b) s, t = st.chunk(2, dim=1) s = self.rescale(torch.tanh(s)) s = s * (1 - b) t = t * (1 - b) # Scale and translate if reverse: inv_exp_s = s.mul(-1).exp() if torch.isnan(inv_exp_s).any(): raise RuntimeError('Scale factor has NaN entries') x = x * inv_exp_s - t else: exp_s = s.exp() if torch.isnan(exp_s).any(): raise RuntimeError('Scale factor has NaN entries') x = (x + t) * exp_s # Add log-determinant of the Jacobian sldj += s.view(s.size(0), -1).sum(-1) else: # Channel-wise mask if self.reverse_mask: x_id, x_change = x.chunk(2, dim=1) else: x_change, 
x_id = x.chunk(2, dim=1) st = self.st_net(x_id) s, t = st.chunk(2, dim=1) s = self.rescale(torch.tanh(s)) # Scale and translate if reverse: inv_exp_s = s.mul(-1).exp() if torch.isnan(inv_exp_s).any(): raise RuntimeError('Scale factor has NaN entries') x_change = x_change * inv_exp_s - t else: exp_s = s.exp() if torch.isnan(exp_s).any(): raise RuntimeError('Scale factor has NaN entries') x_change = (x_change + t) * exp_s # Add log-determinant of the Jacobian sldj += s.view(s.size(0), -1).sum(-1) if self.reverse_mask: x = torch.cat((x_id, x_change), dim=1) else: x = torch.cat((x_change, x_id), dim=1) return x, sldj class Rescale(nn.Module): """Per-channel rescaling. Need a proper `nn.Module` so we can wrap it with `torch.nn.utils.weight_norm`. Args: num_channels (int): Number of channels in the input. """ def __init__(self, num_channels): super(Rescale, self).__init__() self.weight = nn.Parameter(torch.ones(num_channels, 1, 1)) def forward(self, x): x = self.weight * x return x
4,031
32.04918
91
py
DDoS
DDoS-master/models/srVAE/priors/realnvp/model/__init__.py
from .real_nvp import RealNVP
30
14.5
29
py
DDoS
DDoS-master/models/ShuffleUNet/icnr.py
import torch
import torch.nn as nn


def ICNR(tensor, upscale_factor=2, inizializer=nn.init.kaiming_normal_):
    """ICNR weight initialisation for pixel-shuffle upscaling.

    Initialises a sub-kernel with 1/upscale_factor**2 of the output channels,
    then tiles it so every group of upscale_factor**2 output channels shares
    the same weights. Used to make pixel shuffle start as nearest-neighbour
    upsampling and avoid checkerboard artefacts.

    Args:
        tensor: Weight tensor of the convolution preceding the shuffle.
        upscale_factor (int): Pixel-shuffle factor (default 2).
        inizializer: In-place init function applied to the sub-kernel.

    Returns:
        A new tensor with the same shape as `tensor`, ICNR-initialised.
    """
    sub_out_channels = int(tensor.shape[0] / (upscale_factor ** 2))
    sub = inizializer(torch.zeros([sub_out_channels] + list(tensor.shape[1:])))
    # Flatten the spatial dims, replicate each sub-kernel upscale_factor**2
    # times, then fold back into the full kernel shape.
    sub = sub.transpose(0, 1)
    flat = sub.contiguous().view(sub.shape[0], sub.shape[1], -1)
    tiled = flat.repeat(1, 1, upscale_factor ** 2)
    full_shape = [tensor.shape[1], tensor.shape[0], *tensor.shape[2:]]
    return tiled.contiguous().view(full_shape).transpose(0, 1)
708
31.227273
87
py
DDoS
DDoS-master/models/ShuffleUNet/pixel_shuffle.py
import torch.nn as nn

from . import icnr


def _pixel_shuffle(input, upscale_factor):
    r"""Rearrange a tensor of shape :math:`(N, C, d_1, ..., d_n)` into
    :math:`(N, C/r^n, d_1*r, ..., d_n*r)` where :math:`n` is the number of
    spatial dimensions and :math:`r` the upscale factor.

    N-dimensional generalisation of :class:`torch.nn.PixelShuffle`.

    Args:
        input (Tensor): Input of shape (N, C, spatial...).
        upscale_factor (int): Factor to increase spatial resolution by.
    """
    shape = list(input.size())
    n_spatial = len(shape) - 2
    shape[1] //= (upscale_factor ** n_spatial)
    target_spatial = [upscale_factor * dim for dim in shape[2:]]
    # Expand the channel dim into n_spatial extra axes of size upscale_factor.
    expanded = input.contiguous().view(
        shape[0], shape[1], *([upscale_factor] * n_spatial), *shape[2:]
    )
    # Interleave each upscale axis with its spatial axis, then flatten.
    axes = list(range(2, 2 + 2 * n_spatial))
    axes = axes[1::2] + axes[0::2]
    shuffled = expanded.permute(0, 1, *reversed(axes)).contiguous()
    return shuffled.view(shape[0], shape[1], *target_spatial)


class PixelShuffle(nn.Module):
    """Conv (ICNR-initialised) followed by a 2x pixel shuffle."""

    def __init__(self, in_c, out_c, kernel, stride, bias=True, d=3):
        super(PixelShuffle, self).__init__()
        conv_cls = nn.Conv3d if d == 3 else nn.Conv2d
        self.conv = conv_cls(in_c, out_c, kernel_size=kernel, stride=stride,
                             bias=bias, padding=kernel // 2)
        # ICNR init so the shuffle starts artefact-free.
        self.icnr_weights = icnr.ICNR(self.conv.weight, 2)
        self.conv.weight.data.copy_(self.icnr_weights)

    def forward(self, x):
        return _pixel_shuffle(self.conv(x), 2)
2,276
35.142857
111
py
DDoS
DDoS-master/models/ShuffleUNet/net.py
"""ShuffleUNet: UNet-style encoder/decoder that downsamples with
pixel-unshuffle and upsamples with pixel-shuffle, using 4-branch
convolutional decompositions as additive skip connections."""
import sys
import torch
import torch.nn as nn
from . import pixel_shuffle, pixel_unshuffle

# -------------------------------------------------------------------------------------------------------------------------------------------------##


class _double_conv(nn.Module):
    """ Double Convolution Block """
    # conv -> ReLU -> conv -> ReLU; both convs same-padded (padding = k_size // 2).

    def __init__(self, in_channels, out_channels, k_size, stride, bias=True, conv_layer=nn.Conv3d):
        super(_double_conv, self).__init__()
        self.conv_1 = conv_layer(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=k_size // 2, bias=bias)
        self.conv_2 = conv_layer(in_channels=out_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=k_size // 2, bias=bias)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv_1(x)
        x = self.relu((x))
        x = self.conv_2(x)
        x = self.relu((x))
        return x


class _conv_decomp(nn.Module):
    """ Convolutional Decomposition Block """
    # Four independent same-padded convolutions applied to the same input;
    # returns the four ReLU-activated branch outputs as a tuple.

    def __init__(self, in_channels, out_channels, k_size, stride, bias=True, conv_layer=nn.Conv3d):
        super(_conv_decomp, self).__init__()
        self.conv1 = conv_layer(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=k_size // 2, bias=bias)
        self.conv2 = conv_layer(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=k_size // 2, bias=bias)
        self.conv3 = conv_layer(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=k_size // 2, bias=bias)
        self.conv4 = conv_layer(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=k_size // 2, bias=bias)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x1 = self.conv1(x)
        x1 = self.relu((x1))
        x2 = self.conv2(x)
        x2 = self.relu((x2))
        x3 = self.conv3(x)
        x3 = self.relu((x3))
        x4 = self.conv4(x)
        x4 = self.relu((x4))
        return x1, x2, x3, x4


class _concat(nn.Module):
    """ Skip-Addition block """
    # Adds encoder and decoder decomposition branches pairwise (e_i + d_i) and
    # stacks the four sums along the channel dim (4x the branch channels).
    # NOTE(review): the sums are stored on `self` (X1..X4) but never read
    # elsewhere in this file; plain locals would avoid keeping stale state.

    def __init__(self):
        super(_concat, self).__init__()

    def forward(self, e1, e2, e3, e4, d1, d2, d3, d4):
        self.X1 = e1 + d1
        self.X2 = e2 + d2
        self.X3 = e3 + d3
        self.X4 = e4 + d4
        x = torch.cat([self.X1, self.X2, self.X3, self.X4], dim=1)
        return x

# -------------------------------------------------------------------------------------------------------------------------------------------------##


class ShuffleUNet(nn.Module):
    """Shuffle-based UNet.

    Args:
        d (int): Dimensionality of the data, 2 or 3 (anything else exits).
        in_ch (int): Number of input channels.
        num_features (int): Channel width at the first level.
        n_levels (int): Number of down/up-sampling levels.
        out_ch (int): Number of output channels.
        kernel_size (int): Kernel size of all (non-1x1) convolutions.
        stride (int): Stride of all convolutions.
    """

    def __init__(self, d=3, in_ch=1, num_features=64, n_levels=3, out_ch=1, kernel_size=3, stride=1):
        super(ShuffleUNet, self).__init__()
        self.n_levels = n_levels
        num_features = num_features
        # Channel widths per level: [F, 2F, 4F, ...].
        filters = [num_features]
        for _ in range(n_levels):
            filters.append(filters[-1]*2)
        if d==3:
            conv_layer = nn.Conv3d
            # ps_fact is chosen so that after the 2x pixel shuffle (which
            # divides channels by 2**d) the output has exactly filters[i]
            # channels: 3-D shuffle divides by 8, so pre-expand by 4.
            ps_fact = (2 ** 2)
        elif d==2:
            conv_layer = nn.Conv2d
            # 2-D shuffle divides channels by 4, so pre-expand by 2.
            ps_fact = 2
        else:
            sys.exit("Invalid d")

        # Input
        self.conv_inp = _double_conv(in_ch, filters[0], kernel_size, stride, conv_layer=conv_layer)

        #Contraction path
        self.wave_down = nn.ModuleList()
        self.pix_unshuff = nn.ModuleList()
        self.conv_enc = nn.ModuleList()
        for i in range(0, n_levels):
            self.wave_down.append(_conv_decomp(filters[i], filters[i], kernel_size, stride, conv_layer=conv_layer))
            self.pix_unshuff.append(pixel_unshuffle.PixelUnshuffle(num_features * (2**i), num_features * (2**i), kernel_size, stride, d=d))
            self.conv_enc.append(_double_conv(filters[i], filters[i+1], kernel_size, stride, conv_layer=conv_layer))

        #Expansion path
        self.cat = _concat()
        self.pix_shuff = nn.ModuleList()
        self.wave_up = nn.ModuleList()
        self.convup = nn.ModuleList()
        for i in range(n_levels-1,-1,-1):
            self.pix_shuff.append(pixel_shuffle.PixelShuffle(num_features * (2**(i+1)), num_features * (2**(i+1)) * ps_fact, kernel_size, stride, d=d))
            self.wave_up.append(_conv_decomp(filters[i], filters[i], kernel_size, stride, conv_layer=conv_layer))
            # 5*filters[i] input channels: 4 from the skip-addition stack + 1
            # encoder feature map of filters[i] channels (see forward).
            self.convup.append(_double_conv(filters[i] * 5, filters[i], kernel_size, stride, conv_layer=conv_layer))

        #FC
        self.out = conv_layer(filters[0], out_ch, kernel_size=1, stride=1, padding=0, bias=True)

        #Weight init
        # NOTE(review): self.modules() recurses into PixelShuffle/PixelUnshuffle,
        # so this Kaiming re-init also overwrites their ICNR-initialised conv
        # weights -- confirm whether that is intended.
        for m in self.modules():
            if isinstance(m, conv_layer):
                weight = nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
                m.weight.data.copy_(weight)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        # Encoder: at each level, decompose the current features into 4
        # branches; only the 4th branch (waves[-1][-1]) is unshuffle-downsampled
        # and double-conved to the next level. The full 4-tuple is kept for the
        # skip connection.
        encs = [self.conv_inp(x)]
        waves = []
        for i in range(self.n_levels):
            waves.append(self.wave_down[i](encs[-1]))
            _tmp = self.pix_unshuff[i](waves[-1][-1])
            encs.append(self.conv_enc[i](_tmp))
        # Decoder: shuffle up, decompose, then `+` concatenates the decoder
        # 4-tuple with the matching encoder 4-tuple (tuple concat -> 8 tensors)
        # for the pairwise skip-addition; finally fuse with the encoder feature.
        dec = encs.pop()
        for i in range(self.n_levels):
            _tmp = self.pix_shuff[i](dec)
            _tmp_waves = self.wave_up[i](_tmp) + waves.pop()
            _tmp_cat = self.cat(*_tmp_waves)
            dec = self.convup[i](torch.cat([encs.pop(), _tmp_cat], dim=1))
        return self.out(dec)
5,659
36.733333
151
py
DDoS
DDoS-master/models/ShuffleUNet/pixel_unshuffle.py
"""Pixel-unshuffle downsampling blocks (2-D and 3-D) for ShuffleUNet."""
import torch.nn as nn
from . import icnr


class _double_conv_3d(nn.Module):
    """ Convolution Block """
    # conv -> BN -> ReLU -> conv -> BN -> ReLU, 3-D, same-padded.

    def __init__(self, in_channels, out_channels, k_size, stride, bias=True):
        super(_double_conv_3d, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=k_size//2, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=k_size//2, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x = self.conv(x)
        return x


class _double_conv_2d(nn.Module):
    """ Convolution Block """
    # conv -> BN -> ReLU -> conv -> BN -> ReLU, 2-D, same-padded.

    def __init__(self, in_channels, out_channels, k_size, stride, bias=True):
        super(_double_conv_2d, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=k_size//2, bias=bias),
            nn.BatchNorm2d(num_features=out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=k_size//2, bias=bias),
            nn.BatchNorm2d(num_features=out_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x = self.conv(x)
        return x


def _pixel_unshuffle_3d(input, upscale_factor):
    r"""Rearranges elements in a Tensor of shape :math:`(N, C, rD, rH, rW)` to a
    tensor of shape :math:`(N, r^3 C, D, H, W)` (inverse of 3-D pixel shuffle).

    written by: Zhaoyi Yan, https://github.com/Zhaoyi-Yan
    and Kai Zhang, https://github.com/cszn/FFDNet
    01/01/2019
    """
    batch_size, channels, depth, in_height, in_width = input.size()

    depth_final = depth // upscale_factor
    out_height = in_height // upscale_factor
    out_width = in_width // upscale_factor

    # Split each spatial axis into (out_size, upscale_factor) pairs.
    input_view = input.contiguous().view(
        batch_size, channels, depth_final, upscale_factor,
        out_height, upscale_factor, out_width, upscale_factor)

    channels *= upscale_factor ** 3
    # Move the three upscale axes next to the channel axis, then flatten.
    unshuffle_out = input_view.permute(0, 1, 3, 5, 7, 2, 4, 6).contiguous()
    return unshuffle_out.view(batch_size, channels, depth_final, out_height, out_width)


def _pixel_unshuffle_2d(input, upscale_factor):
    r"""Rearranges elements in a Tensor of shape :math:`(C, rH, rW)` to a
    tensor of shape :math:`(*, r^2C, H, W)`.

    written by: Zhaoyi Yan, https://github.com/Zhaoyi-Yan
    and Kai Zhang, https://github.com/cszn/FFDNet
    01/01/2019
    """
    batch_size, channels, in_height, in_width = input.size()

    out_height = in_height // upscale_factor
    out_width = in_width // upscale_factor

    # Split each spatial axis into (out_size, upscale_factor) pairs.
    input_view = input.contiguous().view(
        batch_size, channels, out_height, upscale_factor,
        out_width, upscale_factor)

    channels *= upscale_factor ** 2
    # Move the two upscale axes next to the channel axis, then flatten.
    unshuffle_out = input_view.permute(0, 1, 3, 5, 2, 4).contiguous()
    return unshuffle_out.view(batch_size, channels, out_height, out_width)


class PixelUnshuffle(nn.Module):
    """Conv (ICNR-initialised) -> 2x pixel unshuffle (channels x 2**d) ->
    double conv reducing the channels back down to `out_c`."""

    def __init__(self, in_c, out_c, kernel, stride, bias=True, d=3):
        super(PixelUnshuffle, self).__init__()
        if d == 3:
            self.conv = nn.Conv3d(in_c, out_c, kernel_size=kernel, stride=stride, bias=bias, padding=kernel//2)
            # 3-D unshuffle by 2 multiplies channels by 8.
            self.down_conv = _double_conv_3d(out_c*8, out_c, kernel, stride, bias)
            self.pu = _pixel_unshuffle_3d
        else:
            self.conv = nn.Conv2d(in_c, out_c, kernel_size=kernel, stride=stride, bias=bias, padding=kernel//2)
            # 2-D unshuffle by 2 multiplies channels by 4.
            self.down_conv = _double_conv_2d(out_c*4, out_c, kernel, stride, bias)
            self.pu = _pixel_unshuffle_2d
        self.icnr_weights = icnr.ICNR(self.conv.weight, 2)
        self.conv.weight.data.copy_(self.icnr_weights)

    def forward(self, x):
        x = self.conv(x)
        x = self.down_conv(self.pu(x, 2))
        return x
4,185
37.054545
111
py
DDoS
DDoS-master/models/ShuffleUNet/__init__.py
0
0
0
py
DDoS
DDoS-master/visualisation/num4trilinear.py
"""Compute quality metrics for trilinearly-interpolated undersampled volumes
against their fully-sampled counterparts and write them to a CSV."""
from glob import glob
import torch
from tqdm import tqdm
import os
import nibabel as nib
import numpy as np
import pandas as pd
import torch.nn.functional as F
from utils.utilities import calc_metircs

fully_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/3DDynTest/MickAbdomen3DDyn/DynProtocol3/Filtered/hrTestDynConST"
under_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/3DDynTest/MickAbdomen3DDyn/DynProtocol3/Filtered/usTestDynConST"
interp = "trilinear"

metrics = []
for under_path in tqdm(sorted(glob(f"{under_root}/**/*.nii.gz", recursive=True))):
    # Keep only centre-mask, non-baseline-timepoint, unpadded volumes.
    has_center = "Center" in under_path or "Centre" in under_path
    if not has_center or "TP00" in under_path or "WoPad" not in under_path:
        continue

    # Map onto the fully-sampled tree; the undersampling folder (3rd from the
    # end) exists only on the undersampled side, so drop it.
    parts = under_path.replace(under_root, fully_root).split(os.path.sep)
    undersampling = parts[-3]
    del parts[-3]
    fully_path = os.path.sep.join(parts)

    vol_under = np.array(nib.load(under_path).get_fdata())
    vol_fully = np.array(nib.load(fully_path).get_fdata())
    vol_under /= vol_under.max()
    vol_fully /= vol_fully.max()

    # Trilinear-upsample the undersampled volume onto the fully-sampled grid.
    vol_under = F.interpolate(
        torch.from_numpy(vol_under).unsqueeze(0).unsqueeze(0),
        size=vol_fully.shape, mode=interp, align_corners=False,
    ).squeeze().numpy()

    rec, _ = calc_metircs(vol_fully, vol_under, tag="ZPad")
    rec["file"] = parts[-2] + "_" + parts[-1]
    rec["subject"] = parts[-6] + "_" + parts[-5]
    rec["undersampling"] = undersampling + "WoPad"
    rec["model"] = interp.capitalize()
    rec["DiffZPad"] = np.std(vol_fully - vol_under)
    metrics.append(rec)

pd.DataFrame.from_dict(metrics).to_csv(f"{os.path.dirname(under_root)}/noprevnorm_metrics_{interp}.csv")
1,754
38
158
py
DDoS
DDoS-master/visualisation/num4zpad.py
"""Compute quality metrics for zero-padded undersampled volumes against their
fully-sampled counterparts and write them to a CSV."""
from glob import glob
from tqdm import tqdm
import os
import nibabel as nib
import numpy as np
import pandas as pd
from utils.utilities import calc_metircs

fully_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/3DDynTest/MarioAbdomen3DDyn/DynProtocol1/Filtered/hrTestDynConST"
zpad_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/3DDynTest/MarioAbdomen3DDyn/DynProtocol1/Filtered/usTestDynConST"

metrics = []
for zpad_path in tqdm(sorted(glob(f"{zpad_root}/**/*.nii.gz", recursive=True))):
    # Keep only centre-mask, non-baseline-timepoint, zero-PADDED volumes
    # (unlike the trilinear script, "WoPad" files are skipped here).
    has_center = "Center" in zpad_path or "Centre" in zpad_path
    if not has_center or "TP00" in zpad_path or "WoPad" in zpad_path:
        continue

    # Map onto the fully-sampled tree; the undersampling folder (3rd from the
    # end) exists only on the zero-padded side, so drop it.
    parts = zpad_path.replace(zpad_root, fully_root).split(os.path.sep)
    undersampling = parts[-3]
    del parts[-3]
    fully_path = os.path.sep.join(parts)

    vol_zpad = np.array(nib.load(zpad_path).get_fdata())
    vol_fully = np.array(nib.load(fully_path).get_fdata())
    vol_zpad /= vol_zpad.max()
    vol_fully /= vol_fully.max()

    rec, _ = calc_metircs(vol_fully, vol_zpad, tag="ZPad")
    rec["file"] = parts[-2] + "_" + parts[-1]
    rec["subject"] = parts[-6] + "_" + parts[-5]
    rec["undersampling"] = undersampling + "WoPad"
    rec["model"] = "ZeroPadded"
    rec["DiffZPad"] = np.std(vol_fully - vol_zpad)
    metrics.append(rec)

pd.DataFrame.from_dict(metrics).to_csv(os.path.dirname(zpad_root)+"/noprevnorm_metrics_zpad.csv")
1,504
36.625
145
py
DDoS
DDoS-master/visualisation/generate_plots.py
#!/usr/bin/env python
"""Step 4 of 4: generate box/line plots from the consolidated metrics CSV.

FIX: this file contained unresolved git merge-conflict markers
(`<<<<<<< Updated upstream` / `=======` / `>>>>>>> Stashed changes`), which
made it a SyntaxError. Resolved in favour of the "Stashed changes" side
(separate generate_boxplot / generate_lineplot, space-separated legend
labels), which is what the active plotting calls below use.
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter

sns.set_theme(style="darkgrid")

#Step 4 (actual) of 4


def convertInp2Out(df, method_name):
    """Drop the *Out columns and relabel the *Inp (interpolated input) metric
    columns as *Out, tagging the rows with `method_name`. Kept for the
    (currently commented-out) interpolated-input baseline."""
    df = df[df.columns.drop(list(df.filter(regex='Out')))]
    df.columns = df.columns.str.replace('Inp', 'Out')
    df.model = method_name
    return df


samplings = {
    "Center4MaskWoPad": "4% of k-space",
    "Center6p25MaskWoPad": "6.25% of k-space",
    "Center10MaskWoPad": "10% of k-space",
}

models = {
    "Trilinear": "Trilinear Interpolation",
    "ZeroPadded": "Zero-padded",
    "Baseline_Dyn": "UNet\n(CHAOS\nDynamic)",
    "Baseline_NonDyn": "UNet\n(CHAOS)",
    "DDoS": "DDoS-UNet",
}

subjects = {
    "PhilAbd3DDyn1conST": 0,
    "MarioAbd3DDyn1conST": 1,
    "MickAbd3DDyn3conST": 2,
    "ChimpAbd3DDyn3conST": 3,
    "FatyAbd3DDyn3conST": 4,
}

# Space-separated labels; the box plot converts them to line-broken form.
legend_order = ["Trilinear Interpolation", "Zero-padded", "UNet (CHAOS)", "UNet (CHAOS Dynamic)", "DDoS-UNet"]

ignore_antipasto = False

consolidated_csv = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Results/DDoS_Paper1/dynDualChn/DDoS-UNet/FullVol/woZPad/Results/QuantitativeAnalysis/Sources/consolidated.csv"
results = pd.read_csv(consolidated_csv)
results = results.replace({"model": models, "undersampling": samplings, "subject": subjects})
results = results.rename(columns={'subject': 'SubjectID', 'undersampling': 'Undersampling'})


def generate_boxplot(df, y, path, title="", x_marker="Method", palette="pastel"):
    """One box per method, ordered as in legend_order (line-broken labels)."""
    df.Method = df.Method.str.replace(" ", "\n")
    ax = sns.boxplot(data=df, x=x_marker, y=y, palette=palette,
                     order=[l.replace(" ", "\n") for l in legend_order])
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    #plot and save
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    plt.title(title, fontweight="bold")
    plt.tight_layout()
    plt.savefig(path, format='png')
    plt.clf()


def generate_lineplot(df, y, path, title="", x_marker="Timepoint", ci=95):
    """One line per method over `x_marker`, legend re-ordered to legend_order."""
    df.Method = df.Method.str.replace("\n", " ")
    ax = sns.lineplot(data=df, x=x_marker, y=y, hue="Method", marker="o", ci=ci)
    if x_marker == "SubjectID":
        #fix the x-ticks
        ax.set(xticks=df[x_marker].unique())
    #re-arrange legends
    handles, labels = ax.get_legend_handles_labels()
    order = [labels.index(l) for l in legend_order]
    ax.legend([handles[idx] for idx in order], [labels[idx] for idx in order],
              bbox_to_anchor=(1.01, 1), borderaxespad=0)
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    #plot and save
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    plt.title(title, fontweight="bold")
    plt.tight_layout()
    plt.savefig(path, format='png')
    plt.clf()


for us in samplings.keys():
    us_results = results[results["Undersampling"] == samplings[us]].sort_values(by='file')

    df = us_results.reset_index(drop=True)
    df = df.rename(columns={'SSIMOut': 'SSIM', 'PSNROut': 'PSNR', "model": "Method", "file": "Timepoint"})
    df.sort_values('Method', inplace=True, ascending=False)

    # Reduce e.g. "TP03_<...>" file names to the integer timepoint 3.
    TPtags = df.Timepoint.str.split("_").str[0].unique()
    for tp in TPtags:
        df = df.replace(to_replace=tp + '_*', value=int(tp.replace("TP", "")), regex=True)

    if ignore_antipasto:
        # Drop the first (pre-contrast) timepoint.
        df = df[df.Timepoint != df.Timepoint.unique().min()]

    generate_boxplot(df, y='SSIM', path=consolidated_csv.replace("Sources/consolidated.csv", "Plots")+'/xMethodBox_'+('_noAP_' if ignore_antipasto else '')+'_'+us+'_SSIM.png', title=samplings[us])
    generate_boxplot(df, y='PSNR', path=consolidated_csv.replace("Sources/consolidated.csv", "Plots")+'/xMethodBox_'+('_noAP_' if ignore_antipasto else '')+'_'+us+'_PSNR.png', title=samplings[us])
9,370
51.646067
226
py
DDoS
DDoS-master/visualisation/consolidate.py
"""Step 1 of 4: consolidate the per-run Results.csv files, tag each row with
run metadata, and compute difference-image standard deviations against the
min-max-normalised ground truth."""
import numpy as np
import pandas as pd
from glob import glob
from tqdm import tqdm
import os
import nibabel as nib


def MinMax(data):
    """Min-max normalise `data` to [0, 1].

    NOTE(review): divides by (max - min); a constant-valued volume would
    divide by zero.
    """
    return (data-data.min())/(data.max()-data.min())


#Step 1 (actual) of 4
results_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Results/DDoS_Paper1/dynDualChn/DDoS-UNet/FullVol/ZPad/Results"
dataset_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/3DDynTest"
csv_path = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Results/DDoS_Paper1/dynDualChn/DDoS-UNet/FullVol/woZPad/Results/QuantitativeAnalysis/CSVs/RAWs"

results_csvs = glob(f"{results_root}/**/Results.csv", recursive=True)

dfs = []
for csv in tqdm(results_csvs):
    if "NonDyn" in csv:
        continue
    df = pd.read_csv(csv)
    fileparts = csv.split(os.path.sep)
    # Run metadata is encoded in the directory names.
    if "ZeroPadded" not in csv:
        recontype, subject, _, _, _ = fileparts[-2].split("_")
        undersampling = fileparts[-3].split("_")[2]
        model = fileparts[-4]
    else:
        recontype, subject, _, _, _, undersampling = fileparts[-2].split("_")
        model = "ZeroPadded"
        undersampling += "WoPad"
        # For the zero-padded baseline the "input" IS the reconstruction, so
        # its Inp metric columns are relabelled as Out. (DL rows keep both
        # Inp and Out columns -- downstream convertInp2Out depends on that.)
        df.columns = df.columns.str.replace('Inp', 'Out')
    df["recontype"] = recontype
    df["subject"] = subject
    df["undersampling"] = undersampling
    df["model"] = model
    # Placeholders, overwritten below where the volumes exist on disk.
    df['DiffInp'] = 1.0
    df['DiffOut'] = 1.0

    datasetpath = f"{dataset_root}/{subject.split('Abd3DDyn')[0]}Abdomen3DDyn/DynProtocol{subject.split('Abd3DDyn')[1][0]}/Filtered/hrTestDyn{subject.split('Abd3DDyn')[1][1:].replace('conST', 'ConST')}"
    for f in df.file.unique():
        tp = f.split("_")[0]
        tppath = glob(f"{datasetpath}/{tp}/*.nii*")[0]
        gt = MinMax(nib.load(tppath).get_fdata())
        try:
            inppath = glob(csv.replace(".csv", f"/{f}/inp.nii*"))[0]
            inp = nib.load(inppath).get_fdata()
            diff_inp = gt - inp
            nib.save(nib.Nifti1Image(diff_inp, np.eye(4)), csv.replace(".csv", f"/{f}/diff_inp_nonorm.nii.gz"))
            diff_inp_std = np.std(diff_inp)
            if "ZeroPadded" not in csv:
                df.loc[df.file == f, "DiffInp"] = diff_inp_std
            else:
                df.loc[df.file == f, "DiffOut"] = diff_inp_std
        except Exception:
            # Best-effort: some runs have no inp volume on disk; skip quietly.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass
        try:
            outpath = glob(csv.replace(".csv", f"/{f}/out.nii*"))[0]
            out = nib.load(outpath).get_fdata()
            diff_out = gt - out
            nib.save(nib.Nifti1Image(diff_out, np.eye(4)), csv.replace(".csv", f"/{f}/diff_out_nonorm.nii.gz"))
            diff_out_std = np.std(diff_out)
            df.loc[df.file == f, "DiffOut"] = diff_out_std
        except Exception:
            # Zero-padded runs have no out volume; skip quietly.
            pass
    dfs.append(df)

df = pd.concat(dfs)
df.to_csv(f"{csv_path}/DL_Results.csv")
2,781
36.594595
200
py
DDoS
DDoS-master/visualisation/merge.py
"""Replace the (incorrect) ZeroPadded rows of the consolidated CSV with the
corrected zero-padded results and write the merged table to a new CSV."""
import pandas as pd

total = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Results/DDoS_Paper1/dynDualChn/DDoS-UNet/FullVol/Results/consolidated_wrong_diffSDZeroPad.csv"
zero = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Results/DDoS_Paper1/dynDualChn/DDoS-UNet/FullVol/Results/consolidated_zeroP.csv"
newcsv = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Results/DDoS_Paper1/dynDualChn/DDoS-UNet/FullVol/Results/consolidated.csv"

frame = pd.read_csv(total)
print(len(frame))

# Drop the wrong zero-padded rows...
frame = frame[frame.model != "ZeroPadded"]
print(len(frame))

# ...and append the corrected ones.
frame = pd.concat([frame, pd.read_csv(zero)])
print(len(frame))

frame.to_csv(newcsv)
663
40.5
164
py
DDoS
DDoS-master/visualisation/calc_time.py
"""Back-of-the-envelope MRI acquisition-time calculation from the protocol
parameters; prints the actual number of phase-encoding steps and the total
scan time in seconds."""

# Protocol parameters
nPE = 264         # nominal phase-encoding steps
nSlice = 44       # nominal number of slices
TR = 2.31         # repetition time (ms)
overPE = 0.10     # phase oversampling fraction
overSlice = 0.00  # slice oversampling fraction (unused below)
resPE = 0.50      # phase resolution fraction
resSlice = 0.64   # slice resolution fraction

# Acquired matrix after oversampling and partial resolution.
actualPE = round(nPE * (1 + overPE) * resPE)
actualSlice = round(nSlice * resSlice)

# Total acquisition time: one TR per phase-encode line, per slice.
totalTR = actualPE * TR
totalTime = totalTR * actualSlice

print(actualPE)
print(round(totalTime / 1000, 2))  # ms -> s
287
15
42
py
DDoS
DDoS-master/visualisation/merge_csvs.py
"""Step 2 of 4: merge the DL results with the per-subject interpolation /
zero-padding baseline CSVs into one consolidated CSV.

FIX: `DataFrame.append` (deprecated since pandas 1.4, removed in 2.0)
replaced with `pd.concat`.
"""
import pandas as pd
from glob import glob
import os

#Step 2 (actual) of 4
csv_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Results/DDoS_Paper1/dynDualChn/DDoS-UNet/FullVol/woZPad/Results/QuantitativeAnalysis/CSVs"

dfDL = pd.read_csv(f"{csv_root}/RAWs/DL_Results.csv")
# Zero-padded rows come from the dedicated baseline CSVs instead.
dfDL.drop(dfDL[dfDL.model == "ZeroPadded"].index, inplace=True)
dfDL.reset_index(drop=True, inplace=True)

for f in glob(f"{csv_root}/RAWs/*.csv"):
    if "DL_Results" in f:
        continue
    # Subject ID and model name are encoded in the CSV filename.
    subID = os.path.basename(f).split("_")[0]
    if "zpad" in f:
        model = "ZeroPadded"
    else:
        model = os.path.basename(f).split("_")[-1].split(".")[0].capitalize()
    df = pd.read_csv(f)
    df.subject = subID
    df.model = model
    # Rename e.g. SSIMZPad -> SSIMOut so baseline columns line up with DL ones.
    newcols = {}
    for c in [c for c in df.columns if "ZPad" in c]:
        newcols[c] = c.split("ZPad")[0]+"Out"
    df.rename(columns=newcols, inplace=True)
    df.undersampling = df.undersampling.str.replace("WoPadWoPad", "WoPad")
    df.drop(df[df.undersampling == "Center4Mask2WoPad"].index, inplace=True)
    df.reset_index(drop=True, inplace=True)
    # DataFrame.append was removed in pandas 2.0; concat is the supported way.
    dfDL = pd.concat([dfDL, df])

dfDL.reset_index(drop=True, inplace=True)
dfDL.to_csv(f"{csv_root}/consolidated.csv")
1,213
32.722222
163
py
DDoS
DDoS-master/visualisation/get_numbers.py
import pandas as pd from glob import glob from tqdm import tqdm import os from scipy.stats import mannwhitneyu #Step 3 of 4 ignore_antipasto = True consolidated_csv = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Results/DDoS_Paper1/dynDualChn/DDoS-UNet/FullVol/woZPad/Results/QuantitativeAnalysis/Sources/consolidated.csv" # consolidated_csv = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Results/DDoS_Paper1/dynDualChn/DDoS-UNet/FullVol/ZPad/Results/consolidated_baseNonDyn.csv" df = pd.read_csv(consolidated_csv) if ignore_antipasto: df = df[~df.file.str.contains("TP01")] target_df = df[df.model=="DDoS"] def getStrings(mean, median, std, metric): avg = str(mean[metric].round(3).apply(str).str.cat(std[metric].round(3).apply(str), sep="±")) med = str(median[metric]) return avg, med def getInfo(df, groupby, metric_type="Out"): mean = df.groupby(groupby).mean() median = df.groupby(groupby).median() std = df.groupby(groupby).std() avgstrSSIM, medstrSSIM = getStrings(mean, median, std, 'SSIM'+metric_type) avgstrNRMSE, medstrNRMSE = getStrings(mean, median, std, 'NRMSE'+metric_type) avgstrPSNR, medstrPSNR = getStrings(mean, median, std, 'PSNR'+metric_type) avgstrDiff, medstrDiff = getStrings(mean, median, std, 'Diff'+metric_type) return (avgstrSSIM, medstrSSIM), (avgstrNRMSE, medstrNRMSE), (avgstrPSNR, medstrPSNR), (avgstrDiff, medstrDiff) def writeIndividual(file_obj, val, metric): file_obj.write("\n----------------------------\n") file_obj.write("\n") file_obj.write(f"\nAverage {metric}:\n") file_obj.write(val[0]) file_obj.write(f"\nMedian {metric}:\n") file_obj.write(val[1]) file_obj.write("\n----------------------------\n") file_obj.write("\n") def writeSubDF(df, target_df, file_obj, model, groupby, metric_type="Out"): SSIM, NRMSE, PSNR, Diff = getInfo(df, groupby, metric_type) file_obj.write("\n----------------------------\n") file_obj.write("\n----------------------------\n") file_obj.write(f"\nModel: {model}\n") 
file_obj.write("\n----------------------------\n") file_obj.write("\n----------------------------\n") file_obj.write("\n") writeIndividual(file_obj, SSIM, "SSIM") file_obj.write(f"\np-value: {getP(df, target_df, metric='SSIM', metric_type=metric_type)}\n") writeIndividual(file_obj, NRMSE, "NRMSE") file_obj.write(f"\np-value: {getP(df, target_df, metric='NRMSE', metric_type=metric_type)}\n") writeIndividual(file_obj, PSNR, "PSNR") file_obj.write(f"\np-value: {getP(df, target_df, metric='PSNR', metric_type=metric_type)}\n") writeIndividual(file_obj, Diff, "Diff") file_obj.write(f"\np-value: {getP(df, target_df, metric='Diff', metric_type=metric_type)}\n") file_obj.write("\n----------------------------\n") file_obj.write("\n----------------------------\n") def getP(model_df, target_df, metric, metric_type="Out"): pstring = "\n" if len(target_df) > 0: for us in model_df.undersampling.unique(): target = target_df[target_df.undersampling==us][metric+metric_type] current = model_df[model_df.undersampling==us][metric+"Out"] pstring += us + ": " + str(mannwhitneyu(target,current).pvalue) + "\n" return pstring else: return "-1" #Model-wise undersampling scores with open(f"{os.path.dirname(consolidated_csv)}/scores_model_undersampling{('_noAP' if ignore_antipasto else '')}.txt","w") as file_obj: # m0 = df.model.unique()[0] # model_df = df[df.model == m0] # writeSubDF(model_df, target_df, file_obj, "Trilinear", "undersampling", metric_type="Inp") for m in df.model.unique(): model_df = df[df.model == m] writeSubDF(model_df, target_df, file_obj, m, "undersampling", metric_type="Out") #Subject Model-wise undersampling scores with open(f"{os.path.dirname(consolidated_csv)}/scores_subject_model_undersampling{('_noAP' if ignore_antipasto else '')}.txt","w") as file_obj: for s in df.subject.unique(): sub_df = df[df.subject == s] sub_target_df = target_df[target_df.subject == s] file_obj.write("\n§§§§§§§§§§§§§§§§§§§§§§§§§§§§\n") file_obj.write("\n§§§§§§§§§§§§§§§§§§§§§§§§§§§§\n") 
file_obj.write(f"\nSubject: {s}\n") file_obj.write("\n§§§§§§§§§§§§§§§§§§§§§§§§§§§§\n") file_obj.write("\n§§§§§§§§§§§§§§§§§§§§§§§§§§§§\n") file_obj.write("\n") # m0 = sub_df.model.unique()[0] # model_df = sub_df[sub_df.model == m0] # writeSubDF(model_df, sub_target_df, file_obj, "Trilinear", "undersampling", metric_type="Inp") for m in sub_df.model.unique(): model_df = sub_df[sub_df.model == m] writeSubDF(model_df, sub_target_df, file_obj, m, "undersampling", metric_type="Out")
4,880
45.485714
191
py
DDoS
DDoS-master/visualisation/consolidate_diffnorm.py
import numpy as np import pandas as pd from glob import glob from tqdm import tqdm import os import nibabel as nib def MinMax(data): return (data-data.min())/(data.max()-data.min()) #Step 1 (alternative) of 3 results_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Results/DDoS_Paper1/dynDualChn/DDoS-UNet/FullVol/Results" dataset_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/3DDynTest" results_csvs = glob(f"{results_root}/**/Results.csv", recursive=True) dfs = [] for csv in tqdm(results_csvs): df = pd.read_csv(csv) fileparts = csv.split(os.path.sep) if "ZeroPadded" not in csv: recontype, subject, _, _, _ = fileparts[-2].split("_") undersampling = fileparts[-3].split("_")[2] model = fileparts[-4] else: recontype, subject, _, _, _, undersampling = fileparts[-2].split("_") model = "ZeroPadded" undersampling += "WoPad" df.columns = df.columns.str.replace('Inp', 'Out') df["recontype"] = recontype df["subject"] = subject df["undersampling"] = undersampling df["model"] = model df['DiffInp'] = 1.0 df['DiffOut'] = 1.0 datasetpath=f"{dataset_root}/{subject.split('Abd3DDyn')[0]}Abdomen3DDyn/DynProtocol{subject.split('Abd3DDyn')[1][0]}/Filtered/hrTestDyn{subject.split('Abd3DDyn')[1][1:].replace('conST', 'ConST')}" for f in df.file.unique(): tp = f.split("_")[0] tppath = glob(f"{datasetpath}/{tp}/*.nii*")[0] gt = MinMax(nib.load(tppath).get_fdata()) try: inppath = glob(csv.replace(".csv",f"/{f}/inp.nii*"))[0] inp = MinMax(nib.load(inppath).get_fdata()) diff_inp = gt - inp nib.save(nib.Nifti1Image(diff_inp, np.eye(4)), csv.replace(".csv",f"/{f}/diff_inp.nii.gz")) diff_inp_std = np.std(diff_inp) if "ZeroPadded" not in csv: df.loc[df.file==f, "DiffInp"] = diff_inp_std else: df.loc[df.file==f, "DiffOut"] = diff_out_std except: pass try: outpath = glob(csv.replace(".csv",f"/{f}/out.nii*"))[0] out = MinMax(nib.load(outpath).get_fdata()) diff_out = gt - out nib.save(nib.Nifti1Image(diff_out, np.eye(4)), 
csv.replace(".csv",f"/{f}/diff_out.nii.gz")) diff_out_std = np.std(diff_out) df.loc[df.file==f, "DiffOut"] = diff_out_std except: pass dfs.append(df) df = pd.concat(dfs) df.to_csv(f"{results_root}/consolidated_withdiffnorm.csv")
2,592
35.521127
200
py
DDoS
DDoS-master/utils/elastic_transform.py
#!/usr/bin/env python ''' Purpose : ''' from numbers import Number from typing import Optional, Tuple, Union import numpy as np import torch import torch as th import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter __author__ = "Kartik Prabhu, Mahantesh Pattadkal, and Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Kartik Prabhu", "Mahantesh Pattadkal", "Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" from torchio.utils import to_tuple try: from torchio import RandomElasticDeformation except: from torchio.transforms.augmentation import RandomElasticDeformation from torch.cuda.amp import autocast from airlab import utils as tu from airlab.transformation.pairwise import _KernelTransformation from airlab.transformation.utils import compute_grid from airlab.utils import kernelFunction as utils SPLINE_ORDER = 3 """ Warp image with displacement * input(tensor) : input of shape (N, C, H_\text{in}, W_\text{in})(N,C,H,W) (4-D case)(N,C,D,H ,W) (5-D case) * grid(tensor): flow-field of shape (N, H_\text{out}, W_\text{out}, 2)(N,H ,W,2) (4-D case) or (N, D, H, W, 3)(N,D,H,W,3) (5-D case) * mult = true if batched input """ def warp_image(image, displacement, multi=False): image_size = image.size() #[B, D, H, W] batch_size = image_size[0] if multi: image_size = image_size[2:]#[D, H, W] grid = compute_grid(image_size, dtype=image.dtype, device=image.device) grid = displacement + grid grid = torch.cat([grid] * batch_size, dim=0) # batch number of times # warp image if multi: warped_image = F.grid_sample(image, grid) #[B, C, D, H, W] else: warped_image = F.grid_sample(image.unsqueeze(0).unsqueeze(0), grid) #[B, C, D, H, W], unsqueeze to give batch and channel dimension return warped_image #[B, C, D, H, W] """ Base class for 
kernel transformations """ class _ParameterizedKernelTransformation(_KernelTransformation): def __init__(self, image_size, rnd_grid_params=None, diffeomorphic=False, dtype=th.float32, device='cpu'): super(_ParameterizedKernelTransformation, self).__init__(image_size, diffeomorphic, dtype, device) self.rnd_grid_params = rnd_grid_params def get_coarse_field(self, grid_shape, max_displacement, num_locked_borders, ): coarse_field = th.rand(self._dim, *grid_shape) # [0, 1) coarse_field -= 0.5 # [-0.5, 0.5) coarse_field *= 2 # [-1, 1] for dimension in range(3): # [-max_displacement, max_displacement) coarse_field[dimension, ...] *= max_displacement[dimension] # Set displacement to 0 at the borders for i in range(num_locked_borders): coarse_field[:, i, :] = 0 coarse_field[:, -1 - i, :] = 0 coarse_field[:, :, i] = 0 coarse_field[:, :, -1 - i] = 0 return coarse_field.unsqueeze(0) def _initialize(self): cp_grid = np.ceil(np.divide(self._image_size, self._stride)).astype(dtype=int) # new image size after convolution inner_image_size = np.multiply(self._stride, cp_grid) - (self._stride - 1) # add one control point at each side cp_grid = cp_grid + 2 # image size with additional control points new_image_size = np.multiply(self._stride, cp_grid) - (self._stride - 1) # center image between control points image_size_diff = inner_image_size - self._image_size image_size_diff_floor = np.floor((np.abs(image_size_diff)/2))*np.sign(image_size_diff) self._crop_start = image_size_diff_floor + np.remainder(image_size_diff, 2)*np.sign(image_size_diff) self._crop_end = image_size_diff_floor # create transformation parameters if self.rnd_grid_params is None: cp_grid = [1, self._dim] + cp_grid.tolist() self.trans_parameters = Parameter(th.Tensor(*cp_grid)) self.trans_parameters.data.fill_(0) else: self.trans_parameters = Parameter(self.get_coarse_field(cp_grid, self.rnd_grid_params['max_displacement'], self.rnd_grid_params['num_locked_borders'])) # copy to gpu if needed 
self.to(dtype=self._dtype, device=self._device) # convert to integer self._padding = self._padding.astype(dtype=int).tolist() self._stride = self._stride.astype(dtype=int).tolist() self._crop_start = self._crop_start.astype(dtype=int) self._crop_end = self._crop_end.astype(dtype=int) size = [1, 1] + new_image_size.astype(dtype=int).tolist() self._displacement_tmp = th.empty(*size, dtype=self._dtype, device=self._device) size = [1, 1] + self._image_size.astype(dtype=int).tolist() self._displacement = th.empty(*size, dtype=self._dtype, device=self._device) """ bspline kernel transformation """ class ParameterizedBsplineTransformation(_ParameterizedKernelTransformation): def __init__(self, image_size, sigma, rnd_grid_params=None, diffeomorphic=False, order=2, dtype=th.float32, device='cpu'): super(ParameterizedBsplineTransformation, self).__init__(image_size, rnd_grid_params, diffeomorphic, dtype, device) self._stride = np.array(sigma) # compute bspline kernel self._kernel = utils.bspline_kernel(sigma, dim=self._dim, order=order, asTensor=True, dtype=dtype) self._padding = (np.array(self._kernel.size()) - 1) / 2 self._kernel.unsqueeze_(0).unsqueeze_(0) self._kernel = self._kernel.expand(self._dim, *((np.ones(self._dim + 1, dtype=int)*-1).tolist())) self._kernel = self._kernel.to(dtype=dtype, device=self._device) self._initialize() class RandomElasticDeformation(nn.Module): def __init__( self, num_control_points: Union[int, Tuple[int, int, int]] = 7, max_displacement: Union[float, Tuple[float, float, float]] = 7.5, locked_borders: int = 2, ): super().__init__() self.num_control_points = to_tuple(num_control_points, length=3) self.parse_control_points(self.num_control_points) self.max_displacement = to_tuple(max_displacement, length=3) self.parse_max_displacement(self.max_displacement) self.num_locked_borders = locked_borders if locked_borders not in (0, 1, 2): raise ValueError('locked_borders must be 0, 1, or 2') if locked_borders == 2 and 4 in self.num_control_points: 
message = ( 'Setting locked_borders to 2 and using less than 5 control' 'points results in an identity transform. Lock fewer borders' ' or use more control points.' ) raise ValueError(message) self.bspline_params = {'max_displacement':self.max_displacement, 'num_locked_borders':self.num_locked_borders} @staticmethod def parse_control_points( num_control_points: Tuple[int, int, int], ) -> None: for axis, number in enumerate(num_control_points): if not isinstance(number, int) or number < 4: message = ( f'The number of control points for axis {axis} must be' f' an integer greater than 3, not {number}' ) raise ValueError(message) @staticmethod def parse_max_displacement( max_displacement: Tuple[float, float, float], ) -> None: for axis, number in enumerate(max_displacement): if not isinstance(number, Number) or number < 0: message = ( 'The maximum displacement at each control point' f' for axis {axis} must be' f' a number greater or equal to 0, not {number}' ) raise ValueError(message) """ Images: shape of [N,D,H,W] or [N,H,W] """ def forward(self, images): bspline_transform = ParameterizedBsplineTransformation(images.size()[2:], #ignore batch and channel dim sigma=self.num_control_points, rnd_grid_params=self.bspline_params, diffeomorphic=True, order=SPLINE_ORDER, device=images.device) displacement = bspline_transform.get_displacement() inv_displacement = bspline_transform.get_inverse_displacement() warped_images = warp_image(images, displacement, multi=True) return warped_images, displacement, inv_displacement
9,147
40.022422
163
py
DDoS
DDoS-master/utils/datasets_dyn.py
# from __future__ import self.logger.debug_function, division import fnmatch import glob import os import sys from random import randint, random, seed import nibabel import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torchvision.transforms as transforms from torch.utils.data import Dataset from utils.customutils import createCenterRatioMask, performUndersampling __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" torch.manual_seed(2020) np.random.seed(2020) seed(2020) class SRDataset(Dataset): def __init__(self,logger, patch_size, dir_path, label_dir_path, stride_depth=16, stride_length=32, stride_width=32, Size=4000, fly_under_percent=None, patch_size_us=None, return_coords=False, pad_patch=True, pre_interpolate=None, norm_data=True, pre_load=False, dyn=True, noncumulative=False): self.patch_size = patch_size #-1 = full vol self.stride_depth = stride_depth self.stride_length = stride_length self.stride_width = stride_width self.size = Size self.logger = logger self.fly_under_percent = fly_under_percent #if None, then use already undersampled data. Gets priority over patch_size_us. 
They are both mutually exclusive self.return_coords = return_coords self.pad_patch = pad_patch self.pre_interpolate = pre_interpolate if patch_size == patch_size_us: patch_size_us = None if patch_size!=-1 and patch_size_us is not None: stride_length_us = stride_length // (patch_size//patch_size_us) stride_width_us = stride_width // (patch_size//patch_size_us) self.stride_length_us = stride_length_us self.stride_width_us = stride_width_us elif patch_size==-1: patch_size_us = None if self.fly_under_percent is not None: patch_size_us = None self.patch_size_us = patch_size_us #If already downsampled data is supplied, then this can be used. Calculate already based on the downsampling size. self.norm_data = norm_data self.pre_load = pre_load self.dyn = dyn self.noncumulative = noncumulative self.pre_loaded_lbl = {} self.pre_loaded_img = {} if not self.norm_data: print("No Norm") #TODO remove # Constants self.IMAGE_FILE_NAME = "imageFilename" self.IMAGE_FILE_SHAPE = "imageFileShape" self.IMAGE_FILE_MAXVAL = "imageFileMaxVal" self.LABEL_FILE_NAME = "labelFilename" self.LABEL_FILE_SHAPE = "labelFileShape" self.LABEL_FILE_MAXVAL = "labelFileMaxVal" self.LABEL_PREV_FILE_NAME = "labelPrevFilename" self.LABEL_PREV_FILE_SHAPE = "labelPrevFileShape" self.LABEL_PREV_FILE_MAXVAL = "labelPrevFileMaxVal" self.STARTINDEX_DEPTH = "startIndex_depth" self.STARTINDEX_LENGTH = "startIndex_length" self.STARTINDEX_WIDTH = "startIndex_width" self.STARTINDEX_DEPTH_US = "startIndex_depth_us" self.STARTINDEX_LENGTH_US = "startIndex_length_us" self.STARTINDEX_WIDTH_US = "startIndex_width_us" self.trans = transforms.ToTensor() # used to convert tiffimagefile to tensor dataDict = { self.IMAGE_FILE_NAME: [], self.IMAGE_FILE_SHAPE: [], self.IMAGE_FILE_MAXVAL:[], self.LABEL_FILE_NAME: [], self.LABEL_FILE_SHAPE: [], self.LABEL_FILE_MAXVAL:[], self.STARTINDEX_DEPTH: [],self.STARTINDEX_LENGTH: [],self.STARTINDEX_WIDTH: [], self.STARTINDEX_DEPTH_US: [],self.STARTINDEX_LENGTH_US: 
[],self.STARTINDEX_WIDTH_US: []} column_names = [ self.IMAGE_FILE_NAME, self.IMAGE_FILE_SHAPE, self.IMAGE_FILE_MAXVAL, self.LABEL_FILE_NAME, self.LABEL_FILE_SHAPE, self.LABEL_FILE_MAXVAL, self.STARTINDEX_DEPTH, self.STARTINDEX_LENGTH,self.STARTINDEX_WIDTH, self.STARTINDEX_DEPTH_US, self.STARTINDEX_LENGTH_US,self.STARTINDEX_WIDTH_US] self.data = pd.DataFrame(columns=column_names) files_us = glob.glob(dir_path+'/**/*.nii', recursive = True) files_us += glob.glob(dir_path+'/**/*.nii.gz', recursive = True) for imageFileName in files_us: labelFileName = imageFileName.replace(dir_path[:-1], label_dir_path[:-1]) #[:-1] is needed to remove the trailing slash for shitty windows if imageFileName == labelFileName: sys.exit('Input and Output save file') if not(os.path.isfile(imageFileName) and os.path.isfile(labelFileName)): #trick to include the other file extension if labelFileName.endswith('.nii.nii.gz'): labelFileName = labelFileName.replace('.nii.nii.gz', '.nii.gz') elif labelFileName.endswith('.nii.gz'): labelFileName = labelFileName.replace('.nii.gz', '.nii') else: labelFileName = labelFileName.replace('.nii', '.nii.gz') #check again, after replacing the file extension if not(os.path.isfile(imageFileName) and os.path.isfile(labelFileName)): self.logger.debug("skipping file as label for the corresponding image doesn't exist :"+ str(imageFileName)) continue imageFile = nibabel.load(imageFileName) # shape (Length X Width X Depth X Channels) header_shape_us = imageFile.header.get_data_shape() imageFile_data = imageFile.get_data() imageFile_max = imageFile_data.max() labelFile = nibabel.load(labelFileName) # shape (Length X Width X Depth X Channels) - changed to label file name as input image can have different (lower) size header_shape = labelFile.header.get_data_shape() labelFile_data = labelFile.get_data() labelFile_max = labelFile_data.max() self.logger.debug(header_shape) n_depth,n_length,n_width = header_shape[2],header_shape[0],header_shape[1] # gives depth which is 
no. of slices n_depth_us,n_length_us,n_width_us = header_shape_us[2],header_shape_us[0],header_shape_us[1] # gives depth which is no. of slices if self.pre_load: self.pre_loaded_img[imageFileName] = imageFile_data self.pre_loaded_lbl[labelFileName] = labelFile_data if patch_size!=1 and (n_depth<patch_size or n_length<patch_size or n_width<patch_size): self.logger.debug("skipping file because of its size being less than the patch size :"+ str(imageFileName)) continue ############ Following the fully sampled size if patch_size != -1: depth_i =0 ranger_depth = int((n_depth-patch_size)/stride_depth)+1 for depth_index in range(ranger_depth if n_depth%patch_size==0 else ranger_depth+1): # iterate through the whole image voxel, and extract patch length_i = 0 # self.logger.debug("depth") # self.logger.debug(depth_i) ranger_length = int((n_length-patch_size)/stride_length)+1 for length_index in range(ranger_length if n_length%patch_size==0 else ranger_length+1): width_i = 0 # self.logger.debug("length") # self.logger.debug(length_i) ranger_width = int((n_width - patch_size)/stride_width)+1 for width_index in range(ranger_width if n_width%patch_size==0 else ranger_width+1): # self.logger.debug("width") # self.logger.debug(width_i) dataDict[self.IMAGE_FILE_NAME].append(imageFileName) dataDict[self.IMAGE_FILE_SHAPE].append(header_shape_us) dataDict[self.IMAGE_FILE_MAXVAL].append(imageFile_max) dataDict[self.LABEL_FILE_NAME].append(labelFileName) dataDict[self.LABEL_FILE_SHAPE].append(header_shape) dataDict[self.LABEL_FILE_MAXVAL].append(labelFile_max) dataDict[self.STARTINDEX_DEPTH].append(depth_i) dataDict[self.STARTINDEX_LENGTH].append(length_i) dataDict[self.STARTINDEX_WIDTH].append(width_i) if patch_size_us is None: #data is zero padded dataDict[self.STARTINDEX_DEPTH_US].append(depth_i) dataDict[self.STARTINDEX_LENGTH_US].append(length_i) dataDict[self.STARTINDEX_WIDTH_US].append(width_i) width_i += stride_width length_i += stride_length depth_i += stride_depth else: 
dataDict[self.IMAGE_FILE_NAME].append(imageFileName) dataDict[self.IMAGE_FILE_SHAPE].append(header_shape_us) dataDict[self.IMAGE_FILE_MAXVAL].append(imageFile_max) dataDict[self.LABEL_FILE_NAME].append(labelFileName) dataDict[self.LABEL_FILE_SHAPE].append(header_shape) dataDict[self.LABEL_FILE_MAXVAL].append(labelFile_max) dataDict[self.STARTINDEX_DEPTH].append(0) dataDict[self.STARTINDEX_LENGTH].append(0) dataDict[self.STARTINDEX_WIDTH].append(0) dataDict[self.STARTINDEX_DEPTH_US].append(0) dataDict[self.STARTINDEX_LENGTH_US].append(0) dataDict[self.STARTINDEX_WIDTH_US].append(0) ############ Following the undersampled size, only if patch_size_us has been provied if patch_size_us is not None: depth_i =0 ranger_depth = int((n_depth_us-patch_size_us)/stride_depth)+1 for depth_index in range(ranger_depth if n_depth_us%patch_size_us==0 else ranger_depth+1): # iterate through the whole image voxel, and extract patch length_i = 0 # self.logger.debug("depth") # self.logger.debug(depth_i) ranger_length = int((n_length_us-patch_size_us)/stride_length_us)+1 for length_index in range(ranger_length if n_length_us%patch_size_us==0 else ranger_length+1): width_i = 0 # self.logger.debug("length") # self.logger.debug(length_i) ranger_width = int((n_width_us - patch_size_us)/stride_width_us)+1 for width_index in range(ranger_width if n_width_us%patch_size_us==0 else ranger_width+1): # self.logger.debug("width") # self.logger.debug(width_i) dataDict[self.STARTINDEX_DEPTH_US].append(depth_i) dataDict[self.STARTINDEX_LENGTH_US].append(length_i) dataDict[self.STARTINDEX_WIDTH_US].append(width_i) width_i += stride_width_us length_i += stride_length_us depth_i += stride_depth self.data = pd.DataFrame.from_dict(dataDict) self.logger.debug(len(self.data)) if self.dyn: inp_dicts, files_inp = self._process_TPs(files_us) files_gt = glob.glob(label_dir_path+'/**/*.nii', recursive = True) files_gt += glob.glob(label_dir_path+'/**/*.nii.gz', recursive = True) gt_dicts, _ = 
self._process_TPs(files_gt) tp_dicts = [] for filename in files_inp: inp_files = [d for d in inp_dicts if filename in d['filename']] gt_files = [d for d in gt_dicts if filename in d['filename']] tps = list(set(dic["tp"] for dic in inp_files)) tp_prev = tps.pop(0) for tp in tps: # inp_tp_prev = [d for d in inp_files if tp_prev == d['tp']] gt_tp_prev = [d for d in gt_files if tp_prev == d['tp']] inp_tp = [d for d in inp_files if tp == d['tp']] # gt_tp = [d for d in gt_files if tp == d['tp']] tp_prev = tp if not self.noncumulative else tp_prev gt_tp_prev_datum = self.data[self.data[self.LABEL_FILE_NAME] == gt_tp_prev[0]['path']] tp_dict = { self.LABEL_PREV_FILE_NAME: gt_tp_prev[0]['path'], self.LABEL_PREV_FILE_MAXVAL: gt_tp_prev_datum[self.LABEL_FILE_MAXVAL].iloc[0], self.LABEL_PREV_FILE_SHAPE: gt_tp_prev_datum[self.LABEL_FILE_SHAPE].iloc[0], # "inp_tp_prev": inp_tp_prev[0]['path'], # "gt": gt_tp[0]['path'], "inp_tpkey": inp_tp[0]['path'], "subject_filename": filename, "tpID":tp } tp_dicts.append(tp_dict) self.tp_data = pd.DataFrame.from_dict(tp_dicts) self.data = pd.merge(self.tp_data, self.data, how="left", left_on="inp_tpkey", right_on=self.IMAGE_FILE_NAME) if Size is not None and len(self.data) > Size: self.logger.debug('Dataset is larger tham supplied size. 
Choosing s subset randomly of size '+str(Size)) self.data = self.data.sample(n = Size, replace = False, random_state=2020) if patch_size!=-1 and fly_under_percent is not None: self.mask = createCenterRatioMask(np.zeros((patch_size,patch_size,patch_size)), fly_under_percent) def _process_TPs(self, files): f_dicts = [] for f in files: f_info = {"path": f} f_parts = os.path.normpath(f).split(os.sep) tp = fnmatch.filter(f_parts, "TP*")[0] f_info["filename"] = "_".join(f_parts[f_parts.index(tp)+1:]) f_info["tp"] = int(tp[2:]) f_dicts.append(f_info) f_dicts = sorted(f_dicts, key=lambda k: k['tp']) filenames = list(set(dic["filename"] for dic in f_dicts)) return f_dicts, filenames def __len__(self): return len(self.data) def __getitem__(self, index): imageFile_max = self.data.iloc[index][self.IMAGE_FILE_MAXVAL] labelFile_max = self.data.iloc[index][self.LABEL_FILE_MAXVAL] if self.pre_load: groundTruthImages = self.pre_loaded_lbl[self.data.iloc[index][self.LABEL_FILE_NAME]] groundTruthImages_handler = groundTruthImages else: groundTruthImages = nibabel.load(self.data.iloc[index][self.LABEL_FILE_NAME]) groundTruthImages_handler = groundTruthImages.dataobj startIndex_depth = self.data.iloc[index][self.STARTINDEX_DEPTH] startIndex_length = self.data.iloc[index][self.STARTINDEX_LENGTH] startIndex_width = self.data.iloc[index][self.STARTINDEX_WIDTH] start_coords = [(startIndex_depth, startIndex_length, startIndex_width)] if self.patch_size_us is not None: startIndex_depth_us = self.data.iloc[index][self.STARTINDEX_DEPTH_US] startIndex_length_us = self.data.iloc[index][self.STARTINDEX_LENGTH_US] startIndex_width_us = self.data.iloc[index][self.STARTINDEX_WIDTH_US] start_coords = start_coords + [(startIndex_depth_us, startIndex_length_us, startIndex_width_us)] if self.patch_size != -1: if len(groundTruthImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs target_voxel = 
groundTruthImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, 0, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze() else: target_voxel = groundTruthImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze() else: if len(groundTruthImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs target_voxel = groundTruthImages_handler[:, :, 0, :]#.squeeze() else: target_voxel = groundTruthImages_handler[...]#.squeeze() if self.fly_under_percent is not None: if self.patch_size != -1: voxel = abs(performUndersampling(np.array(target_voxel).copy(), mask=self.mask, zeropad=False)) voxel = voxel[...,::2] #2 for 25% - harcoded. TODO fix it else: mask = createCenterRatioMask(target_voxel, self.fly_under_percent) voxel = abs(performUndersampling(np.array(target_voxel).copy(), mask=mask, zeropad=False)) voxel = voxel[...,::2] #2 for 25% - harcoded. TODO fix it else: if self.pre_load: images = self.pre_loaded_img[self.data.iloc[index][self.IMAGE_FILE_NAME]] images_handler = images else: images = nibabel.load(self.data.iloc[index][self.IMAGE_FILE_NAME]) images_handler = images.dataobj images = nibabel.load(self.data.iloc[index][self.IMAGE_FILE_NAME]) if self.patch_size_us is not None: voxel = images_handler[startIndex_length_us:startIndex_length_us+self.patch_size_us, startIndex_width_us:startIndex_width_us+self.patch_size_us, startIndex_depth_us:startIndex_depth_us+self.patch_size]#.squeeze() else: if self.patch_size != -1 and self.pre_interpolate is None: voxel = images_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze() else: voxel = images_handler[...] 
target_slices = np.moveaxis(np.array(target_voxel), -1, 0).astype( np.float32) # get slices in range, convert to array, change axis of depth (because nibabel gives LXWXD, but we need in DXLXW) slices = np.moveaxis(np.array(voxel),-1, 0).astype(np.float32) #get slices in range, convert to array, change axis of depth (because nibabel gives LXWXD, but we need in DXLXW) patch = torch.from_numpy(slices) # patch = patch/torch.max(patch)# normalisation if self.pre_interpolate: patch = F.interpolate(patch.unsqueeze(0).unsqueeze(0), size=tuple(np.roll(groundTruthImages.shape, 1)), mode=self.pre_interpolate, align_corners=False).squeeze() if self.patch_size != -1: patch = patch[startIndex_depth:startIndex_depth+self.patch_size, startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size] if self.norm_data: patch = patch/imageFile_max# normalisation targetPatch = torch.from_numpy(target_slices) # targetPatch = targetPatch/torch.max(targetPatch) if self.norm_data: targetPatch = targetPatch/labelFile_max if self.dyn: if self.pre_load: prevTPImages = self.pre_loaded_lbl[self.data.iloc[index][self.LABEL_PREV_FILE_NAME]] prevTPImages_handler = prevTPImages else: prevTPImages = nibabel.load(self.data.iloc[index][self.LABEL_PREV_FILE_NAME]) prevTPImages_handler = prevTPImages.dataobj if self.patch_size != -1: if len(prevTPImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs prevTP_voxel = prevTPImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, 0, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze() else: prevTP_voxel = prevTPImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze() else: if len(prevTPImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the 
fully-sampled NIFTIs prevTP_voxel = prevTPImages_handler[:, :, 0, :]#.squeeze() else: prevTP_voxel = prevTPImages_handler[...]#.squeeze() prevTP_slices = np.moveaxis(np.array(prevTP_voxel), -1, 0).astype(np.float32) prevTPPatch = torch.from_numpy(prevTP_slices) # prevTPPatch = prevTPPatch/torch.max(prevTPPatch) if self.norm_data: prevTPPatch = prevTPPatch/self.data.iloc[index][self.LABEL_PREV_FILE_MAXVAL] #to deal the patches which has smaller size if self.pad_patch: pad = () for dim in range(len(targetPatch.shape)): pad_needed = self.patch_size - targetPatch.shape[dim] pad_dim = (pad_needed//2, pad_needed-(pad_needed//2)) pad += pad_dim if self.patch_size_us is None and self.fly_under_percent is None: pad_us = pad else: pad_us = () if self.patch_size_us is None and self.fly_under_percent is not None: real_patch_us = int(self.patch_size * (self.fly_under_percent*2)) #TODO: works for 25%, but not sure about others. Need to fix the logic else: real_patch_us = self.patch_size_us for dim in range(len(patch.shape)): pad_needed = real_patch_us - patch.shape[dim] pad_dim = (pad_needed//2, pad_needed-(pad_needed//2)) pad_us += pad_dim patch = F.pad(patch, pad_us[::-1]) #tuple has to be reveresed before using it for padding. 
As the tuple contains in DHW manner, and input is needed as WHD mannger targetPatch = F.pad(targetPatch, pad[::-1]) if self.dyn: prevTPPatch = F.pad(prevTPPatch, pad[::-1]) else: pad = None if self.dyn: patch = torch.stack([prevTPPatch, patch]) else: patch = patch.unsqueeze(0) targetPatch = targetPatch.unsqueeze(0) if self.return_coords is True: lblfilename = self.data.iloc[index][self.LABEL_FILE_NAME] return patch, targetPatch, np.array(start_coords), os.path.basename(os.path.dirname(lblfilename)) +"_"+os.path.basename(lblfilename), np.array([(self.data.iloc[index][self.LABEL_FILE_SHAPE]), (self.data.iloc[index][self.IMAGE_FILE_SHAPE])]), np.array(pad[::-1]) if pad is not None else -1 else: return patch, targetPatch # DATASET_FOLDER = "/nfs1/schatter/Chimp/data_3D_sr/" # DATASET_FOLDER = r"S:\MEMoRIAL_SharedStorage_M1.2+4+7\Data\Skyra\unet_3D_sr" # US_Folder = 'Center25Mask' # patch_size=64 # import logging # logger = logging.getLogger('x') # traindataset = SRDataset(logger, patch_size, DATASET_FOLDER + '/usVal/' + US_Folder + '/', DATASET_FOLDER + '/hrVal/', stride_depth =64, # stride_length=64, stride_width=64,Size =10, patch_size_us=None, return_coords=True) # train_loader = torch.utils.data.DataLoader(traindataset, batch_size=8, shuffle=True) # for epoch in range(3): # for batch_index, (local_batch, local_labels) in enumerate(train_loader): # self.logger.debug(str(epoch) + " "+ str(batch_index))
24,444
54.938215
300
py
DDoS
DDoS-master/utils/data.py
import fnmatch import os import random from glob import glob import numpy as np import torch import torchio as tio from torchio.data.io import read_image from .motion import MotionCorrupter __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" def create_trainDS(path, p=1, **kwargs): files = glob(path+"/**/*.nii", recursive=True) + glob(path+"/**/*.nii.gz", recursive=True) subjects = [] for file in files: subjects.append(tio.Subject( im=tio.ScalarImage(file), filename=os.path.basename(file), )) moco = MotionCorrupter(**kwargs) transforms = [ tio.Lambda(moco.perform, p = p) ] transform = tio.Compose(transforms) subjects_dataset = tio.SubjectsDataset(subjects, transform=transform) return subjects_dataset def create_trainDS_precorrupt(path_gt, path_corrupt, p=1, norm_mode=0): files = glob(path_gt+"/**/*.nii", recursive=True) + glob(path_gt+"/**/*.nii.gz", recursive=True) subjects = [] for file in files: subjects.append(tio.Subject( im=tio.ScalarImage(file), filename=os.path.basename(file), )) transforms = [ ReadCorrupted(path_corrupt=path_corrupt, p=p, norm_mode=norm_mode) ] transform = tio.Compose(transforms) subjects_dataset = tio.SubjectsDataset(subjects, transform=transform) return subjects_dataset def createTIODS(path_gt, path_corrupt, is_infer=False, p=1, transforms = [], **kwargs): files_gt = glob(path_gt+"/**/*.nii", recursive=True) + glob(path_gt+"/**/*.nii.gz", recursive=True) if path_corrupt: files_inp = glob(path_corrupt+"/**/*.nii", recursive=True) + glob(path_corrupt+"/**/*.nii.gz", recursive=True) corruptFly = False else: files_inp = files_gt.copy() corruptFly = True subjects = [] for file in files_inp: filename = os.path.basename(file) gt_files = [f for f in files_gt if filename in 
f] if len(gt_files) > 0: gt_path = gt_files[0] files_gt.remove(gt_path) subjects.append(tio.Subject( gt=tio.ScalarImage(gt_path), inp=tio.ScalarImage(file), filename=filename, tag="CorruptNGT", )) if corruptFly: moco = MotionCorrupter(**kwargs) transforms.append(tio.Lambda(moco.perform, p = p)) transform = tio.Compose(transforms) subjects_dataset = tio.SubjectsDataset(subjects, transform=transform) return subjects_dataset def __process_TPs(files): f_dicts = [] for f in files: f_info = {"path": f} f_parts = os.path.normpath(f).split(os.sep) tp = fnmatch.filter(f_parts, "TP*")[0] f_info["filename"] = "_".join(f_parts[f_parts.index(tp)+1:]) f_info["tp"] = int(tp[2:]) f_dicts.append(f_info) f_dicts = sorted(f_dicts, key=lambda k: k['tp']) filenames = list(set(dic["filename"] for dic in f_dicts)) return f_dicts, filenames class ProcessTIOSubsTPs(): def __init__(self): pass def __call__(self, subject): gt_tp_prev = subject['gt_tp_prev'][tio.DATA] inp_tp = subject['inp'][tio.DATA] subject["inp"][tio.DATA] = torch.cat([gt_tp_prev, inp_tp], dim=0) return subject def createTIODynDS(path_gt, path_corrupt, is_infer=False, p=1, transforms = [], **kwargs): files_gt = glob(path_gt+"/**/*.nii", recursive=True) + glob(path_gt+"/**/*.nii.gz", recursive=True) if path_corrupt: files_inp = glob(path_corrupt+"/**/*.nii", recursive=True) + glob(path_corrupt+"/**/*.nii.gz", recursive=True) corruptFly = False else: files_inp = files_gt.copy() corruptFly = True subjects = [] inp_dicts, files_inp = __process_TPs(files_inp) gt_dicts, _ = __process_TPs(files_gt) for filename in files_inp: inp_files = [d for d in inp_dicts if filename in d['filename']] gt_files = [d for d in gt_dicts if filename in d['filename']] tps = list(set(dic["tp"] for dic in inp_files)) tp_prev = tps.pop(0) for tp in tps: inp_tp_prev = [d for d in inp_files if tp_prev == d['tp']] gt_tp_prev = [d for d in gt_files if tp_prev == d['tp']] inp_tp = [d for d in inp_files if tp == d['tp']] gt_tp = [d for d in gt_files if tp == 
d['tp']] tp_prev = tp if len(gt_tp_prev) > 0 and len(gt_tp) > 0: subjects.append(tio.Subject( gt_tp_prev=tio.ScalarImage(gt_tp_prev[0]['path']), inp_tp_prev=tio.ScalarImage(inp_tp_prev[0]['path']), gt=tio.ScalarImage(gt_tp[0]['path']), inp=tio.ScalarImage(inp_tp[0]['path']), filename=filename, tp=tp, tag="CorruptNGT", )) else: print("Warning: Not Implemented if GT is missing. Skipping Sub-TP.") continue if corruptFly: moco = MotionCorrupter(**kwargs) transforms.append(tio.Lambda(moco.perform, p = p)) transforms.append(ProcessTIOSubsTPs()) transform = tio.Compose(transforms) subjects_dataset = tio.SubjectsDataset(subjects, transform=transform) return subjects_dataset def create_patchDS(train_subs, val_subs, patch_size, patch_qlen, patch_per_vol, inference_strides): train_queue = None val_queue = None if train_subs is not None: sampler = tio.data.UniformSampler(patch_size) train_queue = tio.Queue( subjects_dataset=train_subs, max_length=patch_qlen, samples_per_volume=patch_per_vol, sampler=sampler, num_workers=0, start_background=True ) if val_subs is not None: overlap = np.subtract(patch_size, inference_strides) grid_samplers = [] for i in range(len(val_subs)): grid_sampler = tio.inference.GridSampler(val_subs[i], patch_size, overlap) grid_samplers.append(grid_sampler) val_queue = torch.utils.data.ConcatDataset(grid_samplers) return train_queue, val_queue class ReadCorrupted(tio.transforms.Transform): def __init__(self, path_corrupt, p=1, norm_mode=0): super().__init__(p=p) self.path_corrupt=path_corrupt self.norm_mode = norm_mode def apply_transform(self, subject): corrupted_query = subject.filename.split(".")[0]+"*" files = glob(self.path_corrupt+"/**/"+corrupted_query, recursive=True) corrupt_path = files[random.randint(0, len(files)-1)] transformed, _ = read_image(corrupt_path) vol = subject['im'][tio.DATA].float() transformed = transformed.float() if self.norm_mode==1: vol = vol/vol.max() transformed = transformed/transformed.max() elif self.norm_mode==2: vol = 
(vol-vol.min())/(vol.max()-vol.min()) transformed = (transformed-transformed.min())/(transformed.max()-transformed.min()) subject['im'][tio.DATA] = torch.cat([vol,transformed], 0) return subject
7,985
38.93
118
py
DDoS
DDoS-master/utils/interpnorm_vols.py
import os import random from glob import glob import nibabel as nib import numpy as np import torch import torch.nn.functional as F from tqdm import tqdm __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" path_woZPad = "/project/schatter/Chimp/Data/usCHAOSWoT2/Center6p25MaskWoPad" path_GT = "/project/schatter/Chimp/Data/hrCHAOS" outpath_interpNorm = "/project/schatter/Chimp/Data/usCHAOSWoT2/Center6p25MaskWoPad_Tri_Norm" outpath_GTNorm = "/project/schatter/Chimp/Data/hrCHAOS_Norm" files = glob(path_woZPad+"/**/*.nii", recursive=True) + glob(path_woZPad+"/**/*.nii.gz", recursive=True) files_gt = glob(path_GT+"/**/*.nii", recursive=True) + glob(path_GT+"/**/*.nii.gz", recursive=True) def SaveNIFTI(data, path): os.makedirs(os.path.split(path)[0], exist_ok=True) nib.save(nib.Nifti1Image(data, np.eye(4)), path) for file in tqdm(files): filename = os.path.basename(file) gt_files = [f for f in files_gt if filename in f] gt_path = gt_files[0] gt = nib.load(gt_path).dataobj[...] gt_max = gt.max() gt = (gt.astype(np.float32))/gt_max SaveNIFTI(gt, gt_path.replace(path_GT, outpath_GTNorm)) images = nib.load(file).dataobj[...] img_max = images.max() images = torch.from_numpy(images.astype(np.float32)) images = F.interpolate(images.unsqueeze(0).unsqueeze(0), mode="trilinear", size=gt.shape).squeeze() images = (images/img_max).numpy() SaveNIFTI(images, file.replace(path_woZPad, outpath_interpNorm))
1,778
35.306122
110
py
DDoS
DDoS-master/utils/utilities.py
import os from copy import deepcopy from statistics import median import random import nibabel as nib import numpy as np import torch import torch.nn.functional as F import torchcomplex.nn.functional as cF import torchio as tio import torchvision.utils as vutils from scipy import ndimage import wandb from pynufft import NUFFT from sewar.full_ref import ssim as SSIM2DCalc from sewar.full_ref import uqi as UQICalc from skimage.metrics import (normalized_root_mse, peak_signal_noise_ratio, structural_similarity) from torchcomplex.utils.signaltools import resample from tricorder.math.transforms.fourier import fftNc_np, ifftNc_np from utils.elastic_transform import RandomElasticDeformation, warp_image __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" class Interpolator(): def __init__(self, mode=None): if mode in ["sinc", "nearest", "linear", "bilinear", "bicubic", "trilinear", "area"]: self.mode = mode else: self.mode = None def perform_sinc(self, images, out_shape): axes = np.argwhere(np.equal(images.shape[2:], out_shape) == False).squeeze(1) #2 dims for batch and channel out_shape = [out_shape[i] for i in axes] return resample(images, out_shape, axis=axes+2) #2 dims for batch and channel def __call__(self, images, out_shape): if self.mode is None: return images elif images.is_complex(): return cF.interpolate(images, size=out_shape, mode=self.mode) elif self.mode == "sinc": return self.perform_sinc(images, out_shape) else: return F.interpolate(images, size=out_shape, mode=self.mode) def tensorboard_images(writer, inputs, outputs, targets, epoch, section='train'): writer.add_image('{}/output'.format(section), vutils.make_grid(outputs[0, 0, ...], normalize=True, 
scale_each=True), epoch) if inputs is not None: writer.add_image('{}/input'.format(section), vutils.make_grid(inputs[0, 0, ...], normalize=True, scale_each=True), epoch) if targets is not None: writer.add_image('{}/target'.format(section), vutils.make_grid(targets[0, 0, ...], normalize=True, scale_each=True), epoch) def SaveNIFTI(data, file_path): """Save a NIFTI file using given file path from an array Using: NiBabel""" if(np.iscomplex(data).any()): data = abs(data) nii = nib.Nifti1Image(data, np.eye(4)) nib.save(nii, file_path) def sharpTP(vol, alpha=0.5): filteredVOL = ndimage.gaussian_filter(vol, 1) return vol + alpha * (vol - filteredVOL) def applyDCS(output, fully, under_mask=None, missing_mask=None, mat=None, isCartesian=True, norm=True): if norm: fully /= fully.max() output /= output.max() if isCartesian: fullyK = fftNc_np(fully, axes=(0,1)) underK = fullyK*under_mask outK = fftNc_np(output, axes=(0,1)) missingK = outK*missing_mask finalK = underK+missingK return abs(ifftNc_np(finalK, axes=(0,1))) else: om = mat['om'] invom = mat['invom'] fullom = mat['fullom'] dcfFullRes = mat['dcfFullRes'].squeeze() imageSize = fully.shape[0] baseresolution = imageSize*2 interpolationSize4NUFFT = 6 NufftObjOM = NUFFT() NufftObjInvOM = NUFFT() NufftObjFullOM = NUFFT() Nd = (baseresolution, baseresolution) # image size Kd = (baseresolution*2, baseresolution*2) # k-space size - TODO: multiply back by 2 Jd = (interpolationSize4NUFFT, interpolationSize4NUFFT) # interpolation size NufftObjOM.plan(om, Nd, Kd, Jd) NufftObjInvOM.plan(invom, Nd, Kd, Jd) NufftObjFullOM.plan(fullom, Nd, Kd, Jd) for slc in range(fully.shape[-1]): oversam_fully = np.zeros((baseresolution,baseresolution), dtype=fully.dtype) oversam_fully[imageSize//2:imageSize+imageSize//2,imageSize//2:imageSize+imageSize//2] = fully[...,slc] oversam_output = np.zeros((baseresolution,baseresolution), dtype=output.dtype) oversam_output[imageSize//2:imageSize+imageSize//2,imageSize//2:imageSize+imageSize//2] = 
output[...,slc] yUnder = NufftObjOM.forward(oversam_fully) yMissing = NufftObjInvOM.forward(oversam_output) yCorrected = np.concatenate((yUnder,yMissing)) yCorrected = np.multiply(dcfFullRes,yCorrected) oversam_output_corrected = NufftObjFullOM.adjoint(yCorrected) output_corrected = oversam_output_corrected[imageSize//2:imageSize+imageSize//2,imageSize//2:imageSize+imageSize//2] output[...,slc] = abs(output_corrected).astype(fully.dtype) return output def process_DDoS_SRPrev(SRPrev, start_coords, patch_size, pad, lr_imgs): for i in range(lr_imgs.shape[0]): (startIndex_depth, startIndex_length, startIndex_width) = start_coords[i][0].numpy() if patch_size != -1: prevTP_voxel = SRPrev[startIndex_length:startIndex_length+patch_size, startIndex_width:startIndex_width+patch_size, startIndex_depth:startIndex_depth+patch_size]#.squeeze() else: prevTP_voxel = SRPrev[...]#.squeeze() prevTP_slices = np.moveaxis(np.array(prevTP_voxel), -1, 0).astype(np.float32) prevTPPatch = torch.from_numpy(prevTP_slices) prevTPPatch = prevTPPatch/SRPrev.max() lr_imgs[i,0] = F.pad(prevTPPatch, tuple(pad[i])) return lr_imgs def process_valBatch(batch): inp = [] gt = [] gt_flag = [] for i in range(len(batch['tag'])): gt_flag.append(True) batch_tag = batch['tag'][i] if batch_tag == "CorruptNGT" or batch_tag == "GTOnly": inp.append(batch['inp'][tio.DATA][i,...]) gt.append(batch['gt'][tio.DATA][i,...]) elif batch_tag == "FlyCorrupt": gt.append(batch['im'][tio.DATA][i,0,...].unsqueeze(1)) if batch['im'][tio.DATA].shape[1] == 2: inp.append(batch['im'][tio.DATA][i,1,...].unsqueeze(1)) else: #Use motion free image inp.append(deepcopy(gt[-1])) elif batch_tag == "CorruptOnly": inp.append(batch['inp'][tio.DATA][i,...]) gt.append(batch['inp'][tio.DATA][i,...]) gt_flag[-1] = False inp = torch.stack(inp,dim=0) gt = torch.stack(gt,dim=0) return inp, gt, gt_flag def getSSIM(gt, out, gt_flag=None, data_range=1): if gt_flag is None: gt_flag = np.ones(gt.shape[0]) vals = [] for i in range(gt.shape[0]): if not 
bool(gt_flag[i]): continue for j in range(gt.shape[1]): vals.append(structural_similarity(gt[i,j,...], out[i,j,...], data_range=data_range)) return median(vals) def calc_metircs(gt, out, tag): ssim, ssimMAP = structural_similarity(gt, out, data_range=1, full=True) nrmse = normalized_root_mse(gt, out) psnr = peak_signal_noise_ratio(gt, out, data_range=1) uqi = UQICalc(gt, out) metrics = { "SSIM"+tag: ssim, "NRMSE"+tag: nrmse, "PSNR"+tag: psnr, "UQI"+tag: uqi } return metrics, ssimMAP def MinMax(data): return (data-data.min())/(data.max()-data.min()) def convert_image(img, source, target): """ Convert an image from a source format to a target format. :param img: image :param source: source format, one of 'pil' (PIL image), '[0, 1]' or '[-1, 1]' (pixel value ranges) :param target: target format, one of 'pil' (PIL image), '[0, 255]', '[0, 1]', '[-1, 1]' (pixel value ranges), 'imagenet-norm' (pixel values standardized by imagenet mean and std.), 'y-channel' (luminance channel Y in the YCbCr color format, used to calculate PSNR and SSIM) :return: converted image """ assert source in {'pil', '[0, 1]', '[-1, 1]'}, "Cannot convert from source format %s!" % source assert target in {'pil', '[0, 255]', '[0, 1]', '[-1, 1]', 'imagenet-norm', 'y-channel'}, "Cannot convert to target format %s!" % target # Convert from source to [0, 1] if source == 'pil': img = FT.to_tensor(img) elif source == '[0, 1]': pass # already in [0, 1] elif source == '[-1, 1]': img = (img + 1.) / 2. # Convert from [0, 1] to target if target == 'pil': img = FT.to_pil_image(img) elif target == '[0, 255]': img = 255. * img elif target == '[0, 1]': pass # already in [0, 1] elif target == '[-1, 1]': img = 2. * img - 1. 
elif target == 'imagenet-norm': if img.ndimension() == 3: img = (img - imagenet_mean) / imagenet_std elif img.ndimension() == 4: img = (img - imagenet_mean_cuda) / imagenet_std_cuda elif target == 'y-channel': # Based on definitions at https://github.com/xinntao/BasicSR/wiki/Color-conversion-in-SR # torch.dot() does not work the same way as numpy.dot() # So, use torch.matmul() to find the dot product between the last dimension of an 4-D tensor and a 1-D tensor img = torch.matmul(255. * img.permute(0, 2, 3, 1)[:, 4:-4, 4:-4, :], rgb_weights) / 255. + 16. return img class ResSaver(): def __init__(self, out_path, save_inp=False, save_out=True, analyse_out=True, do_norm=False): self.out_path = out_path self.save_inp = save_inp self.do_norm = do_norm self.save_out = save_out self.analyse_out = analyse_out def CalcNSave(self, out, inp, gt, outfolder, already_numpy=False): outpath = os.path.join(self.out_path, outfolder) os.makedirs(outpath, exist_ok=True) if not already_numpy: inp = inp.numpy() out = out.numpy() if self.save_out: SaveNIFTI(out, os.path.join(outpath, "out.nii.gz")) if self.save_inp: SaveNIFTI(inp, os.path.join(outpath, "inp.nii.gz")) if gt is not None: if not already_numpy: gt = gt.numpy() if self.do_norm: inp = convert_image(inp, source='[-1, 1]', target='[0, 1]') #inp/inp.max() gt = convert_image(gt, source='[-1, 1]', target='[0, 1]') #gt/gt.max() if self.analyse_out: out = convert_image(out, source='[-1, 1]', target='[0, 1]') #out/out.max() out_metrics, out_ssimMAP = calc_metircs(gt, out, tag="Out") SaveNIFTI(out_ssimMAP, os.path.join(outpath, "ssimMAPOut.nii.gz")) else: out_metrics = {} inp_metrics, inp_ssimMAP = calc_metircs(gt, inp, tag="Inp") SaveNIFTI(inp_ssimMAP, os.path.join(outpath, "ssimMAPInp.nii.gz")) metrics = {**out_metrics, **inp_metrics} return metrics #The WnB functions are here, but not been tested (even not finished) def WnB_ArtefactLog_DS(run, datasets, meta={}, names = ["training", "validation", "test"], 
description="Train-Val(-Test) Split"): raw_data = wandb.Artifact("DSSplit", type="dataset", description=description, metadata={"sizes": [len(dataset) for dataset in datasets], **meta}) for name, dataset in zip(names, datasets): with raw_data.new_file(name + ".npz", mode="wb") as file: np.savez(file, ds=dataset) run.log_artifact(raw_data) def WnB_ReadArtefact_DS(run, tag="latest", names = ["training", "validation", "test"]): raw_data_artifact = run.use_artifact('DSSplit:'+tag) raw_dataset = raw_data_artifact.download() datasets = [] for split in names: raw_split = np.load(os.path.join(raw_dataset, split + ".npz"))['ds'] datasets.append(raw_split) return datasets def WnB_ArtefactLog_Model(run, model, config, description="MyModel"): model_artifact = wandb.Artifact("Model", type="model", description=description, metadata=dict(config)) model.save("initialized_model.keras") model_artifact.add_file("initialized_model.keras") wandb.save("initialized_model.keras") run.log_artifact(model_artifact) def WnB_ReadArtefact_Model(run, tag="latest"): model_artifact = run.use_artifact('Model:'+tag) model_dir = model_artifact.download() model_path = os.path.join(model_dir, "initialized_model.keras") model = keras.models.load_model(model_path) model_config = model_artifact.metadata return model, model_config def deformOTF(input_batch): elastic = RandomElasticDeformation( num_control_points=random.choice([5, 6, 7]), max_displacement=random.uniform(0.7, 2.0), locked_borders=2 ) elastic.cuda() input_batch_transformed, _, _ = elastic(input_batch) input_batch_transformed = torch.nan_to_num(input_batch_transformed) return input_batch_transformed #/ torch.amax(input_batch_transformed, dim=[1,2,3,4])
13,876
38.991354
184
py
DDoS
DDoS-master/utils/datasets.py
# from __future__ import self.logger.debug_function, division import glob import os import sys from random import randint, random, seed import nibabel import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torchvision.transforms as transforms from torch.utils.data import Dataset from utils.customutils import createCenterRatioMask, performUndersampling __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" torch.manual_seed(2020) np.random.seed(2020) seed(2020) class SRDataset(Dataset): def __init__(self,logger, patch_size, dir_path, label_dir_path, stride_depth=16, stride_length=32, stride_width=32, Size=4000, fly_under_percent=None, patch_size_us=None, return_coords=False, pad_patch=True, pre_interpolate=None, norm_data=True, pre_load=False): self.patch_size = patch_size #-1 = full vol self.stride_depth = stride_depth self.stride_length = stride_length self.stride_width = stride_width self.size = Size self.logger = logger self.fly_under_percent = fly_under_percent #if None, then use already undersampled data. Gets priority over patch_size_us. 
They are both mutually exclusive self.return_coords = return_coords self.pad_patch = pad_patch self.pre_interpolate = pre_interpolate if patch_size == patch_size_us: patch_size_us = None if patch_size!=-1 and patch_size_us is not None: stride_length_us = stride_length // (patch_size//patch_size_us) stride_width_us = stride_width // (patch_size//patch_size_us) self.stride_length_us = stride_length_us self.stride_width_us = stride_width_us elif patch_size==-1: patch_size_us = None if self.fly_under_percent is not None: patch_size_us = None self.patch_size_us = patch_size_us #If already downsampled data is supplied, then this can be used. Calculate already based on the downsampling size. self.norm_data = norm_data self.pre_load = pre_load self.pre_loaded_lbl = {} self.pre_loaded_img = {} if not self.norm_data: print("No Norm") #TODO remove # Constants self.IMAGE_FILE_NAME = "imageFilename" self.IMAGE_FILE_SHAPE = "imageFileShape" self.IMAGE_FILE_MAXVAL = "imageFileMaxVal" self.LABEL_FILE_NAME = "labelFilename" self.LABEL_FILE_SHAPE = "labelFileShape" self.LABEL_FILE_MAXVAL = "labelFileMaxVal" self.STARTINDEX_DEPTH = "startIndex_depth" self.STARTINDEX_LENGTH = "startIndex_length" self.STARTINDEX_WIDTH = "startIndex_width" self.STARTINDEX_DEPTH_US = "startIndex_depth_us" self.STARTINDEX_LENGTH_US = "startIndex_length_us" self.STARTINDEX_WIDTH_US = "startIndex_width_us" self.trans = transforms.ToTensor() # used to convert tiffimagefile to tensor dataDict = { self.IMAGE_FILE_NAME: [], self.IMAGE_FILE_SHAPE: [], self.IMAGE_FILE_MAXVAL:[], self.LABEL_FILE_NAME: [], self.LABEL_FILE_SHAPE: [], self.LABEL_FILE_MAXVAL:[], self.STARTINDEX_DEPTH: [],self.STARTINDEX_LENGTH: [],self.STARTINDEX_WIDTH: [], self.STARTINDEX_DEPTH_US: [],self.STARTINDEX_LENGTH_US: [],self.STARTINDEX_WIDTH_US: []} column_names = [ self.IMAGE_FILE_NAME, self.IMAGE_FILE_SHAPE, self.IMAGE_FILE_MAXVAL, self.LABEL_FILE_NAME, self.LABEL_FILE_SHAPE, self.LABEL_FILE_MAXVAL, self.STARTINDEX_DEPTH, 
self.STARTINDEX_LENGTH,self.STARTINDEX_WIDTH, self.STARTINDEX_DEPTH_US, self.STARTINDEX_LENGTH_US,self.STARTINDEX_WIDTH_US] self.data = pd.DataFrame(columns=column_names) files_us = glob.glob(dir_path+'/**/*.nii', recursive = True) files_us += glob.glob(dir_path+'/**/*.nii.gz', recursive = True) for imageFileName in files_us: labelFileName = imageFileName.replace(dir_path[:-1], label_dir_path[:-1]) #[:-1] is needed to remove the trailing slash for shitty windows if imageFileName == labelFileName: sys.exit('Input and Output save file') if not(os.path.isfile(imageFileName) and os.path.isfile(labelFileName)): #trick to include the other file extension if labelFileName.endswith('.nii.nii.gz'): labelFileName = labelFileName.replace('.nii.nii.gz', '.nii.gz') elif labelFileName.endswith('.nii.gz'): labelFileName = labelFileName.replace('.nii.gz', '.nii') else: labelFileName = labelFileName.replace('.nii', '.nii.gz') #check again, after replacing the file extension if not(os.path.isfile(imageFileName) and os.path.isfile(labelFileName)): self.logger.debug("skipping file as label for the corresponding image doesn't exist :"+ str(imageFileName)) continue imageFile = nibabel.load(imageFileName) # shape (Length X Width X Depth X Channels) header_shape_us = imageFile.header.get_data_shape() imageFile_data = imageFile.get_data() imageFile_max = imageFile_data.max() labelFile = nibabel.load(labelFileName) # shape (Length X Width X Depth X Channels) - changed to label file name as input image can have different (lower) size header_shape = labelFile.header.get_data_shape() labelFile_data = labelFile.get_data() labelFile_max = labelFile_data.max() self.logger.debug(header_shape) n_depth,n_length,n_width = header_shape[2],header_shape[0],header_shape[1] # gives depth which is no. of slices n_depth_us,n_length_us,n_width_us = header_shape_us[2],header_shape_us[0],header_shape_us[1] # gives depth which is no. 
of slices if self.pre_load: self.pre_loaded_img[imageFileName] = imageFile_data self.pre_loaded_lbl[labelFileName] = labelFile_data if patch_size!=1 and (n_depth<patch_size or n_length<patch_size or n_width<patch_size): self.logger.debug("skipping file because of its size being less than the patch size :"+ str(imageFileName)) continue ############ Following the fully sampled size if patch_size != -1: depth_i =0 ranger_depth = int((n_depth-patch_size)/stride_depth)+1 for depth_index in range(ranger_depth if n_depth%patch_size==0 else ranger_depth+1): # iterate through the whole image voxel, and extract patch length_i = 0 # self.logger.debug("depth") # self.logger.debug(depth_i) ranger_length = int((n_length-patch_size)/stride_length)+1 for length_index in range(ranger_length if n_length%patch_size==0 else ranger_length+1): width_i = 0 # self.logger.debug("length") # self.logger.debug(length_i) ranger_width = int((n_width - patch_size)/stride_width)+1 for width_index in range(ranger_width if n_width%patch_size==0 else ranger_width+1): # self.logger.debug("width") # self.logger.debug(width_i) dataDict[self.IMAGE_FILE_NAME].append(imageFileName) dataDict[self.IMAGE_FILE_SHAPE].append(header_shape_us) dataDict[self.IMAGE_FILE_MAXVAL].append(imageFile_max) dataDict[self.LABEL_FILE_NAME].append(labelFileName) dataDict[self.LABEL_FILE_SHAPE].append(header_shape) dataDict[self.LABEL_FILE_MAXVAL].append(labelFile_max) dataDict[self.STARTINDEX_DEPTH].append(depth_i) dataDict[self.STARTINDEX_LENGTH].append(length_i) dataDict[self.STARTINDEX_WIDTH].append(width_i) if patch_size_us is None: #data is zero padded dataDict[self.STARTINDEX_DEPTH_US].append(depth_i) dataDict[self.STARTINDEX_LENGTH_US].append(length_i) dataDict[self.STARTINDEX_WIDTH_US].append(width_i) width_i += stride_width length_i += stride_length depth_i += stride_depth else: dataDict[self.IMAGE_FILE_NAME].append(imageFileName) dataDict[self.IMAGE_FILE_SHAPE].append(header_shape_us) 
dataDict[self.IMAGE_FILE_MAXVAL].append(imageFile_max) dataDict[self.LABEL_FILE_NAME].append(labelFileName) dataDict[self.LABEL_FILE_SHAPE].append(header_shape) dataDict[self.LABEL_FILE_MAXVAL].append(labelFile_max) dataDict[self.STARTINDEX_DEPTH].append(0) dataDict[self.STARTINDEX_LENGTH].append(0) dataDict[self.STARTINDEX_WIDTH].append(0) dataDict[self.STARTINDEX_DEPTH_US].append(0) dataDict[self.STARTINDEX_LENGTH_US].append(0) dataDict[self.STARTINDEX_WIDTH_US].append(0) ############ Following the undersampled size, only if patch_size_us has been provied if patch_size_us is not None: depth_i =0 ranger_depth = int((n_depth_us-patch_size_us)/stride_depth)+1 for depth_index in range(ranger_depth if n_depth_us%patch_size_us==0 else ranger_depth+1): # iterate through the whole image voxel, and extract patch length_i = 0 # self.logger.debug("depth") # self.logger.debug(depth_i) ranger_length = int((n_length_us-patch_size_us)/stride_length_us)+1 for length_index in range(ranger_length if n_length_us%patch_size_us==0 else ranger_length+1): width_i = 0 # self.logger.debug("length") # self.logger.debug(length_i) ranger_width = int((n_width_us - patch_size_us)/stride_width_us)+1 for width_index in range(ranger_width if n_width_us%patch_size_us==0 else ranger_width+1): # self.logger.debug("width") # self.logger.debug(width_i) dataDict[self.STARTINDEX_DEPTH_US].append(depth_i) dataDict[self.STARTINDEX_LENGTH_US].append(length_i) dataDict[self.STARTINDEX_WIDTH_US].append(width_i) width_i += stride_width_us length_i += stride_length_us depth_i += stride_depth self.data = pd.DataFrame.from_dict(dataDict) self.logger.debug(len(self.data)) if Size is not None and len(self.data) > Size: self.logger.debug('Dataset is larger tham supplied size. 
Choosing s subset randomly of size '+str(Size)) self.data = self.data.sample(n = Size, replace = False, random_state=2020) if patch_size!=-1 and fly_under_percent is not None: self.mask = createCenterRatioMask(np.zeros((patch_size,patch_size,patch_size)), fly_under_percent) def __len__(self): return len(self.data) def __getitem__(self, index): ''' imageFilename: 0 imageFileShape: 1 imageFileMaxVal: 2 labelFilename: 3 labelFileShape: 4 labelFileMaxVal: 5 startIndex_depth : 6 startIndex_length : 7 startIndex_width : 8 startIndex_depth_us : 9 startIndex_length_us : 10 startIndex_width_us : 11 ''' imageFile_max = self.data.iloc[index, 2] labelFile_max = self.data.iloc[index, 5] if self.pre_load: groundTruthImages = self.pre_loaded_lbl[self.data.iloc[index, 3]] groundTruthImages_handler = groundTruthImages else: groundTruthImages = nibabel.load(self.data.iloc[index, 3]) groundTruthImages_handler = groundTruthImages.dataobj startIndex_depth = self.data.iloc[index, 6] startIndex_length = self.data.iloc[index, 7] startIndex_width = self.data.iloc[index, 8] start_coords = [(startIndex_depth, startIndex_length, startIndex_width)] if self.patch_size_us is not None: startIndex_depth_us = self.data.iloc[index, 9] startIndex_length_us = self.data.iloc[index, 10] startIndex_width_us = self.data.iloc[index, 11] start_coords = start_coords + [(startIndex_depth_us, startIndex_length_us, startIndex_width_us)] if self.patch_size != -1: if len(groundTruthImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs target_voxel = groundTruthImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, 0, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze() else: target_voxel = groundTruthImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze() else: if 
len(groundTruthImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs target_voxel = groundTruthImages_handler[:, :, 0, :]#.squeeze() else: target_voxel = groundTruthImages_handler[...]#.squeeze() if self.fly_under_percent is not None: if self.patch_size != -1: voxel = abs(performUndersampling(np.array(target_voxel).copy(), mask=self.mask, zeropad=False)) voxel = voxel[...,::2] #2 for 25% - harcoded. TODO fix it else: mask = createCenterRatioMask(target_voxel, self.fly_under_percent) voxel = abs(performUndersampling(np.array(target_voxel).copy(), mask=mask, zeropad=False)) voxel = voxel[...,::2] #2 for 25% - harcoded. TODO fix it else: if self.pre_load: images = self.pre_loaded_img[self.data.iloc[index, 0]] images_handler = images else: images = nibabel.load(self.data.iloc[index, 0]) images_handler = images.dataobj images = nibabel.load(self.data.iloc[index, 0]) if self.patch_size_us is not None: voxel = images_handler[startIndex_length_us:startIndex_length_us+self.patch_size_us, startIndex_width_us:startIndex_width_us+self.patch_size_us, startIndex_depth_us:startIndex_depth_us+self.patch_size]#.squeeze() else: if self.patch_size != -1 and self.pre_interpolate is None: voxel = images_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze() else: voxel = images_handler[...] 
target_slices = np.moveaxis(np.array(target_voxel), -1, 0).astype( np.float32) # get slices in range, convert to array, change axis of depth (because nibabel gives LXWXD, but we need in DXLXW) slices = np.moveaxis(np.array(voxel),-1, 0).astype(np.float32) #get slices in range, convert to array, change axis of depth (because nibabel gives LXWXD, but we need in DXLXW) patch = torch.from_numpy(slices) # patch = patch/torch.max(patch)# normalisation if self.pre_interpolate: patch = F.interpolate(patch.unsqueeze(0).unsqueeze(0), size=tuple(np.roll(groundTruthImages.shape, 1)), mode=self.pre_interpolate, align_corners=False).squeeze() if self.patch_size != -1: patch = patch[startIndex_depth:startIndex_depth+self.patch_size, startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size] if self.norm_data: patch = patch/imageFile_max# normalisation targetPatch = torch.from_numpy(target_slices) # targetPatch = targetPatch/torch.max(targetPatch) if self.norm_data: targetPatch = targetPatch/labelFile_max #to deal the patches which has smaller size if self.pad_patch: pad = () for dim in range(len(targetPatch.shape)): pad_needed = self.patch_size - targetPatch.shape[dim] pad_dim = (pad_needed//2, pad_needed-(pad_needed//2)) pad += pad_dim if self.patch_size_us is None and self.fly_under_percent is None: pad_us = pad else: pad_us = () if self.patch_size_us is None and self.fly_under_percent is not None: real_patch_us = int(self.patch_size * (self.fly_under_percent*2)) #TODO: works for 25%, but not sure about others. Need to fix the logic else: real_patch_us = self.patch_size_us for dim in range(len(patch.shape)): pad_needed = real_patch_us - patch.shape[dim] pad_dim = (pad_needed//2, pad_needed-(pad_needed//2)) pad_us += pad_dim patch = F.pad(patch, pad_us[::-1]) #tuple has to be reveresed before using it for padding. 
As the tuple contains in DHW manner, and input is needed as WHD mannger targetPatch = F.pad(targetPatch, pad[::-1]) else: pad = None if self.return_coords is True: return patch, targetPatch, np.array(start_coords), os.path.basename(self.data.iloc[index, 3]), np.array([(self.data.iloc[index, 4]), (self.data.iloc[index, 1])]), np.array(pad[::-1]) if pad is not None else -1 else: return patch, targetPatch # DATASET_FOLDER = "/nfs1/schatter/Chimp/data_3D_sr/" # DATASET_FOLDER = r"S:\MEMoRIAL_SharedStorage_M1.2+4+7\Data\Skyra\unet_3D_sr" # US_Folder = 'Center25Mask' # patch_size=64 # import logging # logger = logging.getLogger('x') # traindataset = SRDataset(logger, patch_size, DATASET_FOLDER + '/usVal/' + US_Folder + '/', DATASET_FOLDER + '/hrVal/', stride_depth =64, # stride_length=64, stride_width=64,Size =10, patch_size_us=None, return_coords=True) # train_loader = torch.utils.data.DataLoader(traindataset, batch_size=8, shuffle=True) # for epoch in range(3): # for batch_index, (local_batch, local_labels) in enumerate(train_loader): # self.logger.debug(str(epoch) + " "+ str(batch_index))
19,535
53.266667
261
py
DDoS
DDoS-master/utils/customutils.py
import numpy as np
import scipy.io as sio

__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"


def fft2c(x, shape=None, axes=(0, 1), shiftAxes=(0, 1), normalize=None):
    """Centred 2D FFT of ``x`` along ``axes``.

    Applies ifftshift -> fft2 -> fftshift so that the zero-frequency
    component ends up in the centre of the spectrum.

    BUGFIX: the original looped over trailing axes and ignored the
    ``axes``/``shiftAxes`` parameters entirely; it also pre-allocated the
    output with the *input* shape, which broke when ``shape`` (``s=``)
    requested a different FFT size. The vectorized form below honours all
    parameters and is identical to the old behaviour for default calls
    (2D/3D/4D input with axes=(0, 1)).

    :param x: input array (the transform is 2D over ``axes``; any extra
              trailing axes are batched).
    :param shape: optional output FFT size, forwarded as ``s=`` to fft2.
    :param axes: pair of axes over which the 2D FFT is taken.
    :param shiftAxes: axes to (i)fftshift before/after the transform.
    :param normalize: forwarded to numpy's ``norm=`` argument.
    :return: complex spectrum with the DC term centred.
    """
    return np.fft.fftshift(
        np.fft.fft2(np.fft.ifftshift(x, axes=shiftAxes),
                    s=shape, axes=axes, norm=normalize),
        axes=shiftAxes)


def ifft2c(x, shape=None, axes=(0, 1), shiftAxes=(0, 1), normalize=None):
    """Centred 2D inverse FFT of ``x`` along ``axes``.

    Exact inverse of :func:`fft2c` (see its docstring for the parameter
    semantics and the bug fixed relative to the original looped version).
    """
    return np.fft.fftshift(
        np.fft.ifft2(np.fft.ifftshift(x, axes=shiftAxes),
                     s=shape, axes=axes, norm=normalize),
        axes=shiftAxes)


def createCenterRatioMask(slice, percent, returnNumLinesRemoved=False):
    """Build a binary k-space mask keeping only a central block of ``slice``.

    Rows are stripped symmetrically one pair at a time and columns are
    stripped proportionally (preserving the aspect ratio) until the kept
    fraction drops to at most ``percent``.

    NOTE: the parameter name ``slice`` shadows the builtin; kept for
    backward compatibility with positional/keyword callers.

    :param slice: 2D array whose shape defines the mask size.
    :param percent: target kept fraction in (0, 1]; loop stops at the first
                    mask whose nonzero ratio is <= percent.
    :param returnNumLinesRemoved: if True, also return the number of
        removed lines per dimension as a tuple.
    :return: mask, or (mask, (linesRemoved_dim1, linesRemoved_dim2)).
    """
    dim1 = slice.shape[0]
    dim2 = slice.shape[1]
    ratio = dim2 / dim1
    mask = np.ones(slice.shape)
    dim1_now = dim1
    dim2_should = dim2
    i = 0
    currentPercent = 1
    while currentPercent > percent:
        i += 1
        # strip one more row pair from top and bottom
        mask[0:i, :] = 0
        mask[slice.shape[0] - i:, :] = 0
        dim1_now = dim1 - (i * 2)
        # strip columns so the kept block keeps the original aspect ratio
        dim2_should = round(dim1_now * ratio)
        dim2_removal = int((dim2 - dim2_should) / 2)
        mask[:, 0:dim2_removal] = 0
        mask[:, slice.shape[1] - dim2_removal:] = 0
        currentPercent = np.count_nonzero(mask) / mask.size
    if returnNumLinesRemoved:
        linesRemoved_dim1 = dim1 - dim1_now
        linesRemoved_dim2 = dim2 - dim2_should
        return mask, (linesRemoved_dim1, linesRemoved_dim2)
    else:
        return mask


def performUndersampling(fullImgVol, mask=None, maskmatpath=None, zeropad=True):
    """Undersample an image volume in k-space and return the aliased image.

    Either ``mask`` or ``maskmatpath`` must be supplied; the path is only
    consulted when no mask array is given.

    :param fullImgVol: fully-sampled image volume (H x W x D expected by
        the zero-padded masking path).
    :param mask: 2D sampling mask applied to every slice.
    :param maskmatpath: .mat file containing the mask under key 'mask'.
    :param zeropad: keep unsampled k-space lines as zeros (True) or crop
        them away (False).
    :return: complex undersampled image volume.
    """
    fullKSPVol = fft2c(fullImgVol)
    underKSPVol = performUndersamplingKSP(fullKSPVol, mask, maskmatpath, zeropad)
    underImgVol = ifft2c(underKSPVol)
    return underImgVol


def performUndersamplingKSP(fullKSPVol, mask=None, maskmatpath=None, zeropad=True):
    """Apply a 2D sampling ``mask`` to every slice of a k-space volume.

    Either ``mask`` or ``maskmatpath`` must be supplied; the path is only
    consulted when no mask array is given.

    :param fullKSPVol: k-space volume, slice dimension last (H x W x D).
    :param zeropad: if True, zero out unsampled lines (output keeps the
        input shape); if False, drop rows/columns whose mask line is all
        zero (output shrinks accordingly).
    :return: undersampled k-space volume.
    """
    if mask is None:
        mask = sio.loadmat(maskmatpath)['mask']
    if zeropad:
        # broadcast the 2D mask over the slice axis, then restore layout
        underKSPVol = np.multiply(fullKSPVol.transpose((2, 0, 1)), mask).transpose((1, 2, 0))
    else:
        # keep only rows with at least one sampled point ...
        temp = []
        for i in range(mask.shape[0]):
            maskline = mask[i, :]
            if maskline.any():
                temp.append(fullKSPVol[i, ...])
        temp = np.array(temp)
        # ... then only columns with at least one sampled point
        underKSPVol = []
        for i in range(mask.shape[1]):
            maskline = mask[:, i]
            if maskline.any():
                underKSPVol.append(temp[:, i, ...])
        underKSPVol = np.array(underKSPVol).swapaxes(0, 1)
    return underKSPVol
3,761
36.247525
121
py
DDoS
DDoS-master/utils/__init__.py
0
0
0
py
DDoS
DDoS-master/utils/motion.py
import math
import multiprocessing.dummy as multiprocessing
import random
from collections import defaultdict
from typing import List

import numpy as np
import SimpleITK as sitk
import torch
import torchio as tio
from scipy.ndimage import affine_transform
from torchio.transforms import Motion, RandomMotion
from torchio.transforms.interpolation import Interpolation

__author__ = "Soumick Chatterjee, Alessandro Sciarra"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Alessandro Sciarra"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"


class CustomMotion(Motion):
    """torchio Motion variant that corrupts k-space along a chosen axis.

    noise_dir: 0, 1 or 2 — spectrum axis along which the motion-corrupted
    segments are stitched; -1 picks 0 or 1 at random per call.
    """

    def __init__(self, noise_dir=2, **kargs):
        super(CustomMotion, self).__init__(**kargs)
        self.noise_dir = noise_dir

    def add_artifact(
            self,
            image: sitk.Image,
            transforms: List[sitk.Euler3DTransform],
            times: np.ndarray,
            interpolation: Interpolation,
    ):
        """Resample the image once per transform and merge the spectra.

        Each transformed copy contributes a contiguous band of k-space
        (along ``noise_dir``) according to ``times``, emulating motion
        that happened partway through the acquisition.
        """
        images = self.resample_images(image, transforms, interpolation)
        arrays = [sitk.GetArrayViewFromImage(im) for im in images]
        arrays = [array.transpose() for array in arrays]  # ITK to NumPy
        spectra = [self.fourier_transform(array) for array in arrays]
        self.sort_spectra(spectra, times)
        result_spectrum = np.empty_like(spectra[0])
        noise_dir = self.noise_dir
        if noise_dir == -1:
            noise_dir = random.randint(0, 1)
        # extent of the chosen k-space axis; band boundaries come from `times`
        last_index = result_spectrum.shape[noise_dir]
        indices = (last_index * times).astype(int).tolist()
        indices.append(last_index)
        ini = 0
        for spectrum, fin in zip(spectra, indices):
            # slice position depends on which axis carries the corruption
            if noise_dir == 0:
                result_spectrum[..., ini:fin, :, :] = spectrum[..., ini:fin, :, :]
            elif noise_dir == 1:
                result_spectrum[..., ini:fin, :] = spectrum[..., ini:fin, :]
            else:  # original torchio behaviour (last axis)
                result_spectrum[..., ini:fin] = spectrum[..., ini:fin]
            ini = fin
        result_image = np.real(self.inv_fourier_transform(result_spectrum))
        return result_image.astype(np.float32)


class CustomRandomMotion(RandomMotion):
    """Random-parameter wrapper around :class:`CustomMotion`."""

    def __init__(self, noise_dir=2, **kwargs):
        super(CustomRandomMotion, self).__init__(**kwargs)
        self.noise_dir = noise_dir

    def apply_transform(self, subject):
        """Sample per-image motion parameters and apply CustomMotion."""
        arguments = defaultdict(dict)
        for name, image in self.get_images_dict(subject).items():
            params = self.get_params(
                self.degrees_range,
                self.translation_range,
                self.num_transforms,
                is_2d=image.is_2d(),
            )
            times_params, degrees_params, translation_params = params
            arguments['times'][name] = times_params
            arguments['degrees'][name] = degrees_params
            arguments['translation'][name] = translation_params
            arguments['image_interpolation'][name] = self.image_interpolation
        transform = CustomMotion(noise_dir=self.noise_dir, **self.add_include_exclude(arguments))
        transformed = transform(subject)
        return transformed


class RealityMotion():
    """Physically-motivated motion: a fresh random rigid transform per
    phase-encoding line, merged in k-space.

    NOTE(review): assumes the input volume squeezes to 3D — confirm with
    callers before reusing elsewhere.
    """

    def __init__(self, n_threads=4, mu=0, sigma=0.1, random_sigma=True):
        self.n_threads = n_threads          # 0 => run serially
        self.mu = mu                        # mean of the motion-parameter noise
        self.sigma = sigma                  # std-dev of the motion-parameter noise
        self.sigma_limit = sigma            # upper bound when random_sigma is on
        self.random_sigma = random_sigma    # resample sigma uniformly per call

    def __perform_singlePE(self, idx):
        """Corrupt phase-encoding line ``idx`` with one random rigid move.

        float()/int() extract Python scalars up front — the original passed
        size-1 ndarrays to math.cos/int, which NumPy deprecates.
        """
        rot_x = float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1)
        rot_y = float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1)
        rot_z = float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1)
        tran_x = int(float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1))
        tran_y = int(float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1))
        tran_z = int(float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1))
        temp_vol = self.__rot_tran_3d(self.in_vol, rot_x, rot_y, rot_z, tran_x, tran_y, tran_z)
        temp_k = np.fft.fftn(temp_vol)
        # copy only this PE line of the moved volume into the shared k-space
        for slc in range(self.in_vol.shape[2]):
            self.out_k[idx, :, slc] = temp_k[idx, :, slc]

    def __call__(self, vol):
        if self.random_sigma:
            self.sigma = random.uniform(0, self.sigma_limit)
        shape = vol.shape
        device = vol.device
        self.in_vol = vol.squeeze().cpu().numpy()
        self.in_vol = self.in_vol / self.in_vol.max()  # normalise to [0, 1]
        self.out_k = np.zeros((self.in_vol.shape)) + 0j
        if self.n_threads > 0:
            # dummy.Pool = threads; corruption of PE lines is independent
            pool = multiprocessing.Pool(self.n_threads)
            pool.map(self.__perform_singlePE, range(self.in_vol.shape[0]))
            pool.close()   # original leaked the pool; close/join releases threads
            pool.join()
        else:
            for idx in range(self.in_vol.shape[0]):
                self.__perform_singlePE(idx)
        vol = np.abs(np.fft.ifftn(self.out_k))
        vol = torch.from_numpy(vol).view(shape).to(device)
        del self.in_vol, self.out_k
        return vol

    def __x_rotmat(self, theta):
        """Rotation matrix about x by ``theta`` radians."""
        cos_t = math.cos(theta)
        sin_t = math.sin(theta)
        return np.array([[1, 0, 0],
                         [0, cos_t, -sin_t],
                         [0, sin_t, cos_t]])

    def __y_rotmat(self, theta):
        """Rotation matrix about y by ``theta`` radians."""
        cos_t = math.cos(theta)
        sin_t = math.sin(theta)
        return np.array([[cos_t, 0, sin_t],
                         [0, 1, 0],
                         [-sin_t, 0, cos_t]])

    def __z_rotmat(self, theta):
        """Rotation matrix about z by ``theta`` radians."""
        cos_t = math.cos(theta)
        sin_t = math.sin(theta)
        return np.array([[cos_t, -sin_t, 0],
                         [sin_t, cos_t, 0],
                         [0, 0, 1]])

    def __rot_tran_3d(self, J, rot_x, rot_y, rot_z, tran_x, tran_y, tran_z):
        """Apply a rigid (rotate + translate) transform to volume ``J``.

        BUGFIX: rotations are composed with matrix multiplication (@).
        The original used elementwise '*', which zeroes most off-diagonal
        terms and does not yield a valid rotation matrix.
        """
        M = self.__x_rotmat(rot_x) @ self.__y_rotmat(rot_y) @ self.__z_rotmat(rot_z)
        translation = ([tran_x, tran_y, tran_z])
        K = affine_transform(J, M, translation, order=1)
        return K / (K.max() + 1e-16)


class MotionCorrupter():
    """Facade selecting one of three motion-corruption strategies.

    mode 0: torchio RandomMotion; mode 1: CustomRandomMotion (direction-
    specific); mode 2: RealityMotion (per-PE-line rigid moves).
    ``perform`` returns the clean and corrupted volumes concatenated on
    dim 0 so callers get an aligned pair.
    """

    def __init__(self, mode=0, degrees=10, translation=10, num_transforms=2, image_interpolation='linear',
                 norm_mode=0, noise_dir=2, mu=0, sigma=0.1, random_sigma=False, n_threads=4):
        self.mode = mode                    # 0: TorchIO's version, 1: Custom direction specific motion, 2: Reality
        self.degrees = degrees
        self.translation = translation
        self.num_transforms = num_transforms
        self.image_interpolation = image_interpolation
        self.norm_mode = norm_mode          # 0: No Norm, 1: Divide by Max, 2: MinMax
        self.noise_dir = noise_dir          # 0, 1 or 2 - motion direction, only for custom random
        self.mu = mu                        # Only for Reality Motion
        self.sigma = sigma                  # Only for Reality Motion
        self.random_sigma = random_sigma    # Only for Reality Motion - randomise sigma in [0, sigma]
        self.n_threads = n_threads          # Only for Reality Motion - worker threads; 0 runs serially
        if mode == 0:  # TorchIO's version
            self.corrupter = tio.transforms.RandomMotion(degrees=degrees, translation=translation,
                                                         num_transforms=num_transforms,
                                                         image_interpolation=image_interpolation)
        elif mode == 1:  # Custom Motion
            self.corrupter = CustomRandomMotion(degrees=degrees, translation=translation,
                                                num_transforms=num_transforms,
                                                image_interpolation=image_interpolation,
                                                noise_dir=noise_dir)
        elif mode == 2:  # Reality motion.
            self.corrupter = RealityMotion(n_threads=n_threads, mu=mu, sigma=sigma, random_sigma=random_sigma)

    def perform(self, vol):
        """Corrupt ``vol`` and return clean+corrupted stacked on dim 0."""
        vol = vol.float()
        transformed = self.corrupter(vol)
        if self.norm_mode == 1:
            vol = vol / vol.max()
            transformed = transformed / transformed.max()
        elif self.norm_mode == 2:
            vol = (vol - vol.min()) / (vol.max() - vol.min())
            transformed = (transformed - transformed.min()) / (transformed.max() - transformed.min())
        return torch.cat([vol, transformed], 0)
8,325
44.497268
183
py
DDoS
DDoS-master/utils/padding.py
#partial source: https://github.com/c22n/unet-pytorch
from typing import Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function, Variable
from torch.nn.modules.utils import _ntuple

__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Soumick Chatterjee & OvGU:ESF:MEMoRIAL"
__credits__ = ["Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"


def flip(x: Variable, dim: int) -> Variable:
    """Flip torch Variable along given dimension axis."""
    xsize = x.size()
    dim = x.dim() + dim if dim < 0 else dim
    x = x.contiguous().view(-1, *xsize[dim:])
    # reversed index along `dim`, built on the tensor's device
    x = x.view(x.size(0), x.size(1), -1)[:,
        getattr(torch.arange(x.size(1)-1, -1, -1),
                ('cpu', 'cuda')[x.is_cuda])().long(), :]
    return x.view(xsize)


class ReflectionPad3d(nn.Module):
    """Wrapper for ReflectionPadNd function in 3 dimensions.

    ``padding`` follows F.pad ordering: (left, right, top, bottom,
    front, back) — last dimension first. The padding is symmetric
    (edge value included), i.e. np.pad mode='symmetric'.
    """

    def __init__(self, padding: Union[int, Tuple[int]]):
        super(ReflectionPad3d, self).__init__()
        self.padding = _ntuple(6)(padding)

    def forward(self, input: Variable) -> Variable:
        return ReflectionPadNd.apply(input, self.padding)

    def __repr__(self) -> str:
        return self.__class__.__name__ + '(' \
            + str(self.padding) + ')'


class ReflectionPadNd(Function):
    """Symmetric (edge-inclusive) reflection padding for same-size conv layers."""

    @staticmethod
    def forward(ctx: Function, input: Variable, pad: Tuple[int]) -> Variable:
        ctx.pad = pad
        ctx.input_size = input.size()
        ctx.l_inp = len(input.size())
        # flat (l0, r0, l1, r1, ...) -> per-dim pairs, reversed so the
        # last entry of `pad` maps to the first padded dimension
        ctx.pad_tup = tuple([(a, b) for a, b in zip(pad[:-1:2], pad[1::2])][::-1])
        ctx.l_pad = len(ctx.pad_tup)
        ctx.l_diff = ctx.l_inp - ctx.l_pad
        assert ctx.l_inp >= ctx.l_pad

        new_dim = tuple([sum((d,) + ctx.pad_tup[i])
                         for i, d in enumerate(input.size()[-ctx.l_pad:])])
        assert all([d > 0 for d in new_dim]), 'input is too small'

        # Create output tensor by concatenating with reflected chunks.
        output = input.new(input.size()[:(ctx.l_diff)] + new_dim).zero_()
        c_input = input
        for i, p in zip(range(ctx.l_inp)[-ctx.l_pad:], ctx.pad_tup):
            if p[0] > 0:
                # BUGFIX: use the per-dimension width p[0]; the original
                # read pad[0] (first element of the flat tuple), which broke
                # any non-uniform padding with a size mismatch on copy_.
                chunk1 = flip(c_input.narrow(i, 0, p[0]), i)
                c_input = torch.cat((chunk1, c_input), i)
            if p[1] > 0:
                chunk2 = flip(c_input.narrow(i, c_input.shape[i]-p[1], p[1]), i)
                c_input = torch.cat((c_input, chunk2), i)
        output.copy_(c_input)
        return output

    @staticmethod
    def backward(ctx: Function, grad_output: Variable) -> Variable:
        # Crop the incoming gradient back to the input extent. NOTE: the
        # reflected border's gradient contributions are discarded, not
        # accumulated — kept as in the original implementation.
        grad_input = Variable(grad_output.data.new(ctx.input_size).zero_())
        grad_input_slices = [slice(0, x,) for x in ctx.input_size]

        cg_output = grad_output
        for i_s, p in zip(range(ctx.l_inp)[-ctx.l_pad:], ctx.pad_tup):
            if p[0] > 0:
                cg_output = cg_output.narrow(i_s, p[0], cg_output.size(i_s) - p[0])
            if p[1] > 0:
                cg_output = cg_output.narrow(i_s, 0, cg_output.size(i_s) - p[1])
        gis = tuple(grad_input_slices)
        grad_input[gis] = cg_output

        # BUGFIX: forward takes exactly two inputs (input, pad), so backward
        # must return exactly two gradients; the original returned three,
        # which modern autograd rejects with a gradient-count error.
        return grad_input, None
3,703
36.04
80
py
DDoS
DDoS-master/utils/pLoss/Resnet2D.py
#!/usr/bin/env python
"""
Original file Resnet2Dv2b14 of NCC1701
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
#from utils.TorchAct.pelu import PELU_oneparam as PELU

__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2018, Soumick Chatterjee & OvGU:ESF:MEMoRIAL"
__credits__ = ["Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"


class ResidualBlock(nn.Module):
    """Two reflection-padded 3x3 convolutions (norm + activation + dropout
    in between) wrapped in an identity skip connection."""

    def __init__(self, in_features, relu, norm):
        super(ResidualBlock, self).__init__()
        layers = [
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            norm(in_features),
            relu(),
            nn.Dropout2d(p=0.2),
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            norm(in_features),
        ]
        self.conv_block = nn.Sequential(*layers)

    def forward(self, x):
        # residual connection: block output added to its input
        return x + self.conv_block(x)


class ResNet(nn.Module):
    """Encoder / residual-trunk / decoder 2D ResNet.

    A 7x7 stem, `updown_blocks` strided-conv downsampling stages,
    `res_blocks` residual blocks at the bottleneck width, matching
    transposed-conv upsampling stages, a 7x7 head, and a final Sigmoid
    (or activation) depending on `final_out_sigmoid`.
    """

    # should use res_blocks=14: the trainable-parameter count is then close
    # to the number of possible pixel values of a 256x256 image
    def __init__(self, in_channels=1, out_channels=1, res_blocks=14, starting_n_features=64,
                 updown_blocks=2, is_relu_leaky=True, final_out_sigmoid=True, do_batchnorm=True):
        super(ResNet, self).__init__()
        relu = nn.PReLU if is_relu_leaky else nn.ReLU
        norm = nn.BatchNorm2d if do_batchnorm else nn.InstanceNorm2d

        # stem: reflection-padded 7x7 conv
        layers = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(in_channels, starting_n_features, 7),
            norm(starting_n_features),
            relu(),
        ]

        # downsampling: each stage halves resolution and doubles width
        n_feat = starting_n_features
        for _ in range(updown_blocks):
            layers += [
                nn.Conv2d(n_feat, n_feat * 2, 3, stride=2, padding=1),
                norm(n_feat * 2),
                relu(),
            ]
            n_feat *= 2

        # residual trunk at the bottleneck width
        layers += [ResidualBlock(n_feat, relu, norm) for _ in range(res_blocks)]

        # upsampling: mirror of the encoder
        for _ in range(updown_blocks):
            layers += [
                nn.ConvTranspose2d(n_feat, n_feat // 2, 3, stride=2, padding=1, output_padding=1),
                norm(n_feat // 2),
                relu(),
            ]
            n_feat //= 2

        # head: reflection-padded 7x7 conv back to the output channels
        layers += [
            nn.ReflectionPad2d(3),
            nn.Conv2d(starting_n_features, out_channels, 7),
        ]

        # final activation
        layers += [nn.Sigmoid()] if final_out_sigmoid else [relu()]

        self.model = nn.Sequential(*layers)

    def forward(self, input):
        return self.model(input)
3,150
31.484536
294
py
DDoS
DDoS-master/utils/pLoss/__init__.py
0
0
0
py
DDoS
DDoS-master/utils/pLoss/VesselSeg_UNet3d_DeepSup.py
# -*- coding: utf-8 -*-
"""
"""
# from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.utils.data
#from Utils.wta import KWinnersTakeAll

__author__ = "Kartik Prabhu, Mahantesh Pattadkal, and Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Kartik Prabhu", "Mahantesh Pattadkal", "Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"


class conv_block(nn.Module):
    """Double 3D convolution: (Conv3d -> BatchNorm3d -> ReLU) x 2."""

    def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True):
        super(conv_block, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(in_channels=out_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.conv(x)


class conv_block_v2(nn.Module):
    """Double 3D convolution intended for a winner-take-all variant.

    Behaviourally identical to conv_block (the KWinnersTakeAll layer is
    commented out in the original); kept separate so the WTA nets keep
    their own state-dict layout.
    """

    def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True):
        super(conv_block_v2, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(in_channels=out_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True),
            #KWinnersTakeAll(0.02)
        )

    def forward(self, x):
        return self.conv(x)


class up_conv(nn.Module):
    """2x nearest-neighbour upsampling followed by Conv3d + BN + ReLU."""

    def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True):
        super(up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True))

    def forward(self, x):
        return self.up(x)


class U_Net_DeepSup(nn.Module):
    """3D U-Net (https://arxiv.org/abs/1505.04597) with deep supervision.

    Input: [batch, channels, depth, height, width]. Returns the final map
    plus 1-channel supervision taps from decoder levels 3 and 4.
    """

    def __init__(self, in_ch=1, out_ch=1):
        super(U_Net_DeepSup, self).__init__()

        n1 = 64  # base feature width, as in the original paper
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]  # 64,128,256,512,1024

        self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        # 1-channel heads for deep supervision
        self.Conv_d3 = conv_block(filters[1], 1)
        self.Conv_d4 = conv_block(filters[2], 1)

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # encoder
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        # decoder with skip connections; supervision taps at levels 4 and 3
        dec5 = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec4 = self.Up_conv4(torch.cat((enc3, self.Up4(dec5)), dim=1))
        sup4 = self.Conv_d4(dec4)
        dec3 = self.Up_conv3(torch.cat((enc2, self.Up3(dec4)), dim=1))
        sup3 = self.Conv_d3(dec3)
        dec2 = self.Up_conv2(torch.cat((enc1, self.Up2(dec3)), dim=1))

        out = self.Conv(dec2)
        return out, sup3, sup4


class U_Net_DeepSup_level4(nn.Module):
    """3D U-Net with deep supervision at decoder levels 3, 4 and 5.

    Input: [batch, channels, depth, height, width]. Returns a list of the
    final map and the three supervision taps.
    """

    def __init__(self, in_ch=1, out_ch=1):
        super(U_Net_DeepSup_level4, self).__init__()

        n1 = 64  # base feature width, as in the original paper
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]  # 64,128,256,512,1024

        self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        # 1-channel heads for deep supervision
        self.Conv_d3 = conv_block(filters[1], 1)
        self.Conv_d4 = conv_block(filters[2], 1)
        self.Conv_d5 = conv_block(filters[3], 1)

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # encoder
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        # decoder with skip connections; supervision taps at levels 5, 4, 3
        dec5 = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        sup5 = self.Conv_d5(dec5)
        dec4 = self.Up_conv4(torch.cat((enc3, self.Up4(dec5)), dim=1))
        sup4 = self.Conv_d4(dec4)
        dec3 = self.Up_conv3(torch.cat((enc2, self.Up3(dec4)), dim=1))
        sup3 = self.Conv_d3(dec3)
        dec2 = self.Up_conv2(torch.cat((enc1, self.Up2(dec3)), dim=1))

        out = self.Conv(dec2)
        return [out, sup3, sup4, sup5]


class U_Net_DeepSup_level4_wta(nn.Module):
    """Winner-take-all variant of U_Net_DeepSup_level4.

    Encoder and supervision heads use conv_block_v2 (WTA placeholder);
    decoder refinement blocks stay plain conv_block. Returns a list of the
    final map and the three supervision taps.
    """

    def __init__(self, in_ch=1, out_ch=1):
        super(U_Net_DeepSup_level4_wta, self).__init__()

        n1 = 64  # base feature width, as in the original paper
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]  # 64,128,256,512,1024

        self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.Conv1 = conv_block_v2(in_ch, filters[0])
        self.Conv2 = conv_block_v2(filters[0], filters[1])
        self.Conv3 = conv_block_v2(filters[1], filters[2])
        self.Conv4 = conv_block_v2(filters[2], filters[3])
        self.Conv5 = conv_block_v2(filters[3], filters[4])

        # 1-channel heads for deep supervision
        self.Conv_d3 = conv_block_v2(filters[1], 1)
        self.Conv_d4 = conv_block_v2(filters[2], 1)
        self.Conv_d5 = conv_block_v2(filters[3], 1)

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # encoder
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        # decoder with skip connections; supervision taps at levels 5, 4, 3
        dec5 = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        sup5 = self.Conv_d5(dec5)
        dec4 = self.Up_conv4(torch.cat((enc3, self.Up4(dec5)), dim=1))
        sup4 = self.Conv_d4(dec4)
        dec3 = self.Up_conv3(torch.cat((enc2, self.Up3(dec4)), dim=1))
        sup3 = self.Conv_d3(dec3)
        dec2 = self.Up_conv2(torch.cat((enc1, self.Up2(dec3)), dim=1))

        out = self.Conv(dec2)
        return [out, sup3, sup4, sup5]
13,151
29.09611
110
py
DDoS
DDoS-master/utils/pLoss/perceptual_loss.py
import math import torch import torch.nn as nn import torchvision # from utils.utils import * # from pytorch_msssim import SSIM from .Resnet2D import ResNet from .simpleunet import UNet from .VesselSeg_UNet3d_DeepSup import U_Net_DeepSup __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" class PerceptualLoss(torch.nn.Module): #currently configured for 1 channel only, with datarange as 1 for SSIM def __init__(self, device="cuda:0", loss_model="densenet161", n_level=math.inf, resize=None, loss_type="L1", mean=[], std=[]): super(PerceptualLoss, self).__init__() blocks = [] if loss_model == "resnet2D": #TODO: not finished model = ResNet(in_channels=1, out_channels=1).to(device) chk = torch.load(r"./utils/pLoss/ResNet14_IXIT2_Base_d1p75_t0_n10_dir01_5depth_L1Loss_best.pth.tar", map_location=device) model.load_state_dict(chk['state_dict']) elif loss_model == "unet2D": model = UNet(in_channels=1, out_channels=1, depth=5, wf=6, padding=True, batch_norm=False, up_mode='upsample', droprate=0.0, is3D=False, returnBlocks=False, downPath=True, upPath=True).to(device) chk = torch.load(r"./utils/pLoss/SimpleU_IXIT2_Base_d1p75_t0_n10_dir01_5depth_L1Loss_best.pth.tar", map_location=device) model.load_state_dict(chk['state_dict']) blocks.append(model.down_path[0].block.eval()) if n_level >= 2: blocks.append( nn.Sequential( nn.AvgPool2d(2), model.down_path[1].block.eval() ) ) if n_level >= 3: blocks.append( nn.Sequential( nn.AvgPool2d(2), model.down_path[2].block.eval() ) ) if n_level >= 4: blocks.append( nn.Sequential( nn.AvgPool2d(2), model.down_path[3].block.eval() ) ) elif loss_model == "unet3Dds": model = U_Net_DeepSup().to(device) chk = torch.load(r"./utils/pLoss/VesselSeg_UNet3d_DeepSup.pth", 
map_location=device) model.load_state_dict(chk['state_dict']) blocks.append(model.Conv1.conv.eval()) if n_level >= 2: blocks.append( nn.Sequential( model.Maxpool1.eval(), model.Conv2.conv.eval() ) ) if n_level >= 3: blocks.append( nn.Sequential( model.Maxpool2.eval(), model.Conv3.conv.eval() ) ) if n_level >= 4: blocks.append( nn.Sequential( model.Maxpool3.eval(), model.Conv4.conv.eval() ) ) if n_level >= 5: blocks.append( nn.Sequential( model.Maxpool4.eval(), model.Conv5.conv.eval() ) ) elif loss_model == "resnext1012D": model = torchvision.models.resnext101_32x8d() model.conv1 = nn.Conv2d(1, model.conv1.out_channels, kernel_size=model.conv1.kernel_size, stride=model.conv1.stride, padding=model.conv1.padding, bias=False if model.conv1.bias is None else True) model.fc = nn.Linear(in_features=model.fc.in_features, out_features=33, bias=False if model.fc.bias is None else True) model.to(device) # chk = torch.load(r"./utils/pLoss/ResNet14_IXIT2_Base_d1p75_t0_n10_dir01_5depth_L1Loss_best.pth.tar", map_location=device) # model.load_state_dict(chk['state_dict']) blocks.append( nn.Sequential( model.conv1.eval(), model.bn1.eval(), model.relu.eval(), ) ) if n_level >= 2: blocks.append( nn.Sequential( model.maxpool.eval(), model.layer1.eval() ) ) if n_level >= 3: blocks.append(model.layer2.eval()) if n_level >= 4: blocks.append(model.layer3.eval()) if n_level >= 5: blocks.append(model.layer4.eval()) elif loss_model == "densenet161": model = torchvision.models.densenet161() model.features.conv0 = nn.Conv2d(1, model.features.conv0.out_channels, kernel_size=model.features.conv0.kernel_size, stride=model.features.conv0.stride, padding=model.features.conv0.padding, bias=False if model.features.conv0.bias is None else True) model.classifier = nn.Linear(in_features=model.classifier.in_features, out_features=33, bias=False if model.classifier.bias is None else True) model.to(device) # chk = 
torch.load(r"./utils/pLoss/ResNet14_IXIT2_Base_d1p75_t0_n10_dir01_5depth_L1Loss_best.pth.tar", map_location=device) # model.load_state_dict(chk['state_dict']) model = model.features blocks.append( nn.Sequential( model.conv0.eval(), model.norm0.eval(), model.relu0.eval(), ) ) if n_level >= 2: blocks.append( nn.Sequential( model.pool0.eval(), model.denseblock1.eval() ) ) if n_level >= 3: blocks.append(model.denseblock2.eval()) if n_level >= 4: blocks.append(model.denseblock3.eval()) if n_level >= 5: blocks.append(model.denseblock4.eval()) for bl in blocks: for params in bl.parameters(): params.requires_grad = False self.blocks = nn.ModuleList(blocks) self.transform = nn.functional.interpolate if (mean is not None and len(mean) > 1) and (std is not None and len(std) > 1) and (len(mean) == len(std)): self.mean = nn.Parameter(torch.tensor(mean).view(1,len(mean),1,1)) self.std = nn.Parameter(torch.tensor(std).view(1,len(std),1,1)) else: self.mean = None self.std = None self.resize = resize if loss_type == "L1": self.loss_func = torch.nn.functional.l1_loss elif loss_type == "MultiSSIM": self.loss_func = MultiSSIM(reduction='mean').to(device) elif loss_type == "SSIM3D": self.loss_func = SSIM(data_range=1, size_average=True, channel=1, spatial_dims=3).to(device) elif loss_type == "SSIM2D": self.loss_func = SSIM(data_range=1, size_average=True, channel=1, spatial_dims=2).to(device) def forward(self, input, target): if self.mean is not None: input = (input-self.mean) / self.std target = (target-self.mean) / self.std if self.resize: input = self.transform(input, mode='trilinear' if len(input.shape) == 5 else 'bilinear', size=self.resize, align_corners=False) target = self.transform(target, mode='trilinear' if len(input.shape) == 5 else 'bilinear', size=self.resize, align_corners=False) loss = 0.0 x = input y = target for block in self.blocks: x = block(x) y = block(y) loss += self.loss_func(x, y) return loss if __name__ == '__main__': x = PerceptualLoss(resize=None).cuda() a = 
torch.rand(2,1,24,24).cuda() b = torch.rand(2,1,24,24).cuda() l = x(a,b) sdsd
8,538
42.345178
154
py
DDoS
DDoS-master/utils/pLoss/simpleunet.py
import torch import torch.nn.functional as F from torch import nn __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" class UNetConvBlock(nn.Module): def __init__(self, in_size, out_size, padding, batch_norm): super(UNetConvBlock, self).__init__() block = [] block.append(layer_conv(in_size, out_size, kernel_size=3, padding=int(padding))) block.append(nn.ReLU()) if batch_norm: block.append(layer_batchnorm(out_size)) block.append(layer_conv(out_size, out_size, kernel_size=3, padding=int(padding))) block.append(nn.ReLU()) if batch_norm: block.append(layer_batchnorm(out_size)) self.block = nn.Sequential(*block) def forward(self, x): out = self.block(x) return out class UNetUpBlock(nn.Module): def __init__(self, in_size, out_size, up_mode, padding, batch_norm): super(UNetUpBlock, self).__init__() if up_mode == 'upconv': self.up = layer_convtrans(in_size, out_size, kernel_size=2, stride=2) elif up_mode == 'upsample': self.up = nn.Sequential(nn.Upsample(mode=interp_mode, scale_factor=2), layer_conv(in_size, out_size, kernel_size=1)) self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm) def center_crop(self, layer, target_size): _, _, layer_depth, layer_height, layer_width = layer.size() diff_z = (layer_depth - target_size[0]) // 2 diff_y = (layer_height - target_size[1]) // 2 diff_x = (layer_width - target_size[2]) // 2 return layer[:, :, diff_z:(diff_z + target_size[0]), diff_y:(diff_y + target_size[1]), diff_x:(diff_x + target_size[2])] # _, _, layer_height, layer_width = layer.size() #for 2D data # diff_y = (layer_height - target_size[0]) // 2 # diff_x = (layer_width - target_size[1]) // 2 # return layer[:, :, diff_y:(diff_y + target_size[0]), 
diff_x:(diff_x + target_size[1])] def forward(self, x, bridge): up = self.up(x) # bridge = self.center_crop(bridge, up.shape[2:]) #sending shape ignoring 2 digit, so target size start with 0,1,2 up = F.interpolate(up, size=bridge.shape[2:], mode=interp_mode) out = torch.cat([up, bridge], 1) out = self.conv_block(out) return out class UNet(nn.Module): """ Implementation of U-Net: Convolutional Networks for Biomedical Image Segmentation (Ronneberger et al., 2015) https://arxiv.org/abs/1505.04597 Using the default arguments will yield the exact version used in the original paper Args: in_channels (int): number of input channels out_channels (int): number of output channels depth (int): depth of the network wf (int): number of filters in the first layer is 2**wf padding (bool): if True, apply padding such that the input shape is the same as the output. This may introduce artifacts batch_norm (bool): Use BatchNorm after layers with an activation function up_mode (str): one of 'upconv' or 'upsample'. 'upconv' will use transposed convolutions for learned upsampling. 'upsample' will use bilinear upsampling. droprate (float): Rate of dropout. If undesired, then 0.0 is3D (bool): If a 3D or 2D version of U-net returnBlocks (bool) : If True, it will return the blocks created during downPath. If downPath is False, then it will be ignored downPath and upPath (bool): If only the downpath or uppath of the U-Net is needed, make the other one False Forward call: x (Tensor): Input Tensor blocks (list of Tensors): If only upPath is set to True, then this will be used during the forward of the uppath. 
If not desired, then supply blank list """ def __init__(self, in_channels=1, out_channels=1, depth=3, wf=6, padding=True, batch_norm=False, up_mode='upconv', droprate=0.0, is3D=False, returnBlocks=False, downPath=True, upPath=True): super(UNet, self).__init__() layers = {} if is3D: layers["layer_conv"] = nn.Conv3d layers["layer_convtrans"] = nn.ConvTranspose3d layers["layer_batchnorm"] = nn.BatchNorm3d layers["layer_drop"] = nn.Dropout3d layers["func_avgpool"] = F.avg_pool3d layers["interp_mode"] = 'trilinear' else: layers["layer_conv"] = nn.Conv2d layers["layer_convtrans"] = nn.ConvTranspose2d layers["layer_batchnorm"] = nn.BatchNorm2d layers["layer_drop"] = nn.Dropout2d layers["func_avgpool"] = F.avg_pool2d layers["interp_mode"] = 'bilinear' globals().update(layers) self.returnBlocks = returnBlocks self.do_down = downPath self.do_up = upPath self.padding = padding self.depth = depth self.dropout = layer_drop(p=droprate) prev_channels = in_channels self.down_path = nn.ModuleList() for i in range(depth): if self.do_down: self.down_path.append(UNetConvBlock(prev_channels, 2**(wf+i), padding, batch_norm)) prev_channels = 2**(wf+i) self.latent_channels = prev_channels self.up_path = nn.ModuleList() for i in reversed(range(depth - 1)): if self.do_up: self.up_path.append(UNetUpBlock(prev_channels, 2**(wf+i), up_mode, padding, batch_norm)) prev_channels = 2**(wf+i) if self.do_up: self.last = layer_conv(prev_channels, out_channels, kernel_size=1) def forward(self, x, blocks=()): if self.do_down: for i, down in enumerate(self.down_path): x = down(x) if i != len(self.down_path)-1: blocks += (x,) x = func_avgpool(x, 2) x = self.dropout(x) if self.do_up: for i, up in enumerate(self.up_path): x = up(x, blocks[-i-1]) x = self.last(x) if self.returnBlocks and self.do_down: return x, blocks else: return x if __name__ == '__main__': print('#### Test Case ###') from torch.autograd import Variable x = Variable(torch.rand(2,1,64,64)).cuda() model = UNet().cuda() y = model(x) 
print(y.shape)
7,121
39.237288
160
py
hrv-analysis
hrv-analysis-master/setup.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This script provides setup requirements to install hrvanalysis via pip""" import setuptools # Get long description in READ.md file with open("README.md", "r") as fh: LONG_DESCRIPTION = fh.read() setuptools.setup( name="hrv-analysis", version="1.0.4", author="Robin Champseix", license="GPLv3", author_email="robin.champseix@gmail.com", description="a package to calculate features from Rr Interval for HRV analyses", long_description=LONG_DESCRIPTION, long_description_content_type="text/markdown", include_package_data=True, url="https://github.com/Aura-healthcare/hrv-analysis", packages=setuptools.find_packages(), python_requires='>=3.5', install_requires=[ "numpy>=1.15.1", "astropy>=3.0.4", "nolds>=0.4.1", "scipy>=1.1.0", "pandas>=0.23.4", "matplotlib>=2.2.2" ], classifiers=[ "Programming Language :: Python :: 3", "Operating System :: OS Independent" ] )
1,049
27.378378
84
py
hrv-analysis
hrv-analysis-master/sphinx-docs/source/conf.py
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys from recommonmark.parser import CommonMarkParser sys.path.insert(0, os.path.abspath('../..')) # -- Project information ----------------------------------------------------- project = 'hrvanalysis' copyright = '2018, Robin Champseix' author = 'Robin Champseix' # The short X.Y version version = '1.0' # The full version, including alpha/beta/rc tags release = '1.0.0' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' autosummary_generate = True # Make _autosummary files and include them # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary', 'sphinx.ext.napoleon' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_parsers = { '.md': CommonMarkParser, } source_suffix = ['.rst', '.md'] #source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. 
# Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = None # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # #html_theme = 'alabaster' html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'hrvanalysisdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. 
# # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'hrvanalysis.tex', 'hrvanalysis Documentation', 'Robin Champseix', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'hrvanalysis', 'hrvanalysis Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'hrvanalysis', 'hrvanalysis Documentation', author, 'hrvanalysis', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # -- Extension configuration ------------------------------------------------- extensions = ['sphinx.ext.napoleon']
5,578
28.363158
79
py
hrv-analysis
hrv-analysis-master/tests/tests_plot_methods.py
#!/usr/bin/env python """This script provides methods to test extract_features methods.""" import os import unittest from hrvanalysis.plot import (plot_timeseries, plot_distrib, plot_poincare, plot_psd) TEST_DATA_FILENAME = os.path.join(os.path.dirname(__file__), 'test_nn_intervals.txt') def load_test_data(path): # Load test rr_intervals data with open(path, "r") as text_file: lines = text_file.readlines() nn_intervals = list(map(lambda x: int(x.strip()), lines)) return nn_intervals class ExtractFeaturesTestCase(unittest.TestCase): """Class for UniTests of different methods in extract_features module""" # def test_if_plot_timseries_does_not_send_an_error(self): # nn_intervals = get_rr_interval_list_from_txt_file(TEST_DATA_FILENAME) # plot_timeseries(nn_intervals, normalize=True) # # def test_if_plot_distrib_does_not_send_an_error(self): # nn_intervals = get_rr_interval_list_from_txt_file(TEST_DATA_FILENAME) # plot_distrib(nn_intervals) # # def test_if_plot_poincare_does_not_send_an_error(self): # nn_intervals = get_rr_interval_list_from_txt_file(TEST_DATA_FILENAME) # plot_poincare(nn_intervals) # # def test_if_plot_psd_does_not_send_an_error(self): # nn_intervals = get_rr_interval_list_from_txt_file(TEST_DATA_FILENAME) # plot_psd(nn_intervals, method="lomb") if __name__ == '__main__': unittest.main()
1,456
33.690476
85
py
hrv-analysis
hrv-analysis-master/tests/tests_preprocessing_methods.py
#!/usr/bin/env python """This script provides methods to test clean_outliers methods.""" import unittest import numpy as np from hrvanalysis.preprocessing import (remove_outliers, interpolate_nan_values, remove_ectopic_beats, get_nn_intervals) class CleanOutliersTestCase(unittest.TestCase): """Class for UniTests of different methods in clean_outliers module""" def test_high_low_outlier(self): rri_list = [700, 600, 2300, 200, 1000, 230, 1200] self.assertAlmostEqual(remove_outliers(rr_intervals=rri_list), [700, 600, np.nan, np.nan, 1000, np.nan, 1200]) def test_interpolate_cleaned_outlier(self): # TODO # rri_list = [10, 11, np.nan, 13, 15, np.nan, 16, 17, np.nan, np.nan, np.nan, 20, np.nan, 22] # interpolated_list = np.array(interpolate_nan_values(rri_list)) # print(interpolated_list) # print(interpolated_list) # expected_list = np.array([10, 11, 12, 13, 15, 15.5, 16, 17, 17.75, np.nan, np.nan, 20, 21, 22]) # self.assertAlmostEqual(interpolated_list, expected_list) pass def test_1_successive_outlier_malik(self): rri_list = [100, 110, 100, 130, 100, 100, 70, 100, 120, 100] self.assertEqual(remove_ectopic_beats(rr_intervals=rri_list, method="malik"), [100, 110, 100, np.nan, 100, 100, np.nan, 100, 120, 100]) def test_1_successive_outlier_kamath(self): rri_list = [101, 110, 100, 140, 100, 100, 70, 100, 130, 115, 100, 78] self.assertEqual(remove_ectopic_beats(rr_intervals=rri_list, method="kamath"), [101, 110, 100, np.nan, 100, 100, np.nan, 100, 130, 115, 100, 78]) def test_1_successive_outlier_karlsson(self): rri_list = [110, 100, 125, 100, 100, 70, 100, 130, 105, 100, 78, 100] self.assertEqual(remove_ectopic_beats(rr_intervals=rri_list, method="karlsson"), [110, 100, np.nan, 100, 100, np.nan, 100, np.nan, 105, 100, np.nan, 100]) def test_1_successive_outlier_Acer(self): rri_list = [100, 100, 100, 100, 100, 100, 100, 100, 110, 930, 110, 100, 10] self.assertEqual((remove_ectopic_beats(rr_intervals=rri_list, method="acar")), [100, 100, 100, 100, 100, 100, 100, 100, 110, 
np.nan, 110, 100, np.nan]) # def test_2_succesive_outliers_malik(self): # rri_list = [103, 110, 100, 150, 50, 100, 100, 130, 115, 100, 78, 70, 100, 100] # self.assertEqual(remove_ectopic_beats(rr_intervals=rri_list, method="Malik"), # [103, 110, 100, np.nan, np.nan, 100, 100, # np.nan, 115, 100, np.nan, np.nan, 100, 110]) # # def test_2_succesive_outliers_kamath(self): # rri_list = [104, 110, 100, 130, 115, 100, 100, 75, 140, 100, 78, 70, 92, 100, 140, 140, # 100, 140, 130, 100] # self.assertEqual(remove_ectopic_beats(rr_intervals=rri_list, method="Kamath"), # [104, 110, 100, 130, 115, 100, 100, np.nan, np.nan, 100, 78, 70, 92, # 100, np.nan, np.nan, 100, np.nan, 130, 110]) # # def test_2_succesive_outliers_karlsson(self): # rri_list = [105, 110, 100, 130, 115, 100, 100, 75, 140, 100, 78, 70, 92, 100, 140, 140, # 100, 140, 130, 100] # self.assertEqual(remove_ectopic_beats(rr_intervals=rri_list, method="Karlsson"), # [105, 110, 100, np.nan, 115, 100, 100, np.nan, np.nan, 100, np.nan, # np.nan, 92, 100, np.nan, np.nan, 100, np.nan, np.nan, 110]) def test_if_get_nn_intervals_creates_right_nn_intervals(self): rri_list = [700, 600, 2300, 1000, 1000, 230, 1200] expected_rri_list = [700, 600, 800, 1000, 1000, 1100, 1200] self.assertEqual(get_nn_intervals(rri_list), expected_rri_list) if __name__ == '__main__': unittest.main()
3,986
51.460526
105
py
hrv-analysis
hrv-analysis-master/tests/tests_extract_features_methods.py
#!/usr/bin/env python """This script provides methods to test extract_features methods.""" import os import unittest import numpy as np import pandas as pd from hrvanalysis.extract_features import (get_time_domain_features, get_geometrical_features, _create_interpolated_timestamp_list, get_sampen, get_csi_cvi_features, get_poincare_plot_features, get_frequency_domain_features) TEST_DATA_FILENAME = os.path.join(os.path.dirname(__file__), 'test_nn_intervals.txt') def load_test_data(path): # Load test rr_intervals data with open(path, "r") as text_file: lines = text_file.readlines() nn_intervals = list(map(lambda x: int(x.strip()), lines)) return nn_intervals class ExtractFeaturesTestCase(unittest.TestCase): """Class for UniTests of different methods in extract_features module""" def test_if_time_domain_features_are_correct(self): nn_intervals = load_test_data(TEST_DATA_FILENAME) function_time_domain_features = get_time_domain_features(nn_intervals=nn_intervals) real_function_time_domain_features = {'mean_nni': 718.248, 'sdnn': 43.113074968427306, 'sdsd': 19.519367520775713, 'nni_50': 24, 'pnni_50': 2.4024024024024024, 'nni_20': 225, 'pnni_20': 22.52252252252252, 'rmssd': 19.519400785039664, 'median_nni': 722.5, 'range_nni': 249, 'cvsd': 0.027176408127888504, 'cvnni': 0.060025332431732914, 'mean_hr': 83.84733227281252, 'max_hr': 101.69491525423729, 'min_hr': 71.51370679380214, 'std_hr': 5.196775370674054} self.assertDictEqual(function_time_domain_features, real_function_time_domain_features) def test_if_time_domain_features_are_correct_for_pnni_as_percent_set_to_false(self): nn_intervals = load_test_data(TEST_DATA_FILENAME) function_time_domain_features = get_time_domain_features(nn_intervals=nn_intervals, pnni_as_percent=False) real_function_time_domain_features = {'mean_nni': 718.248, 'sdnn': 43.113074968427306, 'sdsd': 19.519367520775713, 'nni_50': 24, 'pnni_50': 2.4, 'nni_20': 225, 'pnni_20': 22.5, 'rmssd': 19.519400785039664, 'median_nni': 722.5, 'range_nni': 
249, 'cvsd': 0.027176408127888504, 'cvnni': 0.060025332431732914, 'mean_hr': 83.84733227281252, 'max_hr': 101.69491525423729, 'min_hr': 71.51370679380214, 'std_hr': 5.196775370674054} self.assertAlmostEqual(function_time_domain_features, real_function_time_domain_features) def test_if_geometrical_domain_features_are_correct(self): nn_intervals = load_test_data(TEST_DATA_FILENAME) function_geometrical_domain_features = get_geometrical_features(nn_intervals) real_function_geometrical_domain_features = {'triangular_index': 11.363636363636363, 'tinn': None} self.assertAlmostEqual(function_geometrical_domain_features, real_function_geometrical_domain_features) # TODO : check why there is not equality between arrays # def test_if_time_info_created_is_correct(self): # nn_intervals = [900, 1000, 1100, 1000, 950, 850] # time_info_created = _create_timestamp_list(nn_intervals) # expected_time = np.array([0., 1., 2.1, 3.1, 4.05, 4.9]) # print(expected_time == time_info_created) # print(expected_time) # print(time_info_created) # self.assertAlmostEqual(time_info_created, expected_time) def test_if_interpolated_time_created_is_correct(self): nn_intervals = [1000, 900, 1100, 1000, 950, 850] nni_interpolation_tmstp = _create_interpolated_timestamp_list(nn_intervals, sampling_frequency=2) real_interpolation_tmstp = np.array([0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]) all_is_equal = all(nni_interpolation_tmstp == real_interpolation_tmstp) self.assertTrue(all_is_equal) def test_if_csi_cvi_features_are_correct(self): nn_intervals = load_test_data(TEST_DATA_FILENAME) function_csi_cvi_features = get_csi_cvi_features(nn_intervals) real_csi_cvi_features = {'csi': 4.300520404060338, 'cvi': 4.117977429005704, 'Modified_csi': 1021.5749458778378} self.assertAlmostEqual(function_csi_cvi_features, real_csi_cvi_features) def test_if_pointcare_plot_features_features_are_correct(self): nn_intervals = load_test_data(TEST_DATA_FILENAME) function_pointcare_plot_features = 
get_poincare_plot_features(nn_intervals) real_pointcare_plot_features = {'sd1': 13.80919037557993, 'sd2': 59.38670497373513, 'ratio_sd2_sd1': 4.300520404060338} self.assertAlmostEqual(function_pointcare_plot_features, real_pointcare_plot_features) def test_if_sampen_feature_is_correct(self): nn_intervals = load_test_data(TEST_DATA_FILENAME) function_sampen_features = get_sampen(nn_intervals) sampen_plot_features = {'sampen': 1.2046675751816824} self.assertAlmostEqual(function_sampen_features, sampen_plot_features) def test_if_get_frequency_domain_features_handles_pandas_series(self): # TODO: Investigate: extract_features.py:432: RuntimeWarning: invalid value encountered in double_scalars # Also occurs with list only. try: get_frequency_domain_features(pd.Series([42]*10000)) except KeyError: self.fail() if __name__ == '__main__': unittest.main()
7,004
51.276119
114
py
hrv-analysis
hrv-analysis-master/hrvanalysis/preprocessing.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """This script provides several methods to clean abnormal and ectopic RR-intervals.""" from typing import Tuple from typing import List import pandas as pd import numpy as np # Static name for methods params MALIK_RULE = "malik" KARLSSON_RULE = "karlsson" KAMATH_RULE = "kamath" ACAR_RULE = "acar" CUSTOM_RULE = "custom" __all__ = ["remove_outliers", "remove_ectopic_beats", "interpolate_nan_values", "get_nn_intervals"] # ----------------- ClEAN OUTlIERS / ECTOPIC BEATS ----------------- # def remove_outliers(rr_intervals: List[float], verbose: bool = True, low_rri: int = 300, high_rri: int = 2000) -> list: """ Function that replace RR-interval outlier by nan. Parameters --------- rr_intervals : list raw signal extracted. low_rri : int lowest RrInterval to be considered plausible. high_rri : int highest RrInterval to be considered plausible. verbose : bool Print information about deleted outliers. Returns --------- rr_intervals_cleaned : list list of RR-intervals without outliers References ---------- .. [1] O. Inbar, A. Oten, M. Scheinowitz, A. Rotstein, R. Dlin, R.Casaburi. Normal \ cardiopulmonary responses during incremental exercise in 20-70-yr-old men. .. [2] W. C. Miller, J. P. Wallace, K. E. Eggert. Predicting max HR and the HR-VO2 relationship\ for exercise prescription in obesity. .. [3] H. Tanaka, K. D. Monahan, D. R. Seals. Age-predictedmaximal heart rate revisited. .. [4] M. Gulati, L. J. Shaw, R. A. Thisted, H. R. Black, C. N. B.Merz, M. F. Arnsdorf. Heart \ rate response to exercise stress testing in asymptomatic women. 
""" # Conversion RrInterval to Heart rate ==> rri (ms) = 1000 / (bpm / 60) # rri 2000 => bpm 30 / rri 300 => bpm 200 rr_intervals_cleaned = [rri if high_rri >= rri >= low_rri else np.nan for rri in rr_intervals] if verbose: outliers_list = [] for rri in rr_intervals: if high_rri >= rri >= low_rri: pass else: outliers_list.append(rri) nan_count = sum(np.isnan(rr_intervals_cleaned)) if nan_count == 0: print("{} outlier(s) have been deleted.".format(nan_count)) else: print("{} outlier(s) have been deleted.".format(nan_count)) print("The outlier(s) value(s) are : {}".format(outliers_list)) return rr_intervals_cleaned def remove_ectopic_beats(rr_intervals: List[float], method: str = "malik", custom_removing_rule: float = 0.2, verbose: bool = True) -> list: """ RR-intervals differing by more than the removing_rule from the one proceeding it are removed. Parameters --------- rr_intervals : list list of RR-intervals method : str method to use to clean outlier. malik, kamath, karlsson, acar or custom. custom_removing_rule : int Percentage criteria of difference with previous RR-interval at which we consider that it is abnormal. If method is set to Karlsson, it is the percentage of difference between the absolute mean of previous and next RR-interval at which to consider the beat as abnormal. verbose : bool Print information about ectopic beats. Returns --------- nn_intervals : list list of NN Interval outlier_count : int Count of outlier detected in RR-interval list References ---------- .. [5] Kamath M.V., Fallen E.L.: Correction of the Heart Rate Variability Signal for Ectopics \ and Miss- ing Beats, In: Malik M., Camm A.J. .. [6] Geometric Methods for Heart Rate Variability Assessment - Malik M et al """ if method not in [MALIK_RULE, KAMATH_RULE, KARLSSON_RULE, ACAR_RULE, CUSTOM_RULE]: raise ValueError("Not a valid method. 
Please choose between malik, kamath, karlsson, acar.\ You can also choose your own removing critera with custom_rule parameter.") if method == KARLSSON_RULE: nn_intervals, outlier_count = _remove_outlier_karlsson(rr_intervals=rr_intervals, removing_rule=custom_removing_rule) elif method == ACAR_RULE: nn_intervals, outlier_count = _remove_outlier_acar(rr_intervals=rr_intervals) else: # set first element in list outlier_count = 0 previous_outlier = False nn_intervals = [rr_intervals[0]] for i, rr_interval in enumerate(rr_intervals[:-1]): if previous_outlier: nn_intervals.append(rr_intervals[i + 1]) previous_outlier = False continue if is_outlier(rr_interval, rr_intervals[i + 1], method=method, custom_rule=custom_removing_rule): nn_intervals.append(rr_intervals[i + 1]) else: nn_intervals.append(np.nan) outlier_count += 1 previous_outlier = True if verbose: print("{} ectopic beat(s) have been deleted with {} rule.".format(outlier_count, method)) return nn_intervals def is_outlier(rr_interval: int, next_rr_interval: float, method: str = "malik", custom_rule: float = 0.2) -> bool: """ Test if the rr_interval is an outlier Parameters ---------- rr_interval : int RrInterval next_rr_interval : int consecutive RrInterval method : str method to use to clean outlier. 
malik, kamath, karlsson, acar or custom custom_rule : int percentage criteria of difference with previous RR-interval at which we consider that it is abnormal Returns ---------- outlier : bool True if RrInterval is valid, False if not """ if method == MALIK_RULE: outlier = abs(rr_interval - next_rr_interval) <= 0.2 * rr_interval elif method == KAMATH_RULE: outlier = 0 <= (next_rr_interval - rr_interval) <= 0.325 * rr_interval or 0 <= \ (rr_interval - next_rr_interval) <= 0.245 * rr_interval else: outlier = abs(rr_interval - next_rr_interval) <= custom_rule * rr_interval return outlier def _remove_outlier_karlsson(rr_intervals: List[float], removing_rule: float = 0.2) -> Tuple[list, int]: """ RR-intervals differing by more than the 20 % of the mean of previous and next RR-interval are removed. Parameters --------- rr_intervals : list list of RR-intervals removing_rule : float Percentage of difference between the absolute mean of previous and next RR-interval at which \ to consider the beat as abnormal. Returns --------- nn_intervals : list list of NN Interval References ---------- .. 
[7] Automatic filtering of outliers in RR-intervals before analysis of heart rate \ variability in Holter recordings: a comparison with carefully edited data - Marcus Karlsson, \ Rolf Hörnsten, Annika Rydberg and Urban Wiklund """ # set first element in list nn_intervals = [rr_intervals[0]] outlier_count = 0 for i in range(len(rr_intervals)): # Condition to go out of loop at limits of list if i == len(rr_intervals)-2: nn_intervals.append(rr_intervals[i + 1]) break mean_prev_next_rri = (rr_intervals[i] + rr_intervals[i+2]) / 2 if abs(mean_prev_next_rri - rr_intervals[i+1]) < removing_rule * mean_prev_next_rri: nn_intervals.append(rr_intervals[i+1]) else: nn_intervals.append(np.nan) outlier_count += 1 return nn_intervals, outlier_count def _remove_outlier_acar(rr_intervals: List[float], custom_rule=0.2) -> Tuple[list, int]: """ RR-intervals differing by more than the 20 % of the mean of last 9 RrIntervals are removed. Parameters --------- rr_intervals : list list of RR-intervals custom_rule : int percentage criteria of difference with mean of 9 previous RR-intervals at which we consider that RR-interval is abnormal. By default, set to 20 % Returns --------- nn_intervals : list list of NN Interval References ---------- .. [8] Automatic ectopic beat elimination in short-term heart rate variability measurements \ Acar B., Irina S., Hemingway H., Malik M. 
""" nn_intervals = [] outlier_count = 0 for i, rr_interval in enumerate(rr_intervals): if i < 9: nn_intervals.append(rr_interval) continue acar_rule_elt = np.nanmean(nn_intervals[-9:]) if abs(acar_rule_elt - rr_interval) < custom_rule * acar_rule_elt: nn_intervals.append(rr_interval) else: nn_intervals.append(np.nan) outlier_count += 1 return nn_intervals, outlier_count def interpolate_nan_values(rr_intervals: list, interpolation_method: str = "linear", limit_area: str = None, limit_direction: str = "forward", limit=None,) -> list: """ Function that interpolate Nan values with linear interpolation Parameters --------- rr_intervals : list RrIntervals list. interpolation_method : str Method used to interpolate Nan values of series. limit_area: str If limit is specified, consecutive NaNs will be filled with this restriction. limit_direction: str If limit is specified, consecutive NaNs will be filled in this direction. limit: int TODO Returns --------- interpolated_rr_intervals : list new list with outliers replaced by interpolated values. 
""" # search first nan data and fill it post value until it is not nan if np.isnan(rr_intervals[0]): start_idx = 0 while np.isnan(rr_intervals[start_idx]): start_idx += 1 rr_intervals[0:start_idx] = [rr_intervals[start_idx]] * start_idx else: pass # change rr_intervals to pd series series_rr_intervals_cleaned = pd.Series(rr_intervals) # Interpolate nan values and convert pandas object to list of values interpolated_rr_intervals = series_rr_intervals_cleaned.interpolate(method=interpolation_method, limit=limit, limit_area=limit_area, limit_direction=limit_direction) return interpolated_rr_intervals.values.tolist() def get_nn_intervals(rr_intervals: List[float], low_rri: int = 300, high_rri: int = 2000, limit_area: str = None, limit_direction: str = "forward", interpolation_method: str = "linear", ectopic_beats_removal_method: str = KAMATH_RULE, verbose: bool = True) -> List[float]: """ Function that computes NN Intervals from RR-intervals. Parameters --------- rr_intervals : list RrIntervals list. interpolation_method : str Method used to interpolate Nan values of series. ectopic_beats_removal_method : str method to use to clean outlier. malik, kamath, karlsson, acar or custom. low_rri : int lowest RrInterval to be considered plausible. high_rri : int highest RrInterval to be considered plausible. limit_area: str If limit is specified, consecutive NaNs will be filled with this restriction. limit_direction: str If limit is specified, consecutive NaNs will be filled in this direction. verbose : bool Print information about deleted outliers. 
Returns --------- interpolated_nn_intervals : list list of NN Interval interpolated """ rr_intervals_cleaned = remove_outliers(rr_intervals, low_rri=low_rri, high_rri=high_rri, verbose=verbose) interpolated_rr_intervals = interpolate_nan_values(rr_intervals_cleaned, interpolation_method, limit_area=limit_area, limit_direction=limit_direction) nn_intervals = remove_ectopic_beats(interpolated_rr_intervals, method=ectopic_beats_removal_method, verbose=verbose) interpolated_nn_intervals = interpolate_nan_values(nn_intervals, interpolation_method, limit_area=limit_area, limit_direction=limit_direction) return interpolated_nn_intervals def is_valid_sample(nn_intervals: List[float], outlier_count: int, removing_rule: float = 0.04) -> bool: """ Test if the sample meet the condition to be used for analysis Parameters ---------- nn_intervals : list list of Normal to Normal Interval outlier_count : int count of outliers or ectopic beats removed from the interval removing_rule : str rule to follow to determine whether the sample is valid or not Returns ---------- bool True if sample is valid, False if not """ result = True if outlier_count / len(nn_intervals) > removing_rule: print("Too much outlier for analyses ! You should descard the sample.") result = False if len(nn_intervals) < 240: print("Not enough Heart beat for Nyquist criteria ! ") result = False return result
13,724
35.6
110
py
hrv-analysis
hrv-analysis-master/hrvanalysis/extract_features.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """This script provides several methods to extract features from Normal to Normal Intervals for heart rate variability analysis.""" from typing import List, Tuple from collections import namedtuple import numpy as np import nolds from scipy import interpolate from scipy import signal from astropy.stats import LombScargle # limit functions that user might import using "from hrv-analysis import *" __all__ = ['get_time_domain_features', 'get_frequency_domain_features', 'get_geometrical_features', 'get_poincare_plot_features', "get_csi_cvi_features", "get_sampen"] # Frequency Methods name WELCH_METHOD = "welch" LOMB_METHOD = "lomb" # Named Tuple for different frequency bands VlfBand = namedtuple("Vlf_band", ["low", "high"]) LfBand = namedtuple("Lf_band", ["low", "high"]) HfBand = namedtuple("Hf_band", ["low", "high"]) # ----------------- TIME DOMAIN FEATURES ----------------- # def get_time_domain_features(nn_intervals: List[float], pnni_as_percent: bool = True) -> dict: """ Returns a dictionary containing time domain features for HRV analysis. Mostly used on long term recordings (24h) but some studies use some of those features on short term recordings, from 1 to 5 minutes window. Parameters ---------- nn_intervals : list list of Normal to Normal Interval pnni_as_percent: bool whether to remove bias or not to compute pnni features. Returns ------- time_domain_features : dict dictionary containing time domain features for HRV analyses. There are details about each features below. Notes ----- Here are some details about feature engineering... - **mean_nni**: The mean of RR-intervals. - **sdnn** : The standard deviation of the time interval between successive normal heart beats \ (i.e. the RR-intervals). - **sdsd**: The standard deviation of differences between adjacent RR-intervals - **rmssd**: The square root of the mean of the sum of the squares of differences between \ adjacent NN-intervals. 
Reflects high frequency (fast or parasympathetic) influences on hrV \ (*i.e.*, those influencing larger changes from one beat to the next). - **median_nni**: Median Absolute values of the successive differences between the RR-intervals. - **nni_50**: Number of interval differences of successive RR-intervals greater than 50 ms. - **pnni_50**: The proportion derived by dividing nni_50 (The number of interval differences \ of successive RR-intervals greater than 50 ms) by the total number of RR-intervals. - **nni_20**: Number of interval differences of successive RR-intervals greater than 20 ms. - **pnni_20**: The proportion derived by dividing nni_20 (The number of interval differences \ of successive RR-intervals greater than 20 ms) by the total number of RR-intervals. - **range_nni**: difference between the maximum and minimum nn_interval. - **cvsd**: Coefficient of variation of successive differences equal to the rmssd divided by \ mean_nni. - **cvnni**: Coefficient of variation equal to the ratio of sdnn divided by mean_nni. - **mean_hr**: The mean Heart Rate. - **max_hr**: Max heart rate. - **min_hr**: Min heart rate. - **std_hr**: Standard deviation of heart rate. References ---------- .. 
[1] Heart rate variability - Standards of measurement, physiological interpretation, and \ clinical use, Task Force of The European Society of Cardiology and The North American Society \ of Pacing and Electrophysiology, 1996 """ diff_nni = np.diff(nn_intervals) length_int = len(nn_intervals) - 1 if pnni_as_percent else len(nn_intervals) # Basic statistics mean_nni = np.mean(nn_intervals) median_nni = np.median(nn_intervals) range_nni = max(nn_intervals) - min(nn_intervals) sdsd = np.std(diff_nni) rmssd = np.sqrt(np.mean(diff_nni ** 2)) nni_50 = sum(np.abs(diff_nni) > 50) pnni_50 = 100 * nni_50 / length_int nni_20 = sum(np.abs(diff_nni) > 20) pnni_20 = 100 * nni_20 / length_int # Feature found on github and not in documentation cvsd = rmssd / mean_nni # Features only for long term recordings sdnn = np.std(nn_intervals, ddof=1) # ddof = 1 : unbiased estimator => divide std by n-1 cvnni = sdnn / mean_nni # Heart Rate equivalent features heart_rate_list = np.divide(60000, nn_intervals) mean_hr = np.mean(heart_rate_list) min_hr = min(heart_rate_list) max_hr = max(heart_rate_list) std_hr = np.std(heart_rate_list) time_domain_features = { 'mean_nni': mean_nni, 'sdnn': sdnn, 'sdsd': sdsd, 'nni_50': nni_50, 'pnni_50': pnni_50, 'nni_20': nni_20, 'pnni_20': pnni_20, 'rmssd': rmssd, 'median_nni': median_nni, 'range_nni': range_nni, 'cvsd': cvsd, 'cvnni': cvnni, 'mean_hr': mean_hr, "max_hr": max_hr, "min_hr": min_hr, "std_hr": std_hr, } return time_domain_features def get_geometrical_features(nn_intervals: List[float]) -> dict: """ Returns a dictionary containing geometrical time domain features for HRV analyses. Known practise is to use this function on recordings from 20 minutes to 24 Hours window. Parameters --------- nn_intervals : list list of Normal to Normal Interval. Returns --------- geometrical_features : dict Dictionary containing geometrical time domain features for HRV analyses. There are details about each features below. 
Notes ---------- Details about feature engineering... - **triangular_index**: The HRV triangular index measurement is the integral of the density \ distribution (= the number of all NN-intervals) divided by the maximum of the density \ distribution. - **tinn**: The triangular interpolation of NN-interval histogram (TINN) is the baseline width \ of the distribution measured as a base of a triangle, approximating the NN-interval \ distribution References ---------- .. [1] Heart rate variability - Standards of measurement, physiological interpretation, and \ clinical use, Task Force of The European Society of Cardiology and The North American Society \ of Pacing and Electrophysiology, 1996 """ triang_idx = len(nn_intervals) / max(np.histogram(nn_intervals, bins=range(300, 2000, 8))[0]) # TODO tinn = None geometrical_features = { "triangular_index": triang_idx, "tinn": tinn } return geometrical_features # ----------------- FREQUENCY DOMAIN FEATURES ----------------- # def get_frequency_domain_features(nn_intervals: List[float], method: str = WELCH_METHOD, sampling_frequency: int = 4, interpolation_method: str = "linear", vlf_band: namedtuple = VlfBand(0.003, 0.04), lf_band: namedtuple = LfBand(0.04, 0.15), hf_band: namedtuple = HfBand(0.15, 0.40)) -> dict: """ Returns a dictionary containing frequency domain features for HRV analyses. To our knowledge, you might use this function on short term recordings, from 2 to 5 minutes \ window. Parameters --------- nn_intervals : list list of Normal to Normal Interval method : str Method used to calculate the psd. Choice are Welch's FFT or Lomb method. sampling_frequency : int Frequency at which the signal is sampled. Common value range from 1 Hz to 10 Hz, by default set to 4 Hz. No need to specify if Lomb method is used. interpolation_method : str kind of interpolation as a string, by default "linear". No need to specify if Lomb method is used. 
vlf_band : tuple Very low frequency bands for features extraction from power spectral density. lf_band : tuple Low frequency bands for features extraction from power spectral density. hf_band : tuple High frequency bands for features extraction from power spectral density. Returns --------- frequency_domain_features : dict Dictionary containing frequency domain features for HRV analyses. There are details about each features below. Notes --------- Details about feature engineering... - **total_power** : Total power density spectral - **vlf** : variance ( = power ) in HRV in the Very low Frequency (.003 to .04 Hz by default). \ Reflect an intrinsic rhythm produced by the heart which is modulated primarily by sympathetic \ activity. - **lf** : variance ( = power ) in HRV in the low Frequency (.04 to .15 Hz). Reflects a \ mixture of sympathetic and parasympathetic activity, but in long-term recordings, it reflects \ sympathetic activity and can be reduced by the beta-adrenergic antagonist propanolol. - **hf**: variance ( = power ) in HRV in the High Frequency (.15 to .40 Hz by default). \ Reflects fast changes in beat-to-beat variability due to parasympathetic (vagal) activity. \ Sometimes called the respiratory band because it corresponds to HRV changes related to the \ respiratory cycle and can be increased by slow, deep breathing (about 6 or 7 breaths per \ minute) and decreased by anticholinergic drugs or vagal blockade. - **lf_hf_ratio** : lf/hf ratio is sometimes used by some investigators as a quantitative \ mirror of the sympatho/vagal balance. - **lfnu** : normalized lf power. - **hfnu** : normalized hf power. References ---------- .. [1] Heart rate variability - Standards of measurement, physiological interpretation, and \ clinical use, Task Force of The European Society of Cardiology and The North American Society \ of Pacing and Electrophysiology, 1996 .. [2] Signal Processing Methods for Heart Rate Variability - Gari D. 
Clifford, 2002 """ # ---------- Handle pandas series ---------- # nn_intervals = list(nn_intervals) # ---------- Compute frequency & Power spectral density of signal ---------- # freq, psd = _get_freq_psd_from_nn_intervals(nn_intervals=nn_intervals, method=method, sampling_frequency=sampling_frequency, interpolation_method=interpolation_method, vlf_band=vlf_band, hf_band=hf_band) # ---------- Features calculation ---------- # frequency_domain_features = _get_features_from_psd(freq=freq, psd=psd, vlf_band=vlf_band, lf_band=lf_band, hf_band=hf_band) return frequency_domain_features def _get_freq_psd_from_nn_intervals(nn_intervals: List[float], method: str = WELCH_METHOD, sampling_frequency: int = 4, interpolation_method: str = "linear", vlf_band: namedtuple = VlfBand(0.003, 0.04), hf_band: namedtuple = HfBand(0.15, 0.40)) -> Tuple: """ Returns the frequency and power of the signal. Parameters --------- nn_intervals : list list of Normal to Normal Interval method : str Method used to calculate the psd. Choice are Welch's FFT or Lomb method. sampling_frequency : int Frequency at which the signal is sampled. Common value range from 1 Hz to 10 Hz, by default set to 7 Hz. No need to specify if Lomb method is used. interpolation_method : str Kind of interpolation as a string, by default "linear". No need to specify if Lomb method is used. vlf_band : tuple Very low frequency bands for features extraction from power spectral density. hf_band : tuple High frequency bands for features extraction from power spectral density. Returns --------- freq : list Frequency of the corresponding psd points. psd : list Power Spectral Density of the signal. 
""" timestamp_list = _create_timestamp_list(nn_intervals) if method == WELCH_METHOD: # ---------- Interpolation of signal ---------- # funct = interpolate.interp1d(x=timestamp_list, y=nn_intervals, kind=interpolation_method) timestamps_interpolation = _create_interpolated_timestamp_list(nn_intervals, sampling_frequency) nni_interpolation = funct(timestamps_interpolation) # ---------- Remove DC Component ---------- # nni_normalized = nni_interpolation - np.mean(nni_interpolation) # --------- Compute Power Spectral Density --------- # freq, psd = signal.welch(x=nni_normalized, fs=sampling_frequency, window='hann', nfft=4096) elif method == LOMB_METHOD: freq, psd = LombScargle(timestamp_list, nn_intervals, normalization='psd').autopower(minimum_frequency=vlf_band[0], maximum_frequency=hf_band[1]) else: raise ValueError("Not a valid method. Choose between 'lomb' and 'welch'") return freq, psd def _create_timestamp_list(nn_intervals: List[float]) -> List[float]: """ Creates corresponding time interval for all nn_intervals Parameters --------- nn_intervals : list List of Normal to Normal Interval. Returns --------- nni_tmstp : list list of time intervals between first NN-interval and final NN-interval. """ # Convert in seconds nni_tmstp = np.cumsum(nn_intervals) / 1000 # Force to start at 0 return nni_tmstp - nni_tmstp[0] def _create_interpolated_timestamp_list(nn_intervals: List[float], sampling_frequency: int = 7) -> List[float]: """ Creates the interpolation time used for Fourier transform's method Parameters --------- nn_intervals : list List of Normal to Normal Interval. sampling_frequency : int Frequency at which the signal is sampled. Returns --------- nni_interpolation_tmstp : list Timestamp for interpolation. 
""" time_nni = _create_timestamp_list(nn_intervals) # Create timestamp for interpolation nni_interpolation_tmstp = np.arange(0, time_nni[-1], 1 / float(sampling_frequency)) return nni_interpolation_tmstp def _get_features_from_psd(freq: List[float], psd: List[float], vlf_band: namedtuple = VlfBand(0.003, 0.04), lf_band: namedtuple = LfBand(0.04, 0.15), hf_band: namedtuple = HfBand(0.15, 0.40)) -> dict: """ Computes frequency domain features from the power spectral decomposition. Parameters --------- freq : array Array of sample frequencies. psd : list Power spectral density or power spectrum. vlf_band : tuple Very low frequency bands for features extraction from power spectral density. lf_band : tuple Low frequency bands for features extraction from power spectral density. hf_band : tuple High frequency bands for features extraction from power spectral density. Returns --------- freqency_domain_features : dict Dictionary containing frequency domain features for HRV analyses. There are details about each features given below. 
""" # Calcul of indices between desired frequency bands vlf_indexes = np.logical_and(freq >= vlf_band[0], freq < vlf_band[1]) lf_indexes = np.logical_and(freq >= lf_band[0], freq < lf_band[1]) hf_indexes = np.logical_and(freq >= hf_band[0], freq < hf_band[1]) # Integrate using the composite trapezoidal rule lf = np.trapz(y=psd[lf_indexes], x=freq[lf_indexes]) hf = np.trapz(y=psd[hf_indexes], x=freq[hf_indexes]) # total power & vlf : Feature often used for "long term recordings" analysis vlf = np.trapz(y=psd[vlf_indexes], x=freq[vlf_indexes]) total_power = vlf + lf + hf lf_hf_ratio = lf / hf lfnu = (lf / (lf + hf)) * 100 hfnu = (hf / (lf + hf)) * 100 freqency_domain_features = { 'lf': lf, 'hf': hf, 'lf_hf_ratio': lf_hf_ratio, 'lfnu': lfnu, 'hfnu': hfnu, 'total_power': total_power, 'vlf': vlf } return freqency_domain_features # ----------------- NON lINEAR DOMAIN FEATURES ----------------- # def get_csi_cvi_features(nn_intervals: List[float]) -> dict: """ Returns a dictionary containing 3 features from non linear domain for HRV analyses. Known practise is to use this function on short term recordings, on 30 , 50, 100 RR-intervals (or seconds) window. Parameters --------- nn_intervals : list Normal to Normal Intervals. Returns --------- csi_cvi_features : dict Dictionary containing non linear domain features for hrV analyses. There are details about each features are given below. Notes --------- - **csi** : Cardiac Sympathetic Index. - **cvi** : Cadiac Vagal Index. - **Modified_csi** : Modified CSI is an alternative measure in research of seizure detection. References ---------- .. 
[3] Using Lorenz plot and Cardiac Sympathetic Index of heart rate variability for detecting \ seizures for patients with epilepsy, Jesper Jeppesen et al, 2014 """ # Measures the width and length of poincare cloud poincare_plot_features = get_poincare_plot_features(nn_intervals) T = 4 * poincare_plot_features['sd1'] L = 4 * poincare_plot_features['sd2'] csi = L / T cvi = np.log10(L * T) modified_csi = L ** 2 / T csi_cvi_features = { 'csi': csi, 'cvi': cvi, 'Modified_csi': modified_csi } return csi_cvi_features def get_poincare_plot_features(nn_intervals: List[float]) -> dict: """ Function returning a dictionary containing 3 features from non linear domain for HRV analyses. Known practise is to use this function on short term recordings, from 5 minutes window. Parameters --------- nn_intervals : list Normal to Normal Interval Returns --------- poincare_plot_features : dict Dictionary containing non linear domain features for hrV analyses. There are details about each features are given below. Notes --------- - **sd1** : The standard deviation of projection of the Poincaré plot on the line \ perpendicular to the line of identity. - **sd2** : SD2 is defined as the standard deviation of the projection of the Poincaré \ plot on the line of identity (y=x). - **ratio_sd2_sd1** : Ratio between SD2 and SD1. References ---------- .. 
[4] Pre-ictal heart rate variability assessment of epileptic seizures by means of linear \ and non- linear analyses, Soroor Behbahani, Nader Jafarnia Dabanloo et al - 2013 """ diff_nn_intervals = np.diff(nn_intervals) # measures the width of poincare cloud sd1 = np.sqrt(np.std(diff_nn_intervals, ddof=1) ** 2 * 0.5) # measures the length of the poincare cloud sd2 = np.sqrt(2 * np.std(nn_intervals, ddof=1) ** 2 - 0.5 * np.std(diff_nn_intervals, ddof=1) ** 2) ratio_sd2_sd1 = sd2 / sd1 poincare_plot_features = { 'sd1': sd1, 'sd2': sd2, 'ratio_sd2_sd1': ratio_sd2_sd1 } return poincare_plot_features def get_sampen(nn_intervals: List[float]) -> dict: """ Function computing the sample entropy of the given data. Must use this function on short term recordings, from 1 minute window. Parameters --------- nn_intervals : list Normal to Normal Interval Returns --------- sampen : float The sample entropy of the data References ---------- .. [5] Physiological time-series analysis using approximate entropy and sample entropy, \ JOSHUA S. RICHMAN1, J. RANDALL MOORMAN - 2000 """ sampen = nolds.sampen(nn_intervals, emb_dim=2) return {'sampen': sampen}
20,410
34.37435
111
py
hrv-analysis
hrv-analysis-master/hrvanalysis/plot.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """This script provides several methods to plot RR / NN-intervals.""" from typing import List import matplotlib.pyplot as plt from matplotlib import style from matplotlib.patches import Ellipse from hrvanalysis.extract_features import _get_freq_psd_from_nn_intervals from hrvanalysis.extract_features import get_poincare_plot_features from collections import namedtuple import numpy as np # Named Tuple for different frequency bands VlfBand = namedtuple("Vlf_band", ["low", "high"]) LfBand = namedtuple("Lf_band", ["low", "high"]) HfBand = namedtuple("Hf_band", ["low", "high"]) def plot_timeseries(nn_intervals: List[float], normalize: bool = True, autoscale: bool = True, y_min: float = None, y_max: float = None): """ Function plotting the NN-intervals time series. Arguments --------- nn_intervals : list list of Normal to Normal Interval. normalize : bool Set to True to plot X axis as a cumulative sum of Time. Set to False to plot X axis using x as index array 0, 1, ..., N-1. autoscale : bool Option to normalize the x-axis as a time series for comparison. Set to True by default. y_min : float Custom min value might be set for y axis. y_max : float Custom max value might be set for y axis. """ style.use("seaborn-darkgrid") plt.figure(figsize=(12, 8)) plt.title("Rr Interval time series") plt.ylabel("Rr Interval", fontsize=15) if normalize: plt.xlabel("Time (s)", fontsize=15) plt.plot(np.cumsum(nn_intervals) / 1000, nn_intervals) else: plt.xlabel("RR-interval index", fontsize=15) plt.plot(nn_intervals) if not autoscale: plt.ylim(y_min, y_max) plt.show() def plot_distrib(nn_intervals: List[float], bin_length: int = 8): """ Function plotting histogram distribution of the NN Intervals. Useful for geometrical features. Arguments --------- nn_intervals : list list of Normal to Normal Interval. bin_length : int size of the bin for histogram in ms, by default = 8. 
""" max_nn_i = max(nn_intervals) min_nn_i = min(nn_intervals) style.use("seaborn-darkgrid") plt.figure(figsize=(12, 8)) plt.title("Distribution of Rr Intervals", fontsize=20) plt.xlabel("Time (s)", fontsize=15) plt.ylabel("Number of Rr Interval per bin", fontsize=15) plt.hist(nn_intervals, bins=range(min_nn_i - 10, max_nn_i + 10, bin_length), rwidth=0.8) plt.show() def plot_psd(nn_intervals: List[float], method: str = "welch", sampling_frequency: int = 7, interpolation_method: str = "linear", vlf_band: namedtuple = VlfBand(0.003, 0.04), lf_band: namedtuple = LfBand(0.04, 0.15), hf_band: namedtuple = HfBand(0.15, 0.40)): """ Function plotting the power spectral density of the NN Intervals. Arguments --------- nn_intervals : list list of Normal to Normal Interval. method : str Method used to calculate the psd. Choice are Welch's FFT (welch) or Lomb method (lomb). sampling_frequency : int frequence at which the signal is sampled. Common value range from 1 Hz to 10 Hz, by default set to 7 Hz. No need to specify if Lomb method is used. interpolation_method : str kind of interpolation as a string, by default "linear". No need to specify if lomb method is used. vlf_band : tuple Very low frequency bands for features extraction from power spectral density. lf_band : tuple Low frequency bands for features extraction from power spectral density. hf_band : tuple High frequency bands for features extraction from power spectral density. 
""" freq, psd = _get_freq_psd_from_nn_intervals(nn_intervals=nn_intervals, method=method, sampling_frequency=sampling_frequency, interpolation_method=interpolation_method) # Calcul of indices between desired frequency bands vlf_indexes = np.logical_and(freq >= vlf_band[0], freq < vlf_band[1]) lf_indexes = np.logical_and(freq >= lf_band[0], freq < lf_band[1]) hf_indexes = np.logical_and(freq >= hf_band[0], freq < hf_band[1]) frequency_band_index = [vlf_indexes, lf_indexes, hf_indexes] label_list = ["VLF component", "LF component", "HF component"] # Plot parameters style.use("seaborn-darkgrid") plt.figure(figsize=(12, 8)) plt.xlabel("Frequency (Hz)", fontsize=15) plt.ylabel("PSD (s2/ Hz)", fontsize=15) if method == "lomb": plt.title("Lomb's periodogram", fontsize=20) for band_index, label in zip(frequency_band_index, label_list): plt.fill_between(freq[band_index], 0, psd[band_index] / (1000 * len(psd[band_index])), label=label) plt.legend(prop={"size": 15}, loc="best") elif method == "welch": plt.title("FFT Spectrum : Welch's periodogram", fontsize=20) for band_index, label in zip(frequency_band_index, label_list): plt.fill_between(freq[band_index], 0, psd[band_index] / (1000 * len(psd[band_index])), label=label) plt.legend(prop={"size": 15}, loc="best") plt.xlim(0, hf_band[1]) else: raise ValueError("Not a valid method. Choose between 'lomb' and 'welch'") plt.show() def plot_poincare(nn_intervals: List[float], plot_sd_features: bool = True): """ Pointcare / Lorentz Plot of the NN Intervals Arguments --------- nn_intervals : list list of NN intervals plot_sd_features : bool Option to show or not SD1 and SD2 features on plot. By default, set to True. 
Notes --------- The transverse axis (T) reflects beat-to-beat variation the longitudinal axis (L) reflects the overall fluctuation """ # For Lorentz / poincaré Plot ax1 = nn_intervals[:-1] ax2 = nn_intervals[1:] # compute features for ellipse's height, width and center dict_sd1_sd2 = get_poincare_plot_features(nn_intervals) sd1 = dict_sd1_sd2["sd1"] sd2 = dict_sd1_sd2["sd2"] mean_nni = np.mean(nn_intervals) # Plot options and settings style.use("seaborn-darkgrid") fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(111) plt.title("Poincaré / Lorentz Plot", fontsize=20) plt.xlabel('NN_n (s)', fontsize=15) plt.ylabel('NN_n+1 (s)', fontsize=15) plt.xlim(min(nn_intervals) - 10, max(nn_intervals) + 10) plt.ylim(min(nn_intervals) - 10, max(nn_intervals) + 10) # Poincaré Plot ax.scatter(ax1, ax2, c='b', s=2) if plot_sd_features: # Ellipse plot settings ells = Ellipse(xy=(mean_nni, mean_nni), width=2 * sd2 + 1, height=2 * sd1 + 1, angle=45, linewidth=2, fill=False) ax.add_patch(ells) ells = Ellipse(xy=(mean_nni, mean_nni), width=2 * sd2, height=2 * sd1, angle=45) ells.set_alpha(0.05) ells.set_facecolor("blue") ax.add_patch(ells) # Arrow plot settings sd1_arrow = ax.arrow(mean_nni, mean_nni, -sd1 * np.sqrt(2) / 2, sd1 * np.sqrt(2) / 2, linewidth=3, ec='r', fc="r", label="SD1") sd2_arrow = ax.arrow(mean_nni, mean_nni, sd2 * np.sqrt(2) / 2, sd2 * np.sqrt(2) / 2, linewidth=3, ec='g', fc="g", label="SD2") plt.legend(handles=[sd1_arrow, sd2_arrow], fontsize=12, loc="best") plt.show()
7,596
36.240196
111
py
hrv-analysis
hrv-analysis-master/hrvanalysis/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """This script allow user to import directly the most useful functions.""" __version__ = "1.0.3" from hrvanalysis.extract_features import (get_time_domain_features, get_frequency_domain_features, get_geometrical_features, get_csi_cvi_features, get_poincare_plot_features, get_sampen) from hrvanalysis.preprocessing import (remove_outliers, remove_ectopic_beats, interpolate_nan_values, get_nn_intervals) from hrvanalysis.plot import (plot_timeseries, plot_distrib, plot_psd, plot_poincare)
664
40.5625
101
py
panphon
panphon-master/setup.py
from setuptools import setup setup(name='panphon', version='0.20.0', description='Tools for using the International Phonetic Alphabet with phonological features', url='https://github.com/dmort27/panphon', download_url='https://github.com/dmort27/panphon/archive/0.19.1.tar.gz', long_description=open('README.md', encoding='utf-8').read(), long_description_content_type='text/markdown', author='David R. Mortensen', author_email='dmortens@cs.cmu.edu', license='MIT', install_requires=['setuptools', 'unicodecsv', 'PyYAML', 'regex', 'numpy>=1.20.2', 'editdistance', 'munkres'], scripts=['panphon/bin/validate_ipa.py', 'panphon/bin/align_wordlists.py', 'panphon/bin/generate_ipa_all.py'], packages=['panphon'], package_dir={'panphon': 'panphon'}, package_data={'panphon': ['data/*.csv', 'data/*.yml']}, zip_safe=True, classifiers=['Operating System :: OS Independent', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Text Processing :: Linguistic'] )
1,405
41.606061
99
py
panphon
panphon-master/panphon/collapse.py
from __future__ import (absolute_import, division, print_function, unicode_literals) import os.path import pkg_resources import yaml from panphon import _panphon from panphon import permissive class Collapser(object): def __init__(self, tablename='dogolpolsky_prime.yml', feature_set='spe+', feature_model='strict'): fm = {'strict': _panphon.FeatureTable, 'permissive': permissive.PermissiveFeatureTable} self.fm = fm[feature_model](feature_set=feature_set) self.rules = self._load_table(tablename) def _load_table(self, tablename): fn = os.path.join('data', tablename) fn = pkg_resources.resource_filename(__name__, fn) with open(fn, 'r') as f: rules = [] table = yaml.load(f.read(), Loader=yaml.FullLoader) for rule in table: rules.append((_panphon.fts(rule['def']), rule['label'])) return rules def collapse(self, s): segs = [] for seg in self.fm.seg_regex.findall(s): fts = self.fm.fts(seg) for mask, label in self.rules: if self.fm.match(mask, fts): segs.append(label) break return ''.join(segs)
1,270
31.589744
102
py
panphon
panphon-master/panphon/_panphon.py
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals from os import stat import unicodedata import os.path from functools import reduce import numpy import pkg_resources import regex as re import unicodecsv as csv from panphon import featuretable from . import xsampa from panphon.errors import SegmentError # logging.basicConfig(level=logging.DEBUG) FT_REGEX = re.compile(r'([-+0])([a-z][A-Za-z]*)', re.U | re.X) MT_REGEX = re.compile(r'\[[-+0a-zA-Z ,;]*\]') SEG_REGEX = re.compile(r'[\p{InBasic_Latin}\p{InGreek_and_Coptic}' + r'\p{InIPA_Extensions}ŋœ\u00C0-\u00FF]' + r'[\u0300-\u0360\u0362-\u036F]*' + r'\p{InSpacing_Modifier_Letters}*', re.U | re.X) filenames = { 'spe+': os.path.join('data', 'ipa_all.csv'), 'panphon': os.path.join('data', 'ipa_all.csv'), } def segment_text(text, seg_regex=SEG_REGEX): """Return an iterator of segments in the text. Args: text (unicode): string of IPA Unicode text seg_regex (_regex.Pattern): compiled regex defining a segment (base + modifiers) Return: generator: segments in the input text """ for m in seg_regex.finditer(text): yield m.group(0) def fts(s): """Given string `s` with +/-[alphabetical sequence]s, return list of features. Args: s (str): string with segments of the sort "+son -syl 0cor" Return: list: list of (value, feature) tuples """ return [m.groups() for m in FT_REGEX.finditer(s)] def pat(p): """Given a string `p` with feature matrices (features grouped with square brackets into segments, return a list of sets of (value, feature) tuples. Args: p (str): list of feature matrices as strings Return: list: list of sets of (value, feature) tuples """ pattern = [] for matrix in [m.group(0) for m in MT_REGEX.finditer(p)]: segment = set([m.groups() for m in FT_REGEX.finditer(matrix)]) pattern.append(segment) return pattern def word2array(ft_names, word): """Converts `word` [[(value, feature),...],...] 
to a NumPy array Given a word consisting of lists of lists/sets of (value, feature) tuples, return a NumPy array where each row is a segment and each column is a feature. Args: ft_names (list): list of feature names (as strings) in order; this argument controls what features are included in the array that is output and their order vis-a-vis the columns of the array word (list): list of lists of feature tuples (output by FeatureTable.word_fts) Returns: ndarray: array in which each row is a segment and each column is a feature """ vdict = {'+': 1, '-': -1, '0': 0} def seg2col(seg): seg = dict([(k, v) for (v, k) in seg]) return [vdict[seg[ft]] for ft in ft_names] return numpy.array([seg2col(s) for s in word], order='F') class FeatureTable(object): """Encapsulate the segment <=> feature mapping in the file "data/ipa_all.csv". """ def __init__(self, feature_set='spe+'): """Construct a FeatureTable object Args: feature_set (str): the feature set that the FeatureTable will use; currently, there is only one of these ("spe+") """ filename = filenames[feature_set] self.segments, self.seg_dict, self.names = self._read_table(filename) self.seg_seq = {seg[0]: i for (i, seg) in enumerate(self.segments)} self.weights = self._read_weights() self.seg_regex = self._build_seg_regex() self.longest_seg = max([len(x) for x in self.seg_dict.keys()]) self.xsampa = xsampa.XSampa() @staticmethod def normalize(data): return unicodedata.normalize('NFD', data) def _read_table(self, filename): """Read the data from data/ipa_all.csv into self.segments, a list of 2-tuples of unicode strings and sets of feature tuples and self.seg_dict, a dictionary mapping from unicode segments and sets of feature tuples. 
""" filename = pkg_resources.resource_filename( __name__, filename) segments = [] with open(filename, 'rb') as f: reader = csv.reader(f, encoding='utf-8') header = next(reader) names = header[1:] for row in reader: seg = row[0] vals = row[1:] specs = set(zip(vals, names)) segments.append((seg, specs)) seg_dict = dict(segments) return segments, seg_dict, names def _read_weights(self, filename=os.path.join('data', 'feature_weights.csv')): filename = pkg_resources.resource_filename( __name__, filename) with open(filename, 'rb') as f: reader = csv.reader(f, encoding='utf-8') next(reader) weights = [float(x) for x in next(reader)] return weights def _build_seg_regex(self): # Build a regex that will match individual segments in a string. segs = sorted(self.seg_dict.keys(), key=lambda x: len(x), reverse=True) return re.compile(r'(?P<all>{})'.format('|'.join(segs))) def fts(self, segment): """Returns features corresponding to `segment` as list of (value, feature) tuples. Args: segment (unicode): segment for which features are to be returned as Unicode IPA string. Returns: set: set of (value, feature) tuples, if `segment` is valid; otherwise, None """ if segment in self.seg_dict: return self.seg_dict[segment] else: return None def match(self, ft_mask, ft_seg): """Answer question "are `ft_mask`'s features a subset of ft_seg?" Args: ft_mask (set): pattern defined as set of (value, feature) tuples ft_seg (set): segment defined as a set of (value, feature) tuples Returns: bool: True iff all features in `ft_mask` are also in `ft_seg` """ return set(ft_mask) <= set(ft_seg) def fts_match(self, features, segment): """Answer question "are `ft_mask`'s features a subset of ft_seg?" This is like `FeatureTable.match` except that it checks whether a segment is valid and returns None if it is not. 
Args: features (set): pattern defined as set of (value, feature) tuples segment (set): segment defined as a set of (value, feature) tuples Returns: bool: True iff all features in `ft_mask` are also in `ft_seg`; None if segment is not valid """ features = set(features) if self.seg_known(segment): return features <= self.fts(segment) else: return None def longest_one_seg_prefix(self, word, normalize=True): """Return longest Unicode IPA prefix of a word Args: word (unicode): input word as Unicode IPA string Returns: unicode: longest single-segment prefix of `word` in database """ if normalize: word = FeatureTable.normalize(word) for i in range(self.longest_seg, 0, -1): if word[:i] in self.seg_dict: return word[:i] return '' def validate_word(self, word): """Returns True if `word` consists exhaustively of valid IPA segments Args: word (unicode): input word as Unicode IPA string Returns: bool: True if `word` can be divided exhaustively into IPA segments that exist in the database """ while word: match = self.seg_regex.match(word) if match: word = word[len(match.group(0)):] else: # print('{}\t->\t{}\t'.format(orig, word).encode('utf-8'), file=sys.stderr) return False return True def segs(self, word): """Returns a list of segments from a word Args: word (unicode): input word as Unicode IPA string Returns: list: list of strings corresponding to segments found in `word` """ return [m.group('all') for m in self.seg_regex.finditer(word)] def word_fts(self, word): """Return featural analysis of `word` Args: word (unicode): one or more IPA segments Returns: list: list of lists (value, feature) tuples where each inner list corresponds to a segment in `word` """ return list(map(self.fts, self.segs(word))) def word_array(self, ft_names, word): """Return `word` as [-1, 0, 1] features in a NumPy array Args: ft_names (list): list of feature names in order word (unicode): word as an IPA string Returns: ndarray: segments in rows, features in columns as [-1, 0 , 1] """ return 
word2array(ft_names, self.word_fts(word)) def seg_known(self, segment): """Return True if `segment` is in segment <=> features database Args: segment (unicode): consonant or vowel Returns: bool: True, if `segment` is in the database """ return segment in self.seg_dict def segs_safe(self, word): """Return a list of segments (as strings) from a word Characters that are not valid segments are included in the list as individual characters. Args: word (unicode): word as an IPA string Returns: list: list of Unicode IPA strings corresponding to segments in `word` """ segs = [] while word: m = self.seg_regex.match(word) if m: segs.append(m.group(1)) word = word[len(m.group(1)):] else: segs.append(word[0]) word = word[1:] return segs def filter_segs(self, segs): """Given list of strings, return only those which are valid segments Args: segs (list): list of IPA Unicode strings Return: list: list of IPA Unicode strings identical to `segs` but with invalid segments filtered out """ return list(filter(self.seg_known, segs)) def filter_string(self, word): """Return a string like the input but containing only legal IPA segments Args: word (unicode): input string to be filtered Returns: unicode: string identical to `word` but with invalid IPA segments absent """ segs = [m.group(0) for m in self.seg_regex.finditer(word)] return ''.join(segs) def fts_intersection(self, segs): """Return the features shared by `segs` Args: segs (list): list of Unicode IPA segments Returns: set: set of (value, feature) tuples shared by the valid segments in `segs` """ fts_vecs = [self.fts(s) for s in self.filter_segs(segs)] return reduce(lambda a, b: a & b, fts_vecs) def fts_match_any(self, fts, inv): """Return `True` if any segment in `inv` matches the features in `fts` Args: fts (list): a collection of (value, feature) tuples inv (list): a collection of IPA segments represented as Unicode strings Returns: bool: `True` if any segment in `inv` matches the features in `fts` """ return 
any([self.fts_match(fts, s) for s in inv]) def fts_match_all(self, fts, inv): """Return `True` if all segments in `inv` matches the features in fts Args: fts (list): a collection of (value, feature) tuples inv (list): a collection of IPA segments represented as Unicode strings Returns: bool: `True` if all segments in `inv` matches the features in `fts` """ return all([self.fts_match(fts, s) for s in inv]) def fts_contrast2(self, fs, ft_name, inv): """Return `True` if there is a segment in `inv` that contrasts in feature `ft_name`. Args: fs (list): feature specifications used to filter `inv`. ft_name (str): name of the feature where contrast must be present. inv (list): collection of segments represented as Unicode segments. Returns: bool: `True` if two segments in `inv` are identical in features except for feature `ft_name` """ inv_fts = [self.fts(x) for x in inv if set(fs) <= self.fts(x)] for a in inv_fts: for b in inv_fts: if a != b: diff = a ^ b if len(diff) == 2: if all([nm == ft_name for (_, nm) in diff]): return True return False def fts_count(self, fts, inv): """Return the count of segments in an inventory matching a given feature mask. Args: fts (set): feature mask given as a set of (value, feature) tuples inv (set): inventory of segments (as Unicode IPA strings) Returns: int: number of segments in `inv` that match feature mask `fts` """ return len(list(filter(lambda s: self.fts_match(fts, s), inv))) def match_pattern(self, pat, word): """Implements fixed-width pattern matching. Matches just in case pattern is the same length (in segments) as the word and each of the segments in the pattern is a featural subset of the corresponding segment in the word. Matches return the corresponding list of feature sets; failed matches return None. 
Args: pat (list): pattern consisting of a sequence of sets of (value, feature) tuples word (unicode): a Unicode IPA string consisting of zero or more segments Returns: list: corresponding list of feature sets or, if there is no match, None """ segs = self.word_fts(word) if len(pat) != len(segs): return None else: if all([set(p) <= s for (p, s) in zip(pat, segs)]): return segs def match_pattern_seq(self, pat, const): """Implements limited pattern matching. Matches just in case pattern is the same length (in segments) as the constituent and each of the segments in the pattern is a featural subset of the corresponding segment in the word. Args: pat (list): pattern consisting of a list of sets of (value, feature) tuples. const (list): a sequence of Unicode IPA strings consisting of zero or more segments. Returns: bool: `True` if `const` matches `pat` """ segs = [self.fts(s) for s in const] if len(pat) != len(segs): return False else: return all([set(p) <= s for (p, s) in zip(pat, segs)]) def all_segs_matching_fts(self, fts): """Return segments matching a feature mask, both as (value, feature) tuples (sorted in reverse order by length). Args: fts (list): feature mask as (value, feature) tuples. Returns: list: segments matching `fts`, sorted in reverse order by length """ matching_segs = [] for seg, pairs in self.segments: if set(fts) <= set(pairs): matching_segs.append(seg) return sorted(matching_segs, key=lambda x: len(x), reverse=True) def compile_regex_from_str(self, ft_str): """Given a string describing features masks for a sequence of segments, return a regex matching the corresponding strings. Args: ft_str (str): feature masks, each enclosed in square brackets, in which the features are delimited by any standard delimiter. 
Returns: Pattern: regular expression pattern equivalent to `ft_str` """ sequence = [] for m in re.finditer(r'\[([^]]+)\]', ft_str): ft_mask = fts(m.group(1)) segs = self.all_segs_matching_fts(ft_mask) sub_pat = '({})'.format('|'.join(segs)) sequence.append(sub_pat) pattern = ''.join(sequence) regex = re.compile(pattern) return regex def segment_to_vector(self, seg): """Given a Unicode IPA segment, return a list of feature specificiations in cannonical order. Args: seg (unicode): IPA consonant or vowel Returns: list: feature specifications ('+'/'-'/'0') in the order from `FeatureTable.names` """ ft_dict = {ft: val for (val, ft) in self.fts(seg)} return [ft_dict[name] for name in self.names] def tensor_to_numeric(self, t): return list(map(lambda a: list(map(lambda b: {'+': 1, '-': -1, '0': 0}[b], a)), t)) def word_to_vector_list(self, word, numeric=False, xsampa=False): """Return a list of feature vectors, given a Unicode IPA word. Args: word (unicode): string in IPA numeric (bool): if True, return features as numeric values instead of strings Returns: list: a list of lists of '+'/'-'/'0' or 1/-1/0 """ if xsampa: word = self.xsampa.convert(word) tensor = list(map(self.segment_to_vector, self.segs(word))) if numeric: return self.tensor_to_numeric(tensor) else: return tensor
18,322
32.620183
91
py
panphon
panphon-master/panphon/featuretable.py
# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import os.path import unicodedata import collections import numpy import pkg_resources import regex as re import unicodecsv as csv from . import xsampa from .segment import Segment from functools import reduce feature_sets = { 'spe+': (os.path.join('data', 'ipa_all.csv'), os.path.join('data', 'feature_weights.csv')) } class FeatureTable(object): TRIE_LEAF_MARKER = None def __init__(self, feature_set='spe+'): bases_fn, weights_fn = feature_sets[feature_set] self.weights = self._read_weights(weights_fn) self.segments, self.seg_dict, self.names = self._read_bases(bases_fn, self.weights) self.seg_regex = self._build_seg_regex() self.seg_trie = self._build_seg_trie() self.longest_seg = max([len(x) for x in self.seg_dict.keys()]) self.xsampa = xsampa.XSampa() @staticmethod def normalize(data): return unicodedata.normalize('NFD', data) def _read_bases(self, fn, weights): fn = pkg_resources.resource_filename(__name__, fn) segments = [] with open(fn, 'rb') as f: reader = csv.reader(f, encoding='utf-8') header = next(reader) names = header[1:] for row in reader: ipa = FeatureTable.normalize(row[0]) vals = [{'-': -1, '0': 0, '+': 1}[x] for x in row[1:]] vec = Segment(names, {n: v for (n, v) in zip(names, vals)}, weights=weights) segments.append((ipa, vec)) seg_dict = dict(segments) return segments, seg_dict, names def _read_weights(self, weights_fn): weights_fn = pkg_resources.resource_filename(__name__, weights_fn) with open(weights_fn, 'rb') as f: reader = csv.reader(f, encoding='utf-8') next(reader) weights = [float(x) for x in next(reader)] return weights def _build_seg_regex(self): segs = sorted(self.seg_dict.keys(), key=lambda x: len(x), reverse=True) return re.compile(r'(?P<all>{})'.format('|'.join(segs))) def _build_seg_trie(self): trie = {} for seg in self.seg_dict.keys(): node = trie for char in seg: if char not in node: node[char] = {} node = node[char] 
node[self.TRIE_LEAF_MARKER] = None return trie def fts(self, ipa, normalize=True): if normalize: ipa = FeatureTable.normalize(ipa) if ipa in self.seg_dict: return self.seg_dict[ipa] else: return None def longest_one_seg_prefix(self, word, normalize=True): """Return longest Unicode IPA prefix of a word Args: word (unicode): input word as Unicode IPA string normalize (bool): whether the word should be pre-normalized Returns: unicode: longest single-segment prefix of `word` in database """ if normalize: word = FeatureTable.normalize(word) last_found_length = 0 node = self.seg_trie for pos in range(len(word) + 1): if pos == len(word) or word[pos] not in node: return word[:last_found_length] node = node[word[pos]] if self.TRIE_LEAF_MARKER in node: last_found_length = pos + 1 def ipa_segs(self, word, normalize=True): """Returns a list of segments from a word Args: word (unicode): input word as Unicode IPA string normalize (bool): whether to pre-normalize the word Returns: list: list of strings corresponding to segments found in `word` """ if normalize: word = FeatureTable.normalize(word) return self._segs(word, include_invalid=False, normalize=normalize) def validate_word(self, word, normalize=True): """Returns True if `word` consists exhaustively of valid IPA segments Args: word (unicode): input word as Unicode IPA string normalize (bool): whether to pre-normalize the word Returns: bool: True if `word` can be divided exhaustively into IPA segments that exist in the database """ return not self._segs(word, include_valid=False, include_invalid=True, normalize=normalize) def word_fts(self, word, normalize=True): """Return a list of Segment objects corresponding to the segments in word. 
Args: word (unicode): word consisting of IPA segments normalize (bool): whether to pre-normalize the word Returns: list: list of Segment objects corresponding to word """ return [self.fts(ipa, False) for ipa in self.ipa_segs(word, normalize)] def word_array(self, ft_names, word, normalize=True): """Return a nparray of features namd in ft_name for the segments in word Args: ft_names (list): strings naming subset of features in self.names word (unicode): word to be analyzed normalize (bool): whether to pre-normalize the word Returns: ndarray: segments in rows, features in columns as [-1, 0, 1] """ return numpy.array([s.numeric(ft_names) for s in self.word_fts(word, normalize)]) def bag_of_features(self, word, normalize=True): """Return a vector in which each dimension is the number of times a feature-value pair occurs in the word Args: word (unicode): word consisting of IPA segments normalize (bool): whether to pre-normalize the word Returns: array: array of integers corresponding to a bag of feature-value pair counts """ word_features = self.word_fts(word, normalize) features = [v + f for f in self.names for v in ['+', '0', '-']] bag = collections.OrderedDict() for f in features: bag[f] = 0 vdict = {-1: '-', 0: '0', 1: '+'} for w in word_features: for (f, v) in w.items(): bag[vdict[v] + f] += 1 return numpy.array(list(bag.values())) def seg_known(self, segment, normalize=True): """Return True if `segment` is in segment <=> features database Args: segment (unicode): consonant or vowel normalize (bool): whether to pre-normalize the segment Returns: bool: True, if `segment` is in the database """ if normalize: segment = FeatureTable.normalize(segment) return segment in self.seg_dict def segs_safe(self, word, normalize=True): """Return a list of segments (as strings) from a word Characters that are not valid segments are included in the list as individual characters. 
Args: word (unicode): word as an IPA string normalize (bool): whether to pre-normalize the word Returns: list: list of Unicode IPA strings corresponding to segments in `word` """ if normalize: word = FeatureTable.normalize(word) return self._segs(word, include_invalid=True, normalize=normalize) def _segs(self, word, *, include_valid=True, include_invalid, normalize): if normalize: word = FeatureTable.normalize(word) segs = [] while word: m = self.longest_one_seg_prefix(word, False) if m: if include_valid: segs.append(m) word = word[len(m):] else: if include_invalid: segs.append(word[0]) word = word[1:] return segs def filter_segs(self, segs, normalize=True): """Given list of strings, return only those which are valid segments Args: segs (list): list of IPA Unicode strings normalize (bool): whether to pre-normalize the segments Return: list: list of IPA Unicode strings identical to `segs` but with invalid segments filtered out """ return list(filter(lambda seg: self.seg_known(seg, normalize), segs)) def filter_string(self, word, normalize=True): """Return a string like the input but containing only legal IPA segments Args: word (unicode): input string to be filtered normalize (bool): whether to pre-normalize the word (and return a normalized string) Returns: unicode: string identical to `word` but with invalid IPA segments absent """ return ''.join(self.ipa_segs(word, normalize)) def fts_intersection(self, segs, normalize=True): """Return a Segment object containing the features shared by all segments Args: segs (list): IPA segments normalize (bool): whether to pre-normalize the segments Returns: Segment: the features shared by all segments in segs """ return reduce(lambda a, b: a & b, [self.fts(s, normalize) for s in self.filter_segs(segs, normalize)]) def fts_match_all(self, fts, inv, normalize=True): """Return `True` if all segments in `inv` matches the features in fts Args: fts (dict): a dictionary of features inv (list): a collection of IPA segments represented 
as Unicode strings normalize (bool): whether to pre-normalize the segments Returns: bool: `True` if all segments in `inv` matches the features in `fts` """ return all([self.fts(s, normalize) >= fts for s in inv]) def fts_match_any(self, fts, inv, normalize=True): """Return `True` if any segments in `inv` matches the features in fts Args: fts (dict): a dictionary of features inv (list): a collection of IPA segments represented as Unicode strings normalize (bool): whether to pre-normalize the segments Returns: bool: `True` if any segments in `inv` matches the features in `fts` """ return any([self.fts(s, normalize) >= fts for s in inv]) def fts_contrast(self, fs, ft_name, inv, normalize=True): """Return `True` if there is a segment in `inv` that contrasts in feature `ft_name`. Args: fs (dict): feature specifications used to filter `inv`. ft_name (str): name of the feature where contrast must be present. inv (list): collection of segments represented as Unicode strings. normalize (bool): whether to pre-normalize the segments Returns: bool: `True` if two segments in `inv` are identical in features except for feature `ft_name` """ inv_segs = filter(lambda x: x >= fs, map(lambda seg: self.fts(seg, normalize), inv)) for a in inv_segs: for b in inv_segs: if a != b: if a.differing_specs(b) == [ft_name]: return True return False def fts_count(self, fts, inv, normalize=True): """Return the count of segments in an inventory matching a given feature mask. Args: fts (dict): feature mask given as a set of (value, feature) tuples inv (list): inventory of segments (as Unicode IPA strings) normalize (bool): whether to pre-normalize the segments Returns: int: number of segments in `inv` that match feature mask `fts` """ return len(list(filter(lambda s: self.fts(s, normalize) >= fts, inv))) def match_pattern(self, pat, word, normalize=True): """Implements fixed-width pattern matching. 
Matches just in case pattern is the same length (in segments) as the word and each of the segments in the pattern is a featural subset of the corresponding segment in the word. Matches return the corresponding list of feature sets; failed matches return None. Args: pat (list): pattern consisting of a sequence of feature dicts word (unicode): a Unicode IPA string consisting of zero or more segments normalize (bool): whether to pre-normalize the word Returns: list: corresponding list of feature dicts or, if there is no match, None """ segs = self.word_fts(word, normalize) if len(pat) != len(segs): return None else: if all([s >= p for (s, p) in zip(segs, pat)]): return segs def match_pattern_seq(self, pat, const, normalize=True): """Implements limited pattern matching. Matches just in case pattern is the same length (in segments) as the constituent and each of the segments in the pattern is a featural subset of the corresponding segment in the word. Args: pat (list): pattern consisting of a list of feature dicts, e.g. [{'voi': 1}] const (list): a sequence of Unicode IPA strings consisting of zero or more segments. normalize (bool): whether to pre-normalize the segments Returns: bool: `True` if `const` matches `pat` """ segs = [self.fts(s, normalize) for s in const] if len(pat) != len(segs): return False else: return all([s >= p for (s, p) in zip(segs, pat)]) def all_segs_matching_fts(self, ft_mask): """Return segments matching a feature mask, a dict of features Args: ft_mask (list): feature mask dict, e.g. {'voi': -1, 'cont': 1}. Returns: list: segments matching `ft_mask`, sorted in reverse order by length """ matching_segs = [ipa for (ipa, fts) in self.segments if fts >= ft_mask] return sorted(matching_segs, key=lambda x: len(x), reverse=True) def compile_regex_from_str(self, pat): """Given a string describing features masks for a sequence of segments, return a compiled regex matching the corresponding strings. 
Args: pat (str): feature masks, each enclosed in square brackets, in which the features are delimited by any standard delimiter. Returns: Pattern: regular expression pattern equivalent to `pat` """ s2n = {'-': -1, '0': 0, '+': 1} seg_res = [] for mat in re.findall(r'\[[^]]+\]+', pat): ft_mask = {k: s2n[v] for (v, k) in re.findall(r'([+-])(\w+)', mat)} segs = self.all_segs_matching_fts(ft_mask) seg_res.append('({})'.format('|'.join(segs))) regexp = ''.join(seg_res) return re.compile(regexp) def segment_to_vector(self, seg, normalize=True): """Given a Unicode IPA segment, return a list of feature specificiations in canonical order. Args: seg (unicode): IPA consonant or vowel normalize: whether to pre-normalize the segment Returns: list: feature specifications ('+'/'-'/'0') in the order from `FeatureTable.names` """ return self.fts(seg, normalize).strings() def word_to_vector_list(self, word, numeric=False, xsampa=False, normalize=True): """Return a list of feature vectors, given a Unicode IPA word. Args: word (unicode): string in IPA (or X-SAMPA, provided `xsampa` is True) numeric (bool): if True, return features as numeric values instead of strings xsampa (bool): whether the word is in X-SAMPA instead of IPA normalize: whether to pre-normalize the word (applies to IPA only) Returns: list: a list of lists of '+'/'-'/'0' or 1/-1/0 """ if xsampa: word = self.xsampa.convert(word) segs = self.word_fts(word, normalize or xsampa) if numeric: tensor = [x.numeric() for x in segs] else: tensor = [x.strings() for x in segs] return tensor
16,647
35.831858
113
py
panphon
panphon-master/panphon/errors.py
# -*- coding: utf-8 -*- class SegmentError(Exception): pass
66
10.166667
30
py
panphon
panphon-master/panphon/segment.py
# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import regex as re class Segment(object): """Models a phonological segment as a vector of features.""" def __init__(self, names, features={}, ftstr='', weights=None): """Construct a `Segment` object Args: names (list): ordered list of feature names features (dict): name-value pairs for specified features ftstr (unicode): a string, each /(+|0|-)\w+/ sequence of which is interpreted as a feature specification weights (float): order list of feature weights/saliences """ self.n2s = {-1: '-', 0: '0', 1: '+'} self.s2n = {k: v for (v, k) in self.n2s.items()} self.names = names """Set a feature specification""" self.data = {} for name in names: if name in features: self.data[name] = features[name] else: self.data[name] = 0 for m in re.finditer(r'(\+|0|-)(\w+)', ftstr): v, k = m.groups() self.data[k] = self.s2n[v] if weights: self.weights = weights else: self.weights = [1 for _ in names] def __getitem__(self, key): """Get a feature specification""" return self.data[key] def __setitem__(self, key, value): """Set a feature specification""" if key in self.names: self.data[key] = value else: raise KeyError('Unknown feature name.') def __repr__(self): """Return a string representation of a feature vector""" pairs = [(self.n2s[self.data[k]], k) for k in self.names] fts = ', '.join(['{}{}'.format(*pair) for pair in pairs]) return '<Segment [{}]>'.format(fts) def __iter__(self): """Return an iterator over the feature names""" return iter(self.names) def items(self): """Return a list of the features as (name, value) pairs""" return [(k, self.data[k]) for k in self.names] def iteritems(self): """Return an iterator over the features as (name, value) pairs""" return ((k, self.data[k]) for k in self.names) def update(self, features): """Update the objects features to match `features`. 
Args: features (dict): dictionary containing the new feature values """ self.data.update(features) def match(self, ft_mask): """Determine whether `self`'s features are a superset of `features`'s Args: features (dict): (name, value) pairs Returns: (bool): True if superset relationship holds else False """ return all([self.data[k] == v for (k, v) in ft_mask.items()]) def __ge__(self, other): """Determine whether `self`'s features are a superset of `other`'s""" return self.match(other) def intersection(self, other): """Return dict of features shared by `self` and `other` Args: other (Segment): object with feature specifications Returns: Segment: (name, value) pairs for each shared feature """ data = dict(set(self.items()) & set(other.items())) names = list(filter(lambda a: a in data, self.names)) return Segment(names, data) def __and__(self, other): """Return dict of features shared by `self` and `other`""" return self.intersection(other) def numeric(self, names=None): if not names: names = self.names """Return feature values as a list of integers""" return [self.data[k] for k in names] def strings(self, names=None): """Return feature values as a list of strings""" if not names: names = self.names return list(map(lambda x: self.n2s[x], self.numeric())) def distance(self, other): """Compute a distance between `self` and `other` Args: other (Segment): object to compare with `self` Returns: int: the sum of the absolute value of the difference between each of the feature values in `self` and `other`. """ return sum(abs(a - b) for (a, b) in zip(self.numeric(), other.numeric())) def norm_distance(self, other): """Compute a distance, normalized by vector length Args: other (Segment): object to compare with `self` Returns: float: the sum of the absolute value of the difference between each of the feature values in `self` and `other`, divided by the number of features per vector. 
""" return self.distance(other) / len(self.names) def __sub__(self, other): """Distance between segments, normalized by vector length""" return self.norm_distance(other) def hamming_distance(self, other): """Compute Hamming distance between feature vectors Args: other (Segment): object to compare with `self` Returns: int: the unnormalized Hamming distance between the two vectors. """ return sum(int(a != b) for (a, b) in zip(self.numeric(), other.numeric())) def norm_hamming_distance(self, other): """Compute Hamming distance, normalized by vector length Args: other (Segment): object to compare with `self` Returns: int: the normalized Hamming distance between the two vectors. """ return self.hamming_distance(other) / len(self.names) def weighted_distance(self, other): """Compute weighted distance Args: other (Segment): object to compare with `self` Returns: float: the weighted distance between the two vectors """ return sum([abs(a - b) * c for (a, b, c) in zip(self.numeric(), other.numeric(), self.weights)]) def norm_weighted_distance(self, other): """Compute weighted distance, normalized by vector length Args: other (Segment): object to compare with `self` Returns: float: the weighted distance between the two vectors, normalized by vector length. """ return self.weighted_distance(other) / sum(self.weights) def specified(self): """Return dictionary of features that are specified '+' or '-' (1 or -1) Returns: dict: each feature in `self` for which the value is not 0 """ return {k: v for (k, v) in self.data.items() if v != 0} def differing_specs(self, other): """Return a list of feature names that differ in their specified values Args: other (Segment): object to compare with `self` Returns: list: the names of the features that differ in the two vectors """ return [k for (k, v) in self.items() if other[k] != v]
7,126
32.460094
82
py
panphon
panphon-master/panphon/sonority.py
from __future__ import print_function, absolute_import, unicode_literals

from . import _panphon
from . import permissive
from ._panphon import FeatureTable, fts


class BoolTree(object):
    """Minimal binary decision tree used to read off sonority classes."""

    def __init__(self, test=None, t_node=None, f_node=None):
        """Build a tree node.

        Args:
            test (bool): decides which child is followed
            t_node (BoolTree/int): child followed when `test` is True
            f_node (BoolTree/int): child followed when `test` is False
        """
        self.test = test
        self.t_node = t_node
        self.f_node = f_node

    def get_value(self):
        """Follow the tree to a leaf and return the leaf's value."""
        branch = self.t_node if self.test else self.f_node
        if isinstance(branch, BoolTree):
            return branch.get_value()
        return branch


class Sonority(object):
    """Determine the sonority of a segment."""

    def __init__(self, feature_set='spe+', feature_model='strict'):
        """Construct a Sonority object.

        Args:
            feature_set (str): feature set to be used by the feature table
            feature_model (str): 'strict' or 'permissive' feature model
        """
        models = {'strict': _panphon.FeatureTable,
                  'permissive': permissive.PermissiveFeatureTable}
        self.fm = models[feature_model](feature_set=feature_set)

    def sonority_from_fts(self, seg):
        """Return the sonority (1-9) of a segment given as features.

        Args:
            seg (list): (value, feature) pairs representing a segment
                (vowel or consonant)

        Returns:
            int: sonority of `seg` between 1 and 9
        """
        def match(m):
            return self.fm.match(fts(m), seg)

        # Decision tree over articulatory features; leaves are sonority values.
        minusHi = BoolTree(match('-hi'), 9, 8)
        minusNas = BoolTree(match('-nas'), 6, 5)
        plusVoi1 = BoolTree(match('+voi'), 4, 3)
        plusVoi2 = BoolTree(match('+voi'), 2, 1)
        plusCont = BoolTree(match('+cont'), plusVoi1, plusVoi2)
        plusSon = BoolTree(match('+son'), minusNas, plusCont)
        minusCons = BoolTree(match('-cons'), 7, plusSon)
        plusSyl = BoolTree(match('+syl'), minusHi, minusCons)
        return plusSyl.get_value()

    def sonority(self, seg):
        """Return the sonority (1-9) of a segment given as a Unicode IPA string.

        Args:
            seg (unicode): IPA consonant or vowel

        Returns:
            int: sonority of `seg` between 1 and 9
        """
        return self.sonority_from_fts(self.fm.fts(seg))
2,874
32.430233
76
py
panphon
panphon-master/panphon/permissive.py
from __future__ import absolute_import, print_function, unicode_literals import codecs import copy import os.path import pkg_resources import yaml import regex as re import unicodecsv as csv from . import _panphon, xsampa def flip(s): return [(b, a) for (a, b) in s] def update_ft_set(seg, dia): seg = dict(flip(seg)) seg.update(dia) return flip(set(seg.items())) class PermissiveFeatureTable(_panphon.FeatureTable): """Encapsulate the segment <=> feature vector mapping implied by the files data/ipa_all.csv and diacritic_definitions.yml. Uses a more permissive algorithm for identifying base+diacritic combinations. To avoid a combinatorial explosion, it never generates all of the base-diacritic- modifier combinations, meaning it cannot easily make statements about the whole set of segments.""" def __init__(self, feature_set='spe+', feature_model='strict', ipa_bases=os.path.join('data', 'ipa_bases.csv'), dias=os.path.join('data', 'diacritic_definitions.yml'), ): """Construct a PermissiveFeatureTable object Args: feature_set (str): feature system (for API compatibility) feature_model (str): feature parsing model (for API compatibility) ipa_bases (str): path from panphon root to CSV file definining features of bases (unmodified consonants and vowels) dias (str): path from panphon root to YAML file containing rules for diacritics and modifiers """ dias = pkg_resources.resource_filename(__name__, dias) self.bases, self.names = self._read_ipa_bases(ipa_bases) self.prefix_dias, self.postfix_dias = self._read_dias(dias) self.pre_regex, self.post_regex, self.seg_regex = self._compile_seg_regexes(self.bases, self.prefix_dias, self.postfix_dias) self.xsampa = xsampa.XSampa() self.weights = self._read_weights() def _read_ipa_bases(self, fn): fn = pkg_resources.resource_filename(__name__, fn) with open(fn, 'rb') as f: reader = csv.reader(f, encoding='utf-8', delimiter=str(',')) names = next(reader)[1:] bases = {} for row in reader: seg, vals = row[0], row[1:] bases[seg] = 
(set(zip(vals, names))) return bases, names def _read_dias(self, fn): prefix, postfix = {}, {} with codecs.open(fn, 'r', 'utf-8') as f: defs = yaml.load(f.read(), Loader=yaml.FullLoader) for dia in defs['diacritics']: if dia['position'] == 'pre': prefix[dia['marker']] = dia['content'] else: postfix[dia['marker']] = dia['content'] return prefix, postfix def _compile_seg_regexes(self, bases, prefix, postfix): pre_jnd = '|'.join(prefix.keys()) post_jnd = '|'.join(postfix.keys()) bases_jnd = '|'.join(bases.keys()) pre_re = '({})'.format(pre_jnd) post_re = '({})'.format(post_jnd) seg_re = '(?P<all>(?P<pre>({})*)(?P<base>{})(?P<post>({})*))'.format(pre_jnd, bases_jnd, post_jnd) return re.compile(pre_re), re.compile(post_re), re.compile(seg_re) def _build_seg_regex(self): return self.seg_regex def _read_weights(self, filename=os.path.join('data', 'feature_weights.csv')): filename = pkg_resources.resource_filename( __name__, filename) with open(filename, 'rb') as f: reader = csv.reader(f, encoding='utf-8') next(reader) weights = [float(x) for x in next(reader)] return weights def fts(self, segment): """Return features corresponding to segment as list of (value, feature) tuples Args: segment (unicode): segment for which features are to be returned as Unicode string Returns: list: None if `segment` cannot be parsed; otherwise, a list of the features of `segment` as (value, feature) pairs """ match = self.seg_regex.match(segment) if match: pre, base, post = match.group('pre'), match.group('base'), match.group('post') seg = copy.deepcopy(self.bases[base]) for m in reversed(pre): seg = update_ft_set(seg, self.prefix_dias[m]) for m in post: seg = update_ft_set(seg, self.postfix_dias[m]) return set(seg) else: return None def fts_match(self, fts_mask, segment): """Evaluates whether a set of features 'match' a segment (are a subset of that segment's features) Args: fts_mask (list): list of (value, feature) tuples segment (unicode): IPA string corresponding to segment (consonant or 
vowel) Returns: bool: None if `segment` cannot be parsed; True if the feature values of `fts_mask` are a subset of those for `segment` """ fts_seg = self.fts(segment) if fts_seg: fts_mask = set(fts_mask) return fts_mask <= fts_seg else: return None def longest_one_seg_prefix(self, word): """Return longest IPA Unicode prefix of `word` Args: word (unicode): word as IPA string Returns: unicode: longest single-segment prefix of `word` """ match = self.seg_regex.match(word) if match: return match.group(0) else: return '' def seg_known(self, segment): """Return True if the segment is valid Args: segment (unicode): a string which may or may not be a valid segment Returns: bool: True if segment can be parsed given the database of bases and diacritics """ if self.seg_regex.match(segment): return True else: return False def filter_segs(self, segs): """Given list of strings, return only those which are valid segments. Args: segs (list): list of unicode values Returns: list: values in `segs` that are valid segments (according to the definititions of bases and diacritics/modifiers known to the object """ def whole_seg(seg): m = self.seg_regex.match(seg) if m and m.group(0) == seg: return True else: return False return list(filter(whole_seg, segs)) def segment_word_segments(self, word): def n2s(s): if s is None: return '' else: return s return ((n2s(m.group('pre')), n2s(m.group('base')), n2s(m.group('post'))) for m in self.seg_regex.finditer(word)) @property def all_segs_matching_fts(self): raise AttributeError("'PermissiveFeatureTable' object has no attribute 'all_segs_matching_fts'")
7,279
34.512195
132
py
panphon
panphon-master/panphon/__init__.py
from __future__ import absolute_import from panphon.featuretable import FeatureTable from panphon._panphon import pat
118
28.75
45
py
panphon
panphon-master/panphon/xsampa.py
from __future__ import absolute_import, print_function, unicode_literals import regex as re import unicodecsv as csv import os.path import pkg_resources class XSampa(object): def __init__(self, delimiter=' '): self.delimiter = delimiter self.xs_regex, self.xs2ipa = self.read_xsampa_table() def read_xsampa_table(self): filename = os.path.join('data', 'ipa-xsampa.csv') filename = pkg_resources.resource_filename(__name__, filename) with open(filename, 'rb') as f: xs2ipa = {x[1]: x[0] for x in csv.reader(f, encoding='utf-8')} xs = sorted(xs2ipa.keys(), key=len, reverse=True) xs_regex = re.compile('|'.join(list(map(re.escape, xs)))) return xs_regex, xs2ipa def convert(self, xsampa): def seg2ipa(seg): ipa = [] while seg: match = self.xs_regex.match(seg) if match: ipa.append(self.xs2ipa[match.group(0)]) seg = seg[len(match.group(0)):] else: seg = seg[1:] return ''.join(ipa) ipasegs = list(map(seg2ipa, xsampa.split(self.delimiter))) return ''.join(ipasegs)
1,224
33.027778
74
py
panphon
panphon-master/panphon/distance.py
from __future__ import (absolute_import, division, print_function, unicode_literals) import os.path from functools import partial import editdistance import numpy as np import regex as re import pkg_resources import yaml from . import _panphon, permissive, featuretable, xsampa def zerodiviszero(f): def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except ZeroDivisionError: return 0 return wrapper def xsampaopt(f): def wrapper(*args, **kwargs): if 'xsampa' in kwargs and kwargs['xsampa']: self, source, target = args source = self.xs.convert(source) target = self.xs.convert(target) args = (self, source, target) return f(*args, **kwargs) return wrapper def ftstr2dict(ftstr): fts = {} for m in re.finditer(r'([-0+])(\w+)', ftstr): v, k = m.groups() fts[k] = {'-': -1, '0': 0, '+': 1}[v] return fts class Distance(object): """Measures of phonological distance.""" def __init__(self, feature_set='spe+', feature_model='segment'): """Construct a `Distance` object Args: feature_set (str): feature set to be used by the `Distance` object feature_model (str): feature parsing model to be used by the `Distance` object """ fm = {'strict': _panphon.FeatureTable, 'permissive': permissive.PermissiveFeatureTable, 'segment': featuretable.FeatureTable} self.fm = fm[feature_model](feature_set=feature_set) self.xs = xsampa.XSampa() self.dolgo_prime = self._dolgopolsky_prime() def _dolgopolsky_prime(self, filename=os.path.join('data', 'dolgopolsky_prime.yml')): """Reads dolgopolsky classes and constructs function cascade Args: filename (str): path to YAML file (from panphon root) containing dolgopolsky classes """ filename = pkg_resources.resource_filename( __name__, filename) with open(filename, 'r') as f: rules = [] dolgo_prime = yaml.load(f.read(), Loader=yaml.FullLoader) for rule in dolgo_prime: rules.append((ftstr2dict(rule['def']), rule['label'])) return rules def map_to_dolgo_prime(self, s): """Map a string to dolgopolsky' classes Args: s (unicode): IPA word Returns: 
(unicode): word with all segments collapsed to D' classes """ segs = [] for seg in self.fm.seg_regex.finditer(s): fts = self.fm.fts(seg.group(0)) for mask, label in self.dolgo_prime: if fts >= mask: segs.append(label) break return ''.join(segs) def levenshtein_distance(self, source, target): """Slow implementation of Levenshtein distance using NumPy arrays Args: source (unicode): source word target (unicode): target word Returns: int: minimum number of Levenshtein edits required to get from `source` to `target` """ if len(source) < len(target): return self.levenshtein_distance(target, source) # So now we have len(source) >= len(target). if len(target) == 0: return len(source) # We call tuple() to force strings to be used as sequences # ('c', 'a', 't', 's') - numpy uses them as values by default. source = np.array(tuple(source)) target = np.array(tuple(target)) # We use a dynamic programming algorithm, but with the # added optimization that we only need the last two rows # of the matrix. previous_row = np.arange(target.size + 1) for s in source: # Insertion (target grows longer than source): current_row = previous_row + 1 # Substitution or matching: # Target and source items are aligned, and either # are different (cost of 1), or are the same (cost of 0). 
current_row[1:] = np.minimum(current_row[1:], np.add(previous_row[:-1], target != s)) # Deletion (target grows shorter than source): current_row[1:] = np.minimum(current_row[1:], current_row[0:-1] + 1) previous_row = current_row return previous_row[-1] def fast_levenshtein_distance(self, source, target): """Wrapper for the distance function in the Levenshtein module Args: source (unicode): source word target (unicode): target word Returns: int: minimum number of Levenshtein edits required to get from `source` to `target` """ return int(editdistance.eval(source, target)) def fast_levenshtein_distance_div_maxlen(self, source, target): """Levenshtein distance divided by maxlen Args: source (unicode): source word target (unicode): target word Returns: int: minimum number of Levenshtein edits required to get from `source` to `target` divided by the length of the longest of these arguments """ maxlen = max(len(source), len(target)) return int(editdistance.eval(source, target)) / maxlen def dolgo_prime_distance(self, source, target): """Levenshtein distance using D' phonetic equivalence classes `source` and `target` are converted to dolgopolsky' equivalence classes (each segment is mapped to the appropriate class) and then the Levenshtein distance between the resulting representations is computed. 
Args: source (unicode): source word target (unicode): target word Returns: int: minimum number of Levenshtein edits required to get from dolgopolsky' versions of `source` to `target` """ source = self.map_to_dolgo_prime(source) target = self.map_to_dolgo_prime(target) return self.fast_levenshtein_distance(source, target) @zerodiviszero @xsampaopt def dolgo_prime_distance_div_maxlen(self, source, target, xsampa=False): """Levenshtein distance using D' classes, normalized by max length `source` and `target` are converted to dolgopolsky' equivalence classes (each segment is mapped to the appropriate class) and then the Levenshtein distance between the resulting representations is computed. The result is divided by the length of the longest argument (`source` or `target`) after mapping to D' classes. Args: source (unicode): source word target (unicode): target word Returns: int: minimum number of Levenshtein edits required to get from dolgopolsky' versions of `source` to `target` """ source = self.map_to_dolgo_prime(source) target = self.map_to_dolgo_prime(target) maxlen = max(len(source), len(target)) return self.fast_levenshtein_distance(source, target) / maxlen def min_edit_distance(self, del_cost, ins_cost, sub_cost, start, source, target): """Return minimum edit distance, parameterized, slow Args: del_cost (function): cost function for deletion ins_cost (function): cost function for insertion sub_cost (function): cost function for substitution start (sequence): start symbol: string for strings, list for lists, list of list for list of lists source (sequence): source string/sequence of feature vectors target (sequence): target string/sequence of feature vectors Returns: Number: minimum edit distance from source to target, with edit costs as defined """ # Get lengths of source and target n, m = len(source), len(target) source, target = start + source, start + target # Create "matrix" d = [] for i in range(n + 1): d.append((m + 1) * [None]) # Initialize "matrix" 
d[0][0] = 0 for i in range(1, n + 1): d[i][0] = d[i - 1][0] + del_cost(source[i]) for j in range(1, m + 1): d[0][j] = d[0][j - 1] + ins_cost(target[j]) # Recurrence relation for i in range(1, n + 1): for j in range(1, m + 1): d[i][j] = min([ d[i - 1][j] + del_cost(source[i]), d[i - 1][j - 1] + sub_cost(source[i], target[j]), d[i][j - 1] + ins_cost(target[j]), ]) return d[n][m] def feature_difference(self, ft1, ft2): """Given two feature values, return the difference divided by 2 *deprecated* Args: ft1 (int): feature value in {1, 0, -1} ft2 (int): feature value in {1, 0, -1} Returns: float: half the absolute value of the difference between ft1 and ft2 """ return abs(ft1 - ft2) / 2 def unweighted_deletion_cost(self, v1, gl_wt=1.0): """Return cost of deleting segment corresponding to feature vector Features are not weighted; features specified as '0' add 0.5 to the raw deletion cost; other features add 1 to the raw deletion cost; the cost is normalized by the number of features Args: v1 (list): vector of feature values global_weight (Number): global weighting factor Returns: float: sum of feature costs divided by the number of features and multiplied by a global weighting factor """ assert isinstance(v1, list) return sum(map(lambda x: 0.5 if x == 0 else 1, v1)) / len(v1) * gl_wt def unweighted_substitution_cost(self, v1, v2): """Given two feature vectors, return the difference Args: v1 (list): vector of feature values v2 (list): vector of feature values Returns: float: sum of the differences between the features in `v1` and `v2`, divided by the number of features """ return sum([abs(ft1 - ft2) / 2 for (ft1, ft2) in zip(v1, v2)]) / len(v1) def unweighted_insertion_cost(self, v1, gl_wt=1.0): """Return cost of inserting segment corresponding to feature vector Features are not weighted; features with the value '0' add 0.5 to the raw cost; other features add 1.0 to the raw cost; the raw cost is then normalized by the number of features Args: v1 (list): vector of feature 
values global_weight (Number): global weighting factor Returns: float: sum of the costs of inserting each of the features in `v1` divided by the number of features in the vector and multiplied by a global weighting factor """ return sum(map(lambda x: 0.5 if x == 0 else 1, v1)) / len(v1) * gl_wt @xsampaopt def feature_edit_distance(self, source, target, xsampa=False): """String edit distance with equally-weighed features. All articulatory features are given equal weight. The distance between an unspecified value and a specified value is smaller than the distance between two features with oppoiste values. Args: source (unicode): source string target (unicode): target string Returns: float: feature edit distance with equally-weighed features an insdel costs set so insdel operations cost as much, roughly, as substituting a whole segment """ return self.min_edit_distance(self.unweighted_deletion_cost, self.unweighted_insertion_cost, self.unweighted_substitution_cost, [[]], self.fm.word_to_vector_list(source, numeric=True), self.fm.word_to_vector_list(target, numeric=True)) @xsampaopt def jt_feature_edit_distance(self, source, target, xsampa=False): """String edit distance with equally-weighed features. All articulatory features are given equal weight. The distance between an unspecified value and a specified value is smaller than the distance between two features with oppoiste values. Insdel costs are cheap. 
Args: source (unicode): source string target (unicode): target string xsampa (bool): source and target are X-SAMPA Returns: float: feature edit distance with equally-weighed features and insdel costs set so insdel operations cost 1/4 as much, roughly, as substituting a whole segment """ return self.min_edit_distance(partial(self.unweighted_deletion_cost, gl_wt=0.25), partial(self.unweighted_insertion_cost, gl_wt=0.25), self.unweighted_substitution_cost, [[]], self.fm.word_to_vector_list(source, numeric=True), self.fm.word_to_vector_list(target, numeric=True)) @zerodiviszero @xsampaopt def feature_edit_distance_div_maxlen(self, source, target, xsampa=False): """Like `Distance.feature_edit_distance` but normalized by maxlen Args: source (unicode): source string target (unicode): target string xsampa (bool): source and target are X-SAMPA Returns: float: feature edit distance with equally-weighed features and insdel costs set so insdel operations cost as much, roughly, as substituting a whole segment Raw result is divided by the length of the longest argument """ source_len, target_len = len(self.fm.word_to_vector_list(source)), len(self.fm.word_to_vector_list(target)) maxlen = max(source_len, target_len) return self.feature_edit_distance(source, target) / maxlen @zerodiviszero @xsampaopt def jt_feature_edit_distance_div_maxlen(self, source, target, xsampa=False): """Like `Distance.feature_edit_distance` but normalized by maxlen Args: source (unicode): source string target (unicode): target string xsampa (bool): source and target are X-SAMPA Returns: float: feature edit distance with equally-weighed features and insdel costs set so insdel operations cost 1/4 as much, roughly, as substituting a whole segment Raw result is divided by the length of the longest argument """ source_len, target_len = len(self.fm.word_to_vector_list(source)), len(self.fm.word_to_vector_list(target)) maxlen = max(source_len, target_len) return self.jt_feature_edit_distance(source, target) / 
maxlen def phoneme_error_rate(self, hyp, ref): """Phoneme error rate over lists of hypothesized and reference strings. Calculates edit distance in terms of phonemes, instead of Unicode characters Normalizes by the total number of phones in the reference Args: hyp (list[unicode]): hypothesized strings ref (list[unicode]): reference strings Returns: float: phoneme error rate (PER) """ if hyp and ref: errors = [] for (h, r) in zip(hyp, ref): phoneme_edits = self.min_edit_distance( lambda v: 1, lambda v: 1, lambda x,y: 0 if x == y else 1, [[]], self.fm.ipa_segs(h), self.fm.ipa_segs(r) ) errors.append(phoneme_edits) total_phones = sum([len(self.fm.ipa_segs(r)) for r in ref]) return sum(errors) / total_phones else: return 0.0 def feature_error_rate(self, hyp, ref, xsampa=False): """Feature error rate over lists of hypothesized and reference strings. Args: hyp (list[unicode]): hypothesized strings ref (list[unicode]): reference strings Returns: float: feature error rate (FER) """ if hyp and ref: errors = sum([self.feature_edit_distance(h, r) for (h, r) in zip(hyp, ref)]) ft = featuretable.FeatureTable() total_phones = sum([len(ft.ipa_segs(r)) for r in ref]) return errors / total_phones else: return 0.0 def hamming_substitution_cost(self, v1, v2): """Substitution cost for feature vectors computed as Hamming distance. Substitution cost for feature vectors computed as Hamming distance and normalized by dividing this result by the length of the vectors. Args: v1 (list): feature vector v2 (list): feature vector Returns: float: Hamming distance between `v1` and `v2` divided by the length of `v1` and `v2` """ diffs = [ft1 != ft2 for (ft1, ft2) in zip(v1, v2)] return sum(diffs) / len(diffs) # Booleans are cohersed to integers. @xsampaopt def hamming_feature_edit_distance(self, source, target, xsampa=False): """String edit distance with equally-weighed features. All articulatory features are given equal weight. 
The distance between an unspecified value and a specified value is smaller than the distance between two features with oppoiste values. The insertion and deletion cost is always one, somewhat favoring substitution. This function has no normalization but should obey the triangle inequality and thus provide a true distance metric. Args: source (unicode): source string target (unicode): target string xsampa (bool): source and target are X-SAMPA Returns: float: Hamming feature edit distance between `source` and `target` with high insdel costs """ return self.min_edit_distance(lambda v: 1, lambda v: 1, self.hamming_substitution_cost, [[]], self.fm.word_to_vector_list(source, numeric=True), self.fm.word_to_vector_list(target, numeric=True)) @xsampaopt def jt_hamming_feature_edit_distance(self, source, target, xsampa=False): """String edit distance with equally-weighed features. All articulatory features are given equal weight. The distance between an unspecified value and a specified value is smaller than the distance between two features with oppoiste values. The insertion and deletion cost is always one, somewhat favoring substitution. This function has no normalization but should obey the triangle inequality and thus provide a true distance metric. 
Args: source (unicode): source string target (unicode): target string xsampa (bool): source and target are X-SAMPA Returns: float: Hamming feature edit distance between `source` and `target` with low insdel costs (1/4 cost of total substitution) """ return self.min_edit_distance(lambda v: 0.25, lambda v: 0.25, self.hamming_substitution_cost, [[]], self.fm.word_to_vector_list(source, numeric=True), self.fm.word_to_vector_list(target, numeric=True)) @zerodiviszero @xsampaopt def hamming_feature_edit_distance_div_maxlen(self, source, target, xsampa=False): """Hamming feature edit distance divded by maxlen The same as `Distance.hamming_feature_edit_distance` except that the resulting value is divided by the length of the longest argument. It therefore does not obey the triangle inequality and is not a proper metric. Args: source (unicode): source string target (unicode): target string xsampa (bool): source and target are X-SAMPA Returns: float: Hamming feature edit distance between `source` and `target` with high insdel costs, normalized by length of longest argument """ source = self.fm.word_to_vector_list(source, numeric=True) target = self.fm.word_to_vector_list(target, numeric=True) maxlen = max(len(source), len(target)) raw = self.min_edit_distance(lambda v: 1, lambda v: 1, self.hamming_substitution_cost, [[]], source, target) return raw / maxlen @xsampaopt def jt_hamming_feature_edit_distance_div_maxlen(self, source, target, xsampa=False): """Hamming feature edit distance divded by maxlen The same as `Distance.hamming_feature_edit_distance` except that the resulting value is divided by the length of the longest argument. It therefore does not obey the triangle inequality and is not a proper metric. 
Args: source (unicode): source string target (unicode): target string xsampa (bool): source and target are X-SAMPA Returns: float: Hamming feature edit distance between `source` and `target` with low insdel costs, normalized by length of longest argument """ source = self.fm.word_to_vector_list(source, numeric=True) target = self.fm.word_to_vector_list(target, numeric=True) maxlen = max(len(source), len(target)) raw = self.min_edit_distance(lambda v: 0.25, lambda v: 0.25, self.hamming_substitution_cost, [[]], source, target) return raw / maxlen def weighted_feature_difference(self, w, ft1, ft2): """Return the weighted difference between two features *deprecated* Args: w (Number): weight ft1 (str): feature value ft2 (str): feature value Returns: float: difference between two features multiplied by weight; raw differences are: '+' - '-' = 1.0 '-' - '+' = 1.0 '+' - '0' = 0.5 '-' - '0' = 0.5 '0' - '+' = 0.5 '0' - '-' = 0.5 Raw differences are multipled by weight `w` """ return self.feature_difference(ft1, ft2) * w def weighted_substitution_cost(self, v1, v2, gl_wt=1.0): """Given two feature vectors, return the difference Args: v1 (list): feature vector v2 (list): feature vector Returns: float: sum of weighted feature difference for each feature pair in zip(v1, v2) """ return sum([abs(ft1 - ft2) * w for (w, ft1, ft2) in zip(self.fm.weights, v1, v2)]) * gl_wt def weighted_insertion_cost(self, v1, gl_wt=1.0): """Return cost of inserting segment corresponding to feature vector Args: v1 (list): feature vector gl_wt (float): global weights Returns: float: sum of weights multiplied by global weight (`gl_wt`) """ assert isinstance(v1, list) return sum(self.fm.weights) * gl_wt def weighted_deletion_cost(self, v1, gl_wt=1.0): """Return cost of deleting segment corresponding to feature vector Args: v1 (list): feature vector gl_wt (float): global weights Returns: float: sum of weights multiplied by global weight (`gl_wt`)""" assert isinstance(v1, list) return sum(self.fm.weights) 
* gl_wt def weighted_feature_edit_distance(self, source, target, xsampa=False): """String edit distance with weighted features The cost of changine an articulatory feature is weighted according to the class of the feature and the subjective probability of the feature changing in phonological alternation and loanword contexts. These weights are stored in `Distance.weights`. Args: source (unicode): source string target (uniocde): target string xsampa (bool): source and target are X-SAMPA Returns: float: feature weighted string edit distance between `source` and `target` """ return self.min_edit_distance(self.weighted_deletion_cost, self.weighted_insertion_cost, self.weighted_substitution_cost, [[]], self.fm.word_to_vector_list(source, numeric=True, xsampa=xsampa), self.fm.word_to_vector_list(target, numeric=True, xsampa=xsampa)) @xsampaopt def jt_weighted_feature_edit_distance(self, source, target, xsampa=False): """String edit distance with weighted features The cost of changine an articulatory feature is weighted according to the class of the feature and the subjective probability of the feature changing in phonological alternation and loanword contexts. These weights are stored in `Distance.weights`. 
Args: source (unicode): source string target (uniocde): target string xsampa (bool): source and target are X-SAMPA Returns: float: feature weighted string edit distance between `source` and `target` """ return self.min_edit_distance(partial(self.weighted_deletion_cost, gl_wt=0.25), partial(self.weighted_insertion_cost, gl_wt=0.25), self.weighted_substitution_cost, [[]], self.fm.word_to_vector_list(source, numeric=True), self.fm.word_to_vector_list(target, numeric=True)) @zerodiviszero @xsampaopt def weighted_feature_edit_distance_div_maxlen(self, source, target, xsampa=False): """String edit distance with weighted features, divided by maxlen The cost of changine an articulatory feature is weighted according to the class of the feature and the subjective probability of the feature changing in phonological alternation and loanword contexts. These weights are stored in `Distance.weights`. Args: source (unicode): source string target (uniocde): target string xsampa (bool): source and target are X-SAMPA Returns: float: feature weighted string edit distance between `source` and `target` divided by the length of the longest of these arguments """ source = self.fm.word_to_vector_list(source, numeric=True, xsampa=xsampa) target = self.fm.word_to_vector_list(target, numeric=True, xsampa=xsampa) maxlen = max(len(source), len(target)) return self.min_edit_distance(self.weighted_deletion_cost, self.weighted_insertion_cost, self.weighted_substitution_cost, [[]], source, target) / maxlen @zerodiviszero @xsampaopt def jt_weighted_feature_edit_distance_div_maxlen(self, source, target, xsampa=False): """String edit distance with weighted features, cheap insdel, divided by maxlen The cost of changine an articulatory feature is weighted according to the class of the feature and the subjective probability of the feature changing in phonological alternation and loanword contexts. These weights are stored in `Distance.weights`. 
This is like `Distance.weighted_feature_edit_distance_div_maxlen` except with low insdel costs (1/4 the cost of a complete substitution). Args: source (unicode): source string target (uniocde): target string xsampa (bool): source and target are X-SAMPA Returns: float: feature weighted string edit distance between `source` and `target` divided by the length of the longest of these arguments """ source = self.fm.word_to_vector_list(source, numeric=True) target = self.fm.word_to_vector_list(target, numeric=True) maxlen = max(len(source), len(target)) return self.min_edit_distance(partial(self.weighted_deletion_cost, gl_wt=0.25), partial(self.weighted_insertion_cost, gl_wt=0.25), self.weighted_substitution_cost, [[]], source, target) / maxlen def partial_hamming_substitution_cost(self, v1, v2): """Substitution cost for feature vectors computed in a manner sensitive to specification. Substitution cost for feature vectors computed so that specified-to-specified costs 1/|V| and specified-to-unspecified costs 1/2*|V|. Args: v1 (list): feature vector v2 (list): feature vector Returns: float: Special edit distance where substitutions are less expensive of one of the features is not specified """ def subcost(ft1, ft2): if ft1 == ft2: return 0 elif ft1 == 0 or ft2 == 0: return 0.5 else: return 1 diffs = [subcost(ft1, ft2) for (ft1, ft2) in zip(v1, v2)] return sum(diffs) / len(diffs) @xsampaopt def partial_hamming_feature_edit_distance(self, source, target, xsampa=False): """String edit distance with insdel cost = 1 and sub costs are 1/22 or 1/44 depending on specification. This method implements a distance metric which is neither identical to hamming distance nor to feature edit distance. The insertion/deletion cost for segment is always 1. The cost of substituting a specified feature for a specified feature is 1/|V| where |V| is the number of dimensions in a feature vector. The cost of substituting a feature specification for an unspecified feature is 1/2*|V|. 
This function has no normalization and should obey the triangle inequality and thus provide a true distance metric. Args: source (unicode): source string target (unicode): target string xsampa (bool): source and target are X-SAMPA Returns: float: Partial hamming feature edit distance between `source` and `target` """ source = self.fm.word_to_vector_list(source, numeric=True) target = self.fm.word_to_vector_list(target, numeric=True) return self.min_edit_distance(lambda v: 1, lambda v: 1, self.partial_hamming_substitution_cost, [[]], source, target) @zerodiviszero @xsampaopt def partial_hamming_feature_edit_distance_div_maxlen(self, source, target, xsampa=False): """String edit distance with insdel cost = 1 and sub costs are 1/22 or 1/44 depending on specification. This method implements a distance metric which is neither identical to hamming distance nor to feature edit distance and normalizes it by the longest input. The insertion/deletion cost for segment is always 1. The cost of substituting a specified feature for a specified feature is 1/|V| where |V| is the number of dimensions in a feature vector. The cost of substituting a feature specification for an unspecified feature is 1/2*|V|. This method is normalized and does not satisfy the triangle inequality. It is thus not a true distance metric. Args: source (unicode): source string target (unicode): target string xsampa (bool): source and target are X-SAMPA Returns: float: Normalized partial hamming feature edit distance between `source` and `target` """ source = self.fm.word_to_vector_list(source, numeric=True) target = self.fm.word_to_vector_list(target, numeric=True) maxlen = max(len(source), len(target)) return self.min_edit_distance(lambda v: 1, lambda v: 1, self.partial_hamming_substitution_cost, [[]], source, target) / maxlen
34,379
40.026253
115
py
panphon
panphon-master/panphon/bin/validate_ipa.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function from __future__ import unicode_literals import panphon import regex as re import sys class Validator(object): def __init__(self, infile=sys.stdin): """Validate Unicode IPA from file relative to panphon database. infile -- File from which input is taken; by default, STDIN. """ self.ws_punc_regex = re.compile(r'[," \t\n]', re.V1 | re.U) self.ft = panphon.FeatureTable() self._validate_file(infile) def _validate_file(self, infile): for line in infile: line = unicode(line, 'utf-8') self.validate_line(line) def validate_line(self, line): """Validate Unicode IPA string relative to panphon. line -- String of IPA characters. Can contain whitespace and limited punctuation. """ line0 = line pos = 0 while line: seg_m = self.ft.seg_regex.match(line) wsp_m = self.ws_punc_regex.match(line) if seg_m: length = len(seg_m.group(0)) line = line[length:] pos += length elif wsp_m: length = len(wsp_m.group(0)) line = line[length:] pos += length else: msg = 'IPA not valid at position {} in "{}".'.format(pos, line0.strip()) # msg = msg.decode('utf-8') print(msg, file=sys.stderr) line = line[1:] pos += 1 if __name__ == '__main__': validator = Validator(sys.stdin)
1,637
28.781818
88
py
panphon
panphon-master/panphon/bin/align_wordlists.py
#!/usr/bin/env python from __future__ import print_function import unicodecsv as csv import argparse import panphon import Levenshtein import munkres import panphon.distance from functools import partial def levenshtein_dist(_, a, b): return Levenshtein.distance(a, b) def dogol_leven_dist(_, a, b): return Levenshtein.distance(dist.map_to_dogol_prime(a), dist.map_to_dogol_prime(b)) def feature_hamming_dist(dist, a, b): return dist.feature_edit_distance(a, b) def feature_weighted_dist(dist, a, b): return dist.weighted_feature_edit_distance(a, b) def construct_cost_matrix(words_a, words_b, dist): def matrix_row(word_a, words_b): return [dist(word_a, word_b) for (word_b, _) in words_b] return [matrix_row(word_a, words_b) for (word_a, _) in words_a] def score(indices): pairs, errors = 0, 0 for row, column in indices: pairs += 1 if row != column: errors += 1 return pairs, errors def main(wordlist1, wordlist2, dist_funcs): with open(wordlist1, 'rb') as file_a, open(wordlist2, 'rb') as file_b: reader_a = csv.reader(file_a, encoding='utf-8') reader_b = csv.reader(file_b, encoding='utf-8') print('Reading word lists...') words = zip([(w, g) for (g, w) in reader_a], [(w, g) for (g, w) in reader_b]) words_a, words_b = zip(*[(a, b) for (a, b) in words if a and b]) print('Constructing cost matrix...') matrix = construct_cost_matrix(words_a, words_b, dist_funcs) m = munkres.Munkres() print('Computing matrix using Hungarian Algorithm...') indices = m.compute(matrix) print(score(indices)) print('Done.') if __name__ == '__main__': parser = argparse.ArgumentParser(usage='Align two lists of "cognates" using a specified distance metric.') parser.add_argument('wordlists', nargs=2, help='Filenames of two wordlists in corresponding order.') parser.add_argument('-d', '--dist', default='hamming', help='Distance metric (e.g. 
Hamming).') args = parser.parse_args() dists = {'levenshtein': levenshtein_dist, 'dogol-leven': dogol_leven_dist, 'hamming': feature_hamming_dist, 'weighted': feature_weighted_dist} dist = panphon.distance.Distance() dist_funcs = partial(dists[args.dist], dist) main(args.wordlists[0], args.wordlists[1], dist_funcs)
2,458
32.22973
110
py
panphon
panphon-master/panphon/bin/generate_ipa_all.py
#!/usr/bin/env python from __future__ import print_function, unicode_literals import argparse import codecs import copy import yaml import unicodecsv as csv class Segment(object): """Class modeling phonological segment.""" def __init__(self, form, features): """Construct Segment objectself. Args: form (string): the segment as ipa features (list): the segment as feature_names """ self.form = form self.features = features def __repr__(self): """Output string representation of Segment.""" return 'Segment("{}", {})'.format(self.form, repr(self.features)).encode('utf-8') def feature_vector(self, feature_names): """Return feature vector for segment. Args: feature_names (list): ordered names of features Returns: list: feature values """ return [self.features[ft] for ft in feature_names] class Diacritic(object): """An object encapsulating a diacritics properties.""" def __init__(self, marker, position, conditions, exclude, content): """Construct a diacritic object. Args: marker (unicode): the string form of the diacritic position (str): 'pre' or 'post', determining whether the diacritic attaches before or after the base conditions (list): feature specification on which application of diacritic is conditional exclude (list): conditions under which the diacritic will not be applied] content (list): feature specification that will override existing feature specifications when diacritics is applied """ self.marker = marker assert position in ['pre', 'post'] self.position = position self.exclude = exclude self.conditions = conditions self.content = content def match(self, segment): if segment.form not in self.exclude: for condition in self.conditions: if set(condition.items()) <= set(segment.features.items()): return True return False else: return False def apply(self, segment): if self.match(segment): new_seg = copy.deepcopy(segment) for k, v in self.content.items(): new_seg.features[k] = v if self.position == 'post': new_seg.form = '{}{}'.format(new_seg.form, self.marker) 
else: new_seg.form = '{}{}'.format(self.marker, new_seg.form) return new_seg else: return None class Combination(object): def __init__(self, diacritics, name, sequence): self.name = name self.sequence = [diacritics[d] for d in sequence] def apply(self, segment): new_seg = copy.deepcopy(segment) for dia in self.sequence: if dia.match(new_seg): new_seg = dia.apply(new_seg) else: return None return new_seg def read_ipa_bases(ipa_bases): segments = [] with open(ipa_bases, 'rb') as f: dictreader = csv.DictReader(f, encoding='utf=8') for record in dictreader: form = record['ipa'] features = {k: v for k, v in record.items() if k != 'ipa'} segments.append(Segment(form, features)) return segments def parse_dia_defs(dia_defs): defs = yaml.load(codecs.open(dia_defs, "r", "utf-8").read(), Loader=yaml.FullLoader) diacritics = {} for dia in defs['diacritics']: if 'exclude' in dia: exclude = dia['exclude'] else: exclude = [] diacritics[dia['name']] = Diacritic(dia['marker'], dia['position'], dia['conditions'], exclude, dia['content']) combinations = [] for comb in defs['combinations']: combinations.append(Combination(diacritics, comb['name'], comb['combines'])) return diacritics, combinations def sort_all_segments(sort_order, all_segments): all_segments_list = list(all_segments) field_order = reversed(yaml.load(open(sort_order, 'r').read(), Loader=yaml.FullLoader)) for field in field_order: all_segments_list.sort(key=lambda seg: seg.features[field['name']], reverse=field['reverse']) return all_segments_list def write_ipa_all(ipa_bases, ipa_all, all_segments, sort_order): with open(ipa_bases, 'rb') as f: reader = csv.reader(f, encoding='utf-8') fieldnames = next(reader) with open(ipa_all, 'wb') as f: writer = csv.DictWriter(f, encoding='utf-8', fieldnames=fieldnames) writer.writerow({k: k for k in fieldnames}) all_segments_list = sort_all_segments(sort_order, all_segments) for segment in all_segments_list: fields = copy.copy(segment.features) fields['ipa'] = segment.form 
writer.writerow(fields) def main(ipa_bases, ipa_all, dia_defs, sort_order): segments = read_ipa_bases(ipa_bases) diacritics, combinations = parse_dia_defs(dia_defs) all_segments = set(segments) for diacritic in diacritics.values(): for segment in segments: new_seg = diacritic.apply(segment) if new_seg is not None: all_segments.add(new_seg) for combination in combinations: for segment in segments: new_seg = combination.apply(segment) if new_seg is not None: all_segments.add(new_seg) write_ipa_all(ipa_bases, ipa_all, all_segments, sort_order) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('bases', help='File containing IPA bases (ipa_bases.csv)') parser.add_argument('all', help='File to which all IPA segments is to be written (ipa_all.csv)') parser.add_argument('-d', '--dia', required=True, help='Diacritic definition file (default=diacritic_definitions.yml)') parser.add_argument('-s', '--sort-order', required=True, help='File definiting sort order.') args = parser.parse_args() main(args.bases, args.all, args.dia, args.sort_order)
6,485
34.442623
123
py
panphon
panphon-master/panphon/test/test_distance.py
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals, division, absolute_import import unittest import panphon from panphon import distance feature_model = 'segment' dim = 24 class TestLevenshtein(unittest.TestCase): def setUp(self): self.dist = distance.Distance(feature_model=feature_model) def test_trivial1(self): self.assertEqual(self.dist.levenshtein_distance('pop', 'pʰop'), 1) def test_trivial2(self): self.assertEqual(self.dist.levenshtein_distance('pop', 'pʰom'), 2) class TestDolgoPrime(unittest.TestCase): def setUp(self): self.dist = distance.Distance(feature_model=feature_model) def test_trivial1(self): self.assertEqual(self.dist.dolgo_prime_distance('pop', 'bob'), 0) def test_trivial2(self): self.assertEqual(self.dist.dolgo_prime_distance('pop', 'bab'), 0) class TestUnweightedFeatureEditDist(unittest.TestCase): def setUp(self): self.dist = distance.Distance(feature_model=feature_model) def test_unweighted_substitution_cost(self): self.assertEqual(self.dist.unweighted_substitution_cost([0, 1, -1], [0, 1, 1]) * 3, 1) def test_unweighted_deletion_cost(self): self.assertEqual(self.dist.unweighted_deletion_cost([1, -1, 1, 0]) * 4, 3.5) def test_trivial1(self): self.assertEqual(self.dist.feature_edit_distance('bim', 'pym') * dim, 3) def test_trivial2(self): self.assertEqual(self.dist.feature_edit_distance('ti', 'tʰi') * dim, 1) def test_xsampa(self): self.assertEqual(self.dist.feature_edit_distance('t i', 't_h i', xsampa=True) * dim, 1) def test_xsampa2(self): self.assertEqual(self.dist.feature_edit_distance('p u n', 'p y n', xsampa=True) * dim, 1) def test_xsampa3(self): ipa = self.dist.jt_feature_edit_distance_div_maxlen('kʰin', 'pʰin') xs = self.dist.jt_feature_edit_distance_div_maxlen('k_h i n', 'p_h i n', xsampa=True) self.assertEqual(ipa, xs) class TestWeightedFeatureEditDist(unittest.TestCase): def setUp(self): self.dist = distance.Distance(feature_model=feature_model) def test_trivial1(self): 
self.assertGreater(self.dist.weighted_feature_edit_distance('ti', 'tʰu'), self.dist.weighted_feature_edit_distance('ti', 'tʰi')) def test_trivial2(self): self.assertGreater(self.dist.weighted_feature_edit_distance('ti', 'te'), self.dist.weighted_feature_edit_distance('ti', 'tḭ')) class TestHammingFeatureEditDistanceDivMaxlen(unittest.TestCase): def setUp(self): self.dist = distance.Distance(feature_model=feature_model) def test_hamming_substitution_cost(self): self.assertEqual(self.dist.hamming_substitution_cost(['+', '-', '0'], ['0', '-', '0']) * 3, 1) def test_trivial1(self): self.assertEqual(self.dist.hamming_feature_edit_distance_div_maxlen('pa', 'ba') * dim * 2, 1) def test_trivial2(self): self.assertEqual(self.dist.hamming_feature_edit_distance_div_maxlen('i', 'pi') * 2, 1) def test_trivial3(self): self.assertEqual(self.dist.hamming_feature_edit_distance_div_maxlen('sɛks', 'ɛɡz'), (1 + (1 / dim) + (1 / dim)) / 4) def test_trivial4(self): self.assertEqual(self.dist.hamming_feature_edit_distance_div_maxlen('k', 'ɡ'), 1 / dim) class TestMany(unittest.TestCase): def setUp(self): self.dist = distance.Distance(feature_model=feature_model) def test_fast_levenshtein_distance(self): self.assertEqual(self.dist.fast_levenshtein_distance('p', 'b'), 1) def test_fast_levenshtein_distance_div_maxlen(self): self.assertEqual(self.dist.fast_levenshtein_distance_div_maxlen('p', 'b'), 1) def test_dolgo_prime_distance(self): self.assertEqual(self.dist.dolgo_prime_distance('p', 'b'), 0) def test_dolgo_prime_div_maxlen(self): self.assertEqual(self.dist.dolgo_prime_distance_div_maxlen('p', 'b'), 0) def test_feature_edit_distance(self): self.assertEqual(self.dist.feature_edit_distance('p', 'b'), 1 / dim) def test_jt_feature_edit_distance(self): self.assertEqual(self.dist.jt_feature_edit_distance('p', 'b'), 1 / dim) def test_feature_edit_distance_div_maxlen(self): self.assertEqual(self.dist.feature_edit_distance_div_maxlen('p', 'b'), 1 / dim) def 
test_jt_feature_edit_distance_div_maxlen(self): self.assertEqual(self.dist.jt_feature_edit_distance_div_maxlen('p', 'b'), 1 / dim) def test_hamming_feature_edit_distance(self): self.assertEqual(self.dist.hamming_feature_edit_distance('p', 'b'), 1 / dim) def test_jt_hamming_feature_edit_distance(self): self.assertEqual(self.dist.jt_hamming_feature_edit_distance('p', 'b'), 1 / dim) def test_hamming_feature_edit_distance_div_maxlen(self): self.assertEqual(self.dist.hamming_feature_edit_distance_div_maxlen('p', 'b'), 1 / dim) def test_jt_hamming_feature_edit_distance_div_maxlen(self): self.assertEqual(self.dist.jt_hamming_feature_edit_distance_div_maxlen('p', 'b'), 1 / dim) class TestXSampa(unittest.TestCase): def setUp(self): self.dist = distance.Distance(feature_model=feature_model) self.ft = panphon.FeatureTable() def test_feature_edit_distance(self): self.assertEqual(self.dist.feature_edit_distance("p_h", "p", xsampa=True), 1 / dim)
5,453
37.408451
124
py
panphon
panphon-master/panphon/test/test_permissive_methods.py
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals, division, absolute_import import unittest from panphon import permissive dim = 24 class TestFeatureTableAPI(unittest.TestCase): def setUp(self): self.ft = permissive.PermissiveFeatureTable() def test_fts(self): self.assertEqual(len(self.ft.fts('u')), dim) # def test_seg_fts(self): # self.assertEqual(len(self.ft.seg_fts('p')), 24) def test_match(self): self.assertTrue(self.ft.match(self.ft.fts('u'), self.ft.fts('u'))) def test_fts_match(self): self.assertTrue(self.ft.fts_match(self.ft.fts('u'), 'u')) def test_longest_one_seg_prefix(self): self.assertEqual(self.ft.longest_one_seg_prefix('pap'), 'p') def test_validate_word(self): self.assertTrue(self.ft.validate_word('tik')) def test_segs(self): self.assertEqual(self.ft.segs('tik'), ['t', 'i', 'k']) def test_word_fts(self): self.assertEqual(len(self.ft.word_fts('tik')), 3) def test_seg_known(self): self.assertTrue(self.ft.seg_known('t')) def test_filter_string(self): self.assertEqual(len(self.ft.filter_string('pup$')), 3) def test_segs_safe(self): self.assertEqual(len(self.ft.segs_safe('pup$')), 4) def test_filter_segs(self): self.assertEqual(len(self.ft.filter_segs(['p', 'u', 'p', '$'])), 3) def test_fts_intersection(self): self.assertIn(('-', 'voi'), self.ft.fts_intersection(['p', 't', 'k'])) def test_fts_match_any(self): self.assertTrue(self.ft.fts_match_any([('-', 'voi')], ['p', 'o', '$'])) def test_fts_match_all(self): self.assertTrue(self.ft.fts_match_all([('-', 'voi')], ['p', 't', 'k'])) def test_fts_contrast2(self): self.assertTrue(self.ft.fts_contrast2([], 'voi', ['p', 'b', 'r'])) def test_fts_count(self): self.assertEqual(self.ft.fts_count([('-', 'voi')], ['p', 't', 'k', 'r']), 3) self.assertEqual(self.ft.fts_count([('-', 'voi')], ['r', '$']), 0) def test_match_pattern(self): self.assertEqual(len(self.ft.match_pattern([set([('-', 'voi')])], 'p')), 1) def test_match_pattern_seq(self): 
self.assertTrue(self.ft.match_pattern_seq([set([('-', 'voi')])], 'p')) # def test_all_segs_matching_fts(self): # self.assertIn('p', self.ft.all_segs_matching_fts([('-', 'voi')])) def test_compile_regex_from_str(self): pass def test_segment_to_vector(self): self.assertEqual(len(self.ft.segment_to_vector('p')), dim) def test_word_to_vector_list(self): self.assertEqual(len(self.ft.word_to_vector_list('pup')), 3)
2,690
31.421687
84
py
panphon
panphon-master/panphon/test/test_panphon_methods.py
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals, division, absolute_import import unittest import panphon._panphon as panphon dim = 24 class TestFeatureTableAPI(unittest.TestCase): def setUp(self): self.ft = panphon.FeatureTable() def test_fts(self): self.assertEqual(len(self.ft.fts('u')), 24) # def test_seg_fts(self): # self.assertEqual(len(self.ft.seg_fts('p')), 24) def test_match(self): self.assertTrue(self.ft.match(self.ft.fts('u'), self.ft.fts('u'))) def test_fts_match(self): self.assertTrue(self.ft.fts_match(self.ft.fts('u'), 'u')) def test_longest_one_seg_prefix(self): self.assertEqual(self.ft.longest_one_seg_prefix('pap'), 'p') def test_validate_word(self): self.assertTrue(self.ft.validate_word('tik')) def test_segs(self): self.assertEqual(self.ft.segs('tik'), ['t', 'i', 'k']) def test_word_fts(self): self.assertEqual(len(self.ft.word_fts('tik')), 3) def test_seg_known(self): self.assertTrue(self.ft.seg_known('t')) def test_filter_string(self): self.assertEqual(len(self.ft.filter_string('pup$')), 3) def test_segs_safe(self): self.assertEqual(len(self.ft.segs_safe('pup$')), 4) def test_filter_segs(self): self.assertEqual(len(self.ft.filter_segs(['p', 'u', 'p', '$'])), 3) def test_fts_intersection(self): self.assertIn(('-', 'voi'), self.ft.fts_intersection(['p', 't', 'k'])) def test_fts_match_any(self): self.assertTrue(self.ft.fts_match_any([('-', 'voi')], ['p', 'o', '$'])) def test_fts_match_all(self): self.assertTrue(self.ft.fts_match_all([('-', 'voi')], ['p', 't', 'k'])) def test_fts_contrast2(self): self.assertTrue(self.ft.fts_contrast2([], 'voi', ['p', 'b', 'r'])) def test_fts_count(self): self.assertEqual(self.ft.fts_count([('-', 'voi')], ['p', 't', 'k', 'r']), 3) self.assertEqual(self.ft.fts_count([('-', 'voi')], ['r', '$']), 0) def test_match_pattern(self): self.assertEqual(len(self.ft.match_pattern([set([('-', 'voi')])], 'p')), 1) def test_match_pattern_seq(self): self.assertTrue(self.ft.match_pattern_seq([set([('-', 
'voi')])], 'p')) def test_all_segs_matching_fts(self): self.assertIn('p', self.ft.all_segs_matching_fts([('-', 'voi')])) def test_compile_regex_from_str(self): pass def test_segment_to_vector(self): self.assertEqual(len(self.ft.segment_to_vector('p')), 24) def test_word_to_vector_list(self): self.assertEqual(len(self.ft.word_to_vector_list('pup')), 3)
2,675
31.240964
84
py
panphon
panphon-master/panphon/test/test_featuretable.py
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals, division, absolute_import import unittest import panphon.featuretable class TestFeatureTable(unittest.TestCase): LONG_IPA_STRING = 'tɐʉmɐtɐ.ɸɐkɐtɐŋihɐŋɐ.koːɐʉɐʉ.ɔ.tɐmɐtɛɐ.tʉɾi.pʉkɐkɐ.piki.mɐʉŋɐ.hɔɾɔ.nʉkʉ.pɔkɐi.ɸɛnʉɐ.ki.tɐnɐ.tɐhʉ' TIMING_REPETITIONS = 1000 def setUp(self): self.ft = panphon.featuretable.FeatureTable() def test_fts_contrast2(self): inv = 'p t k b d ɡ a e i o u'.split(' ') self.assertTrue(self.ft.fts_contrast({'syl': -1}, 'voi', inv)) self.assertFalse(self.ft.fts_contrast({'syl': 1}, 'cor', inv)) self.assertTrue(self.ft.fts_contrast({'ant': 1, 'cor': -1}, 'voi', inv)) def test_longest_one_seg_prefix(self): prefix = self.ft.longest_one_seg_prefix('pʰʲaŋ') self.assertEqual(prefix, 'pʰʲ') def test_match_pattern(self): self.assertTrue(self.ft.match_pattern([{'voi': -1}, {'voi': 1}, {'voi': -1}], 'pat')) def test_all_segs_matching_fts(self): segs = self.ft.all_segs_matching_fts({'syl': -1, 'son': 1}) self.assertIn('m', segs) self.assertIn('n', segs) self.assertIn('ŋ', segs) self.assertIn('m̥', segs) self.assertIn('l', segs) # self.assertNotIn('a', segs) # self.assertNotIn('s', segs) def test_word_to_vector_list_aspiration(self): self.assertNotEqual(self.ft.word_to_vector_list(u'pʰ'), self.ft.word_to_vector_list(u'p')) def test_word_to_vector_list_aspiration_xsampa(self): self.assertNotEqual(self.ft.word_to_vector_list(u'p_h', xsampa=True), self.ft.word_to_vector_list(u'p', xsampa=True)) def test_word_to_vector_list_aspiration_xsampa_len(self): self.assertEqual(len(self.ft.word_to_vector_list(u'p_h', xsampa=True)), 1) def test_normalization(self): u1 = '\u00e3' u2 = 'a\u0303' self.assertEqual(self.ft.ipa_segs(u1), self.ft.ipa_segs(u2)) def test_ipa_segs_timing(self): for _ in range(self.TIMING_REPETITIONS): self.ft.ipa_segs(self.LONG_IPA_STRING) def test_segs_safe_timing(self): for _ in range(self.TIMING_REPETITIONS): self.ft.segs_safe(self.LONG_IPA_STRING) class 
TestIpaRe(unittest.TestCase): def setUp(self): self.ft = panphon.featuretable.FeatureTable() def test_compile_regex_from_str1(self): r = self.ft.compile_regex_from_str('[-son -cont][+syl -hi -lo]') self.assertIsNotNone(r.match('tʰe')) self.assertIsNone(r.match('pi')) def test_compile_regex_from_str2(self): r = self.ft.compile_regex_from_str('[-son -cont][+son +cont]') self.assertIsNotNone(r.match('pj')) self.assertIsNone(r.match('ts')) class TestXSampa(unittest.TestCase): def setUp(self): self.ft = panphon.featuretable.FeatureTable() def test_affricates(self): self.assertNotEqual(self.ft.word_to_vector_list(u'tS', xsampa=True), self.ft.word_to_vector_list(u't S', xsampa=True)) if __name__ == '__main__': unittest.main()
3,326
34.774194
120
py
panphon
panphon-master/panphon/test/test_sonority.py
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals, division, absolute_import import unittest from panphon import sonority class TestSonority(unittest.TestCase): def setUp(self): self.son = sonority.Sonority(feature_model='permissive') def test_sonority_nine(self): segs = ['a', 'ɑ', 'æ', 'ɒ', 'e', 'o̥'] scores = [9] * 6 self.assertEqual(list(map(self.son.sonority, segs)), scores) def test_sonority_eight(self): segs = ['i', 'y', 'ɨ', 'ʉ', 'ɯ', 'u'] scores = [8] * 6 self.assertEqual(list(map(self.son.sonority, segs)), scores) def test_sonority_seven(self): segs = ['j', 'w', 'ʋ', 'ɰ', 'ɹ', 'e̯'] scores = [7] * 6 self.assertEqual(list(map(self.son.sonority, segs)), scores) def test_sonority_six(self): segs = ['l', 'ɭ', 'r', 'ɾ'] scores = [6] * 4 self.assertEqual(list(map(self.son.sonority, segs)), scores) def test_sonority_five(self): segs = ['n', 'm', 'ŋ', 'ɴ'] scores = [5] * 4 self.assertEqual(list(map(self.son.sonority, segs)), scores) def test_sonority_four(self): segs = ['v', 'z', 'ʒ', 'ɣ'] scores = [4] * 4 self.assertEqual(list(map(self.son.sonority, segs)), scores) def test_sonority_three(self): segs = ['f', 's', 'x', 'ħ', 'ʃ'] scores = [3] * 5 self.assertEqual(list(map(self.son.sonority, segs)), scores) def test_sonority_two(self): segs = ['b', 'ɡ', 'd', 'ɢ'] scores = [2] * 4 self.assertEqual(list(map(self.son.sonority, segs)), scores) def test_sonority_one(self): segs = ['p', 'k', 'c', 'q'] scores = [1] * 4 self.assertEqual(list(map(self.son.sonority, segs)), scores)
1,803
30.649123
82
py
panphon
panphon-master/panphon/test/test_panphon.py
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals, division, absolute_import import unittest from panphon import _panphon class TestFeatureTable(unittest.TestCase): def setUp(self): self.ft = _panphon.FeatureTable() def test_fts_contrast2(self): inv = 'p t k b d ɡ a e i o u'.split(' ') self.assertTrue(self.ft.fts_contrast2([('-', 'syl')], 'voi', inv)) self.assertFalse(self.ft.fts_contrast2([('+', 'syl')], 'cor', inv)) self.assertTrue(self.ft.fts_contrast2(_panphon.fts('+ant -cor'), 'voi', inv)) def test_fts(self): fts = self.ft.fts('ŋ') self.assertIn(('+', 'voi'), fts) self.assertIn(('-', 'syl'), fts) self.assertIn(('+', 'hi'), fts) self.assertIn(('-', 'lo'), fts) self.assertIn(('+', 'nas'), fts) def test_longest_one_seg_prefix(self): prefix = self.ft.longest_one_seg_prefix('pʰʲaŋ') self.assertEqual(prefix, 'pʰʲ') def test_match_pattern(self): self.assertTrue(self.ft.match_pattern([[('-', 'voi')], [('+', 'voi')], [('-', 'voi')]], 'pat')) def test_all_segs_matching_fts(self): segs = self.ft.all_segs_matching_fts([('-', 'syl'), ('+', 'son')]) self.assertIn('m', segs) self.assertIn('n', segs) self.assertIn('ŋ', segs) self.assertIn('m̥', segs) self.assertIn('l', segs) def test_word_to_vector_list_aspiration(self): self.assertNotEqual(self.ft.word_to_vector_list(u'pʰ'), self.ft.word_to_vector_list(u'p')) def test_word_to_vector_list_aspiration_xsampa(self): self.assertNotEqual(self.ft.word_to_vector_list(u'p_h', xsampa=True), self.ft.word_to_vector_list(u'p', xsampa=True)) def test_word_to_vector_list_aspiration_xsampa_len(self): self.assertEqual(len(self.ft.word_to_vector_list(u'p_h', xsampa=True)), 1) class TestIpaRe(unittest.TestCase): def setUp(self): self.ft = _panphon.FeatureTable() def test_compile_regex_from_str1(self): r = self.ft.compile_regex_from_str('[-son -cont][+syl -hi -lo]') self.assertIsNotNone(r.match('tʰe')) self.assertIsNone(r.match('pi')) def test_compile_regex_from_str2(self): r = self.ft.compile_regex_from_str('[-son 
-cont][+son +cont]') self.assertIsNotNone(r.match('pj')) self.assertIsNone(r.match('ts')) class TestXSampa(unittest.TestCase): def setUp(self): self.ft = _panphon.FeatureTable() def test_affricates(self): self.assertNotEqual(self.ft.word_to_vector_list(u'tS', xsampa=True), self.ft.word_to_vector_list(u't S', xsampa=True)) if __name__ == '__main__': unittest.main()
2,836
33.597561
85
py
panphon
panphon-master/panphon/test/test_xsampa.py
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals, division, absolute_import import unittest import panphon import panphon.xsampa class TestXSampa(unittest.TestCase): def setUp(self): self.ft = panphon.FeatureTable() self.xs = panphon.xsampa.XSampa() def test_ipa_equals_xsampa(self): self.assertEqual('kʰat', self.xs.convert('k_h a t')) def test_ipa_vector_equals_xsampa_vector(self): ipa = self.ft.word_to_vector_list('kʰat', xsampa=False) xs = self.ft.word_to_vector_list('k_h a t', xsampa=True) self.assertEqual(ipa, xs)
621
27.272727
82
py
HIBPool
HIBPool-main/GIB.py
#!/usr/bin/env python # coding: utf-8 # In[ ]: from __future__ import print_function import numpy as np import pprint as pp from copy import deepcopy import pickle from numbers import Number from collections import OrderedDict import itertools import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F import torch.optim as optim from torch.optim.lr_scheduler import ReduceLROnPlateau, LambdaLR from torch.distributions import constraints from torch.distributions.normal import Normal from torch.distributions.multivariate_normal import MultivariateNormal from torch.distributions.distribution import Distribution from torch.distributions.utils import broadcast_all import sys, os sys.path.append(os.path.join(os.path.dirname(__file__), '..')) from pytorch_net.modules import get_Layer, load_layer_dict, Simple_2_Symbolic from pytorch_net.util import forward, Loss_Fun, get_activation, get_criterion, get_criteria_value, get_optimizer, get_full_struct_param, plot_matrices, get_model_DL, PrecisionFloorLoss, get_list_DL, init_weight from pytorch_net.util import Early_Stopping, Performance_Monitor, record_data, to_np_array, to_Variable, make_dir, formalize_value, RampupLR, Transform_Label, view_item, load_model, save_model, to_cpu_recur, filter_kwargs # ## Training functionality: # In[ ]: def train( model, X = None, y = None, train_loader = None, validation_data = None, validation_loader = None, criterion = nn.MSELoss(), inspect_interval = 10, isplot = False, is_cuda = None, **kwargs ): """Training function for generic models. 
"model" can be a single model or a ordered list of models""" def get_regularization(model, loss_epoch, **kwargs): """Compute regularization.""" reg_dict = kwargs["reg_dict"] if "reg_dict" in kwargs else None reg = to_Variable([0], is_cuda = is_cuda) if reg_dict is not None: for reg_type, reg_coeff in reg_dict.items(): # Setting up regularization strength: if isinstance(reg_coeff, Number): reg_coeff_ele = reg_coeff else: if loss_epoch < len(reg_coeff): reg_coeff_ele = reg_coeff[loss_epoch] else: reg_coeff_ele = reg_coeff[-1] # Accumulate regularization: reg = reg + model.get_regularization(source=[reg_type], mode=reg_mode, **kwargs) * reg_coeff_ele return reg if is_cuda is None: if X is None and y is None: assert train_loader is not None is_cuda = train_loader.dataset.tensors[0].is_cuda else: is_cuda = X.is_cuda # Optimization kwargs: epochs = kwargs["epochs"] if "epochs" in kwargs else 10000 lr = kwargs["lr"] if "lr" in kwargs else 5e-3 lr_rampup_steps = kwargs["lr_rampup"] if "lr_rampup" in kwargs else 200 optim_type = kwargs["optim_type"] if "optim_type" in kwargs else "adam" optim_kwargs = kwargs["optim_kwargs"] if "optim_kwargs" in kwargs else {} scheduler_type = kwargs["scheduler_type"] if "scheduler_type" in kwargs else "ReduceLROnPlateau" gradient_noise = kwargs["gradient_noise"] if "gradient_noise" in kwargs else None data_loader_apply = kwargs["data_loader_apply"] if "data_loader_apply" in kwargs else None # Inspection kwargs: inspect_step = kwargs["inspect_step"] if "inspect_step" in kwargs else None # Whether to inspect each step inspect_items = kwargs["inspect_items"] if "inspect_items" in kwargs else None inspect_items_train = get_inspect_items_train(inspect_items) inspect_functions = kwargs["inspect_functions"] if "inspect_functions" in kwargs else None if inspect_functions is not None: for inspect_function_key in inspect_functions: if inspect_function_key not in inspect_items: inspect_items.append(inspect_function_key) inspect_items_interval = 
kwargs["inspect_items_interval"] if "inspect_items_interval" in kwargs else 1000 inspect_image_interval = kwargs["inspect_image_interval"] if "inspect_image_interval" in kwargs else None inspect_loss_precision = kwargs["inspect_loss_precision"] if "inspect_loss_precision" in kwargs else 4 callback = kwargs["callback"] if "callback" in kwargs else None # Saving kwargs: record_keys = kwargs["record_keys"] if "record_keys" in kwargs else ["loss"] filename = kwargs["filename"] if "filename" in kwargs else None if filename is not None: make_dir(filename) save_interval = kwargs["save_interval"] if "save_interval" in kwargs else None save_step = kwargs["save_step"] if "save_step" in kwargs else None logdir = kwargs["logdir"] if "logdir" in kwargs else None data_record = {key: [] for key in record_keys} info_to_save = kwargs["info_to_save"] if "info_to_save" in kwargs else None if info_to_save is not None: data_record.update(info_to_save) patience = kwargs["patience"] if "patience" in kwargs else 20 if patience is not None: early_stopping_epsilon = kwargs["early_stopping_epsilon"] if "early_stopping_epsilon" in kwargs else 0 early_stopping_monitor = kwargs["early_stopping_monitor"] if "early_stopping_monitor" in kwargs else "loss" early_stopping = Early_Stopping(patience = patience, epsilon = early_stopping_epsilon, mode = "max" if early_stopping_monitor in ["accuracy"] else "min") if logdir is not None: from pytorch_net.logger import Logger batch_idx = 0 logger = Logger(logdir) logimages = kwargs["logimages"] if "logimages" in kwargs else None reg_mode = kwargs["reg_mode"] if "reg_mode" in kwargs else "L1" if validation_loader is not None: assert validation_data is None X_valid, y_valid = None, None elif validation_data is not None: X_valid, y_valid = validation_data else: X_valid, y_valid = X, y # Setting up dynamic label noise: label_noise_matrix = kwargs["label_noise_matrix"] if "label_noise_matrix" in kwargs else None transform_label = 
Transform_Label(label_noise_matrix = label_noise_matrix, is_cuda=is_cuda) # Setting up cotrain optimizer: co_kwargs = kwargs["co_kwargs"] if "co_kwargs" in kwargs else None if co_kwargs is not None: co_optimizer = co_kwargs["co_optimizer"] co_model = co_kwargs["co_model"] co_criterion = co_kwargs["co_criterion"] if "co_criterion" in co_kwargs else None co_multi_step = co_kwargs["co_multi_step"] if "co_multi_step" in co_kwargs else 1 # Get original loss: if len(inspect_items_train) > 0: loss_value_train = get_loss(model, train_loader, X, y, criterion=criterion, loss_epoch=-1, transform_label=transform_label, **kwargs) info_dict_train = prepare_inspection(model, train_loader, X, y, transform_label=transform_label, **kwargs) if "loss" in record_keys: record_data(data_record, [loss_value_train], ["loss_tr"]) loss_original = get_loss(model, validation_loader, X_valid, y_valid, criterion=criterion, loss_epoch=-1, transform_label=transform_label, **kwargs) if "loss" in record_keys: record_data(data_record, [-1, loss_original], ["iter", "loss"]) if "reg" in record_keys and "reg_dict" in kwargs and len(kwargs["reg_dict"]) > 0: reg_value = get_regularization(model, loss_epoch=0, **kwargs) record_data(data_record, [reg_value], ["reg"]) if "param" in record_keys: record_data(data_record, [model.get_weights_bias(W_source="core", b_source="core")], ["param"]) if "param_grad" in record_keys: record_data(data_record, [model.get_weights_bias(W_source="core", b_source="core", is_grad=True)], ["param_grad"]) if co_kwargs is not None: co_loss_original = get_loss(co_model, validation_loader, X_valid, y_valid, criterion=criterion, loss_epoch=-1, transform_label=transform_label, **co_kwargs) if "co_loss" in record_keys: record_data(data_record, [co_loss_original], ["co_loss"]) if filename is not None and save_interval is not None: record_data(data_record, [{}], ["model_dict"]) # Setting up optimizer: parameters = model.parameters() num_params = len(list(model.parameters())) if num_params 
== 0: print("No parameters to optimize!") loss_value = get_loss(model, validation_loader, X_valid, y_valid, criterion = criterion, loss_epoch = -1, transform_label=transform_label, **kwargs) if "loss" in record_keys: record_data(data_record, [0, loss_value], ["iter", "loss"]) if "param" in record_keys: record_data(data_record, [model.get_weights_bias(W_source = "core", b_source = "core")], ["param"]) if "param_grad" in record_keys: record_data(data_record, [model.get_weights_bias(W_source = "core", b_source = "core", is_grad = True)], ["param_grad"]) if co_kwargs is not None: co_loss_value = get_loss(co_model, validation_loader, X_valid, y_valid, criterion = criterion, loss_epoch = -1, transform_label=transform_label, **co_kwargs) record_data(data_record, [co_loss_value], ["co_loss"]) return loss_original, loss_value, data_record optimizer = get_optimizer(optim_type, lr, parameters, **optim_kwargs) if "optimizer" not in kwargs or ("optimizer" in kwargs and kwargs["optimizer"] is None) else kwargs["optimizer"] # Initialize inspect_items: if inspect_items is not None: print("{}:".format(-1), end = "") print("\tlr: {0:.3e}\t loss:{1:.{2}f}".format(optimizer.param_groups[0]["lr"], loss_original, inspect_loss_precision), end = "") info_dict = prepare_inspection(model, validation_loader, X_valid, y_valid, transform_label=transform_label, **kwargs) if len(inspect_items_train) > 0: print("\tloss_tr: {0:.{1}f}".format(loss_value_train, inspect_loss_precision), end = "") info_dict_train = update_key_train(info_dict_train, inspect_items_train) info_dict.update(info_dict_train) if "reg" in record_keys and "reg_dict" in kwargs and len(kwargs["reg_dict"]) > 0: print("\treg:{0:.{1}f}".format(to_np_array(reg_value), inspect_loss_precision), end="") if len(info_dict) > 0: for item in inspect_items: if item in info_dict: print(" \t{0}: {1:.{2}f}".format(item, info_dict[item], inspect_loss_precision), end = "") if item in record_keys and item not in ["loss", "reg"]: 
record_data(data_record, [to_np_array(info_dict[item])], [item]) if co_kwargs is not None: co_info_dict = prepare_inspection(co_model, validation_loader, X_valid, y_valid, transform_label=transform_label, **co_kwargs) if "co_loss" in inspect_items: co_loss_value = get_loss(co_model, validation_loader, X_valid, y_valid, criterion=criterion, loss_epoch=-1, transform_label=transform_label, **co_kwargs) print("\tco_loss: {}".format(formalize_value(co_loss_value, inspect_loss_precision)), end="") if len(co_info_dict) > 0: for item in inspect_items: if item in co_info_dict: print(" \t{0}: {1}".format(item, formalize_value(co_info_dict[item], inspect_loss_precision)), end="") if item in record_keys and item != "loss": record_data(data_record, [to_np_array(co_info_dict[item])], [item]) print("\n") # Setting up gradient noise: if gradient_noise is not None: from pytorch_net.util import Gradient_Noise_Scale_Gen scale_gen = Gradient_Noise_Scale_Gen(epochs=epochs, gamma=gradient_noise["gamma"], # decay rate eta=gradient_noise["eta"], # starting variance gradient_noise_interval_epoch=1, ) gradient_noise_scale = scale_gen.generate_scale(verbose=True) # Set up learning rate scheduler: if scheduler_type is not None: if scheduler_type == "ReduceLROnPlateau": scheduler_patience = kwargs["scheduler_patience"] if "scheduler_patience" in kwargs else 40 scheduler_factor = kwargs["scheduler_factor"] if "scheduler_factor" in kwargs else 0.1 scheduler_verbose = kwargs["scheduler_verbose"] if "scheduler_verbose" in kwargs else False scheduler = ReduceLROnPlateau(optimizer, factor=scheduler_factor, patience=scheduler_patience, verbose=scheduler_verbose) elif scheduler_type == "LambdaLR": scheduler_lr_lambda = kwargs["scheduler_lr_lambda"] if "scheduler_lr_lambda" in kwargs else (lambda epoch: 0.97 ** (epoch // 2)) scheduler = LambdaLR(optimizer, lr_lambda=scheduler_lr_lambda) else: raise # Ramping or learning rate for the first lr_rampup_steps steps: if lr_rampup_steps is not None and 
train_loader is not None: scheduler_rampup = RampupLR(optimizer, num_steps=lr_rampup_steps) if hasattr(train_loader, "dataset"): data_size = len(train_loader.dataset) else: data_size = kwargs["data_size"] # Initialize logdir: if logdir is not None: if logimages is not None: for tag, image_fun in logimages["image_fun"].items(): image = image_fun(model, logimages["X"], logimages["y"]) logger.log_images(tag, image, -1) # Training: to_stop = False for i in range(epochs + 1): model.train() # Updating gradient noise: if gradient_noise is not None: hook_handle_list = [] if i % scale_gen.gradient_noise_interval_epoch == 0: for h in hook_handle_list: h.remove() hook_handle_list = [] scale_idx = int(i / scale_gen.gradient_noise_interval_epoch) if scale_idx >= len(gradient_noise_scale): current_gradient_noise_scale = gradient_noise_scale[-1] else: current_gradient_noise_scale = gradient_noise_scale[scale_idx] for param_group in optimizer.param_groups: for param in param_group["params"]: if param.requires_grad: h = param.register_hook(lambda grad: grad + Variable(torch.normal(mean=torch.zeros(grad.size()), std=current_gradient_noise_scale * torch.ones(grad.size())))) hook_handle_list.append(h) if X is not None and y is not None: if optim_type != "LBFGS": optimizer.zero_grad() reg = get_regularization(model, loss_epoch=i, **kwargs) loss = model.get_loss(X, transform_label(y), criterion=criterion, loss_epoch=i, **kwargs) + reg loss.backward() optimizer.step() else: # "LBFGS" is a second-order optimization algorithm that requires a slightly different procedure: def closure(): optimizer.zero_grad() reg = get_regularization(model, loss_epoch=i, **kwargs) loss = model.get_loss(X, transform_label(y), criterion=criterion, loss_epoch=i, **kwargs) + reg loss.backward() return loss optimizer.step(closure) # Cotrain step: if co_kwargs is not None: if "co_warmup_epochs" not in co_kwargs or "co_warmup_epochs" in co_kwargs and i >= co_kwargs["co_warmup_epochs"]: for _ in 
range(co_multi_step): co_optimizer.zero_grad() co_reg = get_regularization(co_model, loss_epoch=i, **co_kwargs) co_loss = co_model.get_loss(X, transform_label(y), criterion=co_criterion, loss_epoch=i, **co_kwargs) + co_reg co_loss.backward() co_optimizer.step() else: if inspect_step is not None: info_dict_step = {key: [] for key in inspect_items} if "loader_process" in kwargs and kwargs["loader_process"] is not None: train_loader = kwargs["loader_process"]("train") for k, data_batch in enumerate(train_loader): if isinstance(data_batch, tuple) or isinstance(data_batch, list): X_batch, y_batch = data_batch if data_loader_apply is not None: X_batch, y_batch = data_loader_apply(X_batch, y_batch) else: X_batch, y_batch = data_loader_apply(data_batch) if optim_type != "LBFGS": optimizer.zero_grad() reg = get_regularization(model, loss_epoch=i, **kwargs) loss = model.get_loss(X_batch, transform_label(y_batch), criterion=criterion, loss_epoch=i, loss_step=k, **kwargs) + reg loss.backward() if logdir is not None: batch_idx += 1 if len(model.info_dict) > 0: for item in inspect_items: if item in model.info_dict: logger.log_scalar(item, model.info_dict[item], batch_idx) optimizer.step() else: def closure(): optimizer.zero_grad() reg = get_regularization(model, loss_epoch=i, **kwargs) loss = model.get_loss(X_batch, transform_label(y_batch), criterion=criterion, loss_epoch=i, loss_step=k, **kwargs) + reg loss.backward() return loss if logdir is not None: batch_idx += 1 if len(model.info_dict) > 0: for item in inspect_items: if item in model.info_dict: logger.log_scalar(item, model.info_dict[item], batch_idx) optimizer.step(closure) # Rampup scheduler: if lr_rampup_steps is not None and i * data_size // len(X_batch) + k < lr_rampup_steps: scheduler_rampup.step() # Cotrain step: if co_kwargs is not None: if "co_warmup_epochs" not in co_kwargs or "co_warmup_epochs" in co_kwargs and i >= co_kwargs["co_warmup_epochs"]: for _ in range(co_multi_step): co_optimizer.zero_grad() co_reg = 
get_regularization(co_model, loss_epoch=i, **co_kwargs) co_loss = co_model.get_loss(X_batch, transform_label(y_batch), criterion=co_criterion, loss_epoch=i, loss_step=k, **co_kwargs) + co_reg co_loss.backward() if logdir is not None: if len(co_model.info_dict) > 0: for item in inspect_items: if item in co_model.info_dict: logger.log_scalar(item, co_model.info_dict[item], batch_idx) co_optimizer.step() # Inspect at each step: if inspect_step is not None: if k % inspect_step == 0: print("s{}:".format(k), end = "") info_dict = prepare_inspection(model, validation_loader, X_valid, y_valid, transform_label=transform_label, **kwargs) if "loss" in inspect_items: info_dict_step["loss"].append(loss.item()) print("\tloss: {0:.{1}f}".format(loss.item(), inspect_loss_precision), end="") if len(info_dict) > 0: for item in inspect_items: if item in info_dict: info_dict_step[item].append(info_dict[item]) print(" \t{0}: {1}".format(item, formalize_value(info_dict[item], inspect_loss_precision)), end = "") if co_kwargs is not None: if "co_warmup_epochs" not in co_kwargs or "co_warmup_epochs" in co_kwargs and i >= co_kwargs["co_warmup_epochs"]: co_info_dict = prepare_inspection(co_model, validation_loader, X_valid, y_valid, transform_label=transform_label, **co_kwargs) if "co_loss" in inspect_items: print("\tco_loss: {0:.{1}f}".format(co_loss.item(), inspect_loss_precision), end="") info_dict_step["co_loss"].append(co_loss.item()) if len(co_info_dict) > 0: for item in inspect_items: if item in co_info_dict and item != "co_loss": info_dict_step[item].append(co_info_dict[item]) print(" \t{0}: {1}".format(item, formalize_value(co_info_dict[item], inspect_loss_precision)), end="") print() if k % save_step == 0: if filename is not None: pickle.dump(model.model_dict, open(filename[:-2] + "_model.p", "wb")) if logdir is not None: # Log values and gradients of the parameters (histogram summary) # for tag, value in model.named_parameters(): # tag = tag.replace('.', '/') # 
logger.log_histogram(tag, to_np_array(value), i) # logger.log_histogram(tag + '/grad', to_np_array(value.grad), i) if logimages is not None: for tag, image_fun in logimages["image_fun"].items(): image = image_fun(model, logimages["X"], logimages["y"]) logger.log_images(tag, image, i) if i % inspect_interval == 0: model.eval() if inspect_items is not None and i % inspect_items_interval == 0 and len(inspect_items_train) > 0: loss_value_train = get_loss(model, train_loader, X, y, criterion = criterion, loss_epoch = i, transform_label=transform_label, **kwargs) info_dict_train = prepare_inspection(model, train_loader, X, y, transform_label=transform_label, **kwargs) loss_value = get_loss(model, validation_loader, X_valid, y_valid, criterion = criterion, loss_epoch = i, transform_label=transform_label, **kwargs) reg_value = get_regularization(model, loss_epoch = i, **kwargs) if scheduler_type is not None: if lr_rampup_steps is None or train_loader is None or (lr_rampup_steps is not None and i * data_size // len(X_batch) + k >= lr_rampup_steps): if scheduler_type == "ReduceLROnPlateau": scheduler.step(loss_value) else: scheduler.step() if callback is not None: assert callable(callback) callback(model = model, X = X_valid, y = y_valid, iteration = i, loss = loss_value, ) if patience is not None: if early_stopping_monitor == "loss": to_stop = early_stopping.monitor(loss_value) else: info_dict = prepare_inspection(model, validation_loader, X_valid, y_valid, transform_label=transform_label, **kwargs) to_stop = early_stopping.monitor(info_dict[early_stopping_monitor]) if inspect_items is not None: if i % inspect_items_interval == 0: # Get loss: print("{}:".format(i), end = "") print("\tlr: {0:.3e}\tloss: {1:.{2}f}".format(optimizer.param_groups[0]["lr"], loss_value, inspect_loss_precision), end = "") info_dict = prepare_inspection(model, validation_loader, X_valid, y_valid, transform_label=transform_label, **kwargs) if len(inspect_items_train) > 0: print("\tloss_tr: 
{0:.{1}f}".format(loss_value_train, inspect_loss_precision), end = "") info_dict_train = update_key_train(info_dict_train, inspect_items_train) info_dict.update(info_dict_train) if "reg" in inspect_items and "reg_dict" in kwargs and len(kwargs["reg_dict"]) > 0: print("\treg:{0:.{1}f}".format(to_np_array(reg_value), inspect_loss_precision), end="") # Print and record: if len(info_dict) > 0: for item in inspect_items: if item + "_val" in info_dict: print(" \t{0}: {1}".format(item, formalize_value(info_dict[item + "_val"], inspect_loss_precision)), end = "") if item in record_keys and item not in ["loss", "reg"]: record_data(data_record, [to_np_array(info_dict[item + "_val"])], [item]) # logger: if logdir is not None: for item in inspect_items: if item + "_val" in info_dict: logger.log_scalar(item + "_val", info_dict[item + "_val"], i) # Co_model: if co_kwargs is not None: co_loss_value = get_loss(co_model, validation_loader, X_valid, y_valid, criterion = criterion, loss_epoch = i, transform_label=transform_label, **co_kwargs) co_info_dict = prepare_inspection(co_model, validation_loader, X_valid, y_valid, transform_label=transform_label, **co_kwargs) if "co_loss" in inspect_items: print("\tco_loss: {0:.{1}f}".format(co_loss_value, inspect_loss_precision), end="") if len(co_info_dict) > 0: for item in inspect_items: if item + "_val" in co_info_dict: print(" \t{0}: {1}".format(item, formalize_value(co_info_dict[item + "_val"], inspect_loss_precision)), end="") if item in record_keys and item != "co_loss": record_data(data_record, [to_np_array(co_info_dict[item + "_val"])], [item]) if "co_loss" in record_keys: record_data(data_record, [co_loss_value], ["co_loss"]) # Training metrics: if inspect_step is not None: for item in info_dict_step: if len(info_dict_step[item]) > 0: print(" \t{0}_s: {1}".format(item, formalize_value(np.mean(info_dict_step[item]), inspect_loss_precision)), end = "") if item in record_keys and item != "loss": record_data(data_record, 
[np.mean(info_dict_step[item])], ["{}_s".format(item)]) # Record loss: if "loss" in record_keys: record_data(data_record, [i, loss_value], ["iter", "loss"]) if "reg" in record_keys and "reg_dict" in kwargs and len(kwargs["reg_dict"]) > 0: record_data(data_record, [reg_value], ["reg"]) if "param" in record_keys: record_data(data_record, [model.get_weights_bias(W_source = "core", b_source = "core")], ["param"]) if "param_grad" in record_keys: record_data(data_record, [model.get_weights_bias(W_source = "core", b_source = "core", is_grad = True)], ["param_grad"]) print("\n") try: sys.stdout.flush() except: pass if isplot: if inspect_image_interval is not None and hasattr(model, "plot"): if i % inspect_image_interval == 0: if gradient_noise is not None: print("gradient_noise: {0:.9f}".format(current_gradient_noise_scale)) plot_model(model, data_loader = validation_loader, X = X_valid, y = y_valid, transform_label=transform_label, data_loader_apply=data_loader_apply) if co_kwargs is not None and "inspect_image_interval" in co_kwargs and co_kwargs["inspect_image_interval"] and hasattr(co_model, "plot"): if i % co_kwargs["inspect_image_interval"] == 0: plot_model(co_model, data_loader = validation_loader, X = X_valid, y = y_valid, transform_label=transform_label, data_loader_apply=data_loader_apply) if save_interval is not None: if i % save_interval == 0: record_data(data_record, [model.model_dict], ["model_dict"]) if co_kwargs is not None: record_data(data_record, [co_model.model_dict], ["co_model_dict"]) if filename is not None: pickle.dump(data_record, open(filename, "wb")) if to_stop: break loss_value = get_loss(model, validation_loader, X_valid, y_valid, criterion=criterion, loss_epoch=epochs, transform_label=transform_label, **kwargs) if isplot: import matplotlib.pylab as plt for key, item in data_record.items(): if isinstance(item, Number) or len(data_record["iter"]) != len(item): continue if key not in ["iter", "model_dict"]: if key in ["accuracy"]: 
def train_simple(model, X, y, validation_data = None, inspect_interval = 5, **kwargs):
    """Minimal version of training. "model" can be a single model or an ordered list of models.

    Trains the model(s) on (X, y) with the criterion selected via kwargs["loss_type"],
    records the configured metrics every `inspect_interval` epochs, optionally steps a
    learning-rate scheduler and early-stops on the validation loss.

    Args:
        model: a single model or an ordered list of models (applied in sequence by `forward`).
        X, y: training inputs and targets.
        validation_data: optional (X_valid, y_valid) tuple; defaults to the training data.
        inspect_interval: evaluate validation loss / record metrics every this many epochs.
        **kwargs: optimization settings, e.g. epochs, lr, optim_type, optim_kwargs,
            loss_type, patience, record_keys, scheduler_type, reg_dict, isplot.

    Returns:
        (loss_original, loss_value, data_record): validation loss before training,
        validation loss after training, and the dict of recorded metric histories.
    """
    def get_regularization(model, **kwargs):
        # Sum L1 regularization over every model in the list, weighted per reg_dict entry.
        reg_dict = kwargs["reg_dict"] if "reg_dict" in kwargs else None
        reg = to_Variable([0], is_cuda = X.is_cuda)
        for model_ele in model:
            if reg_dict is not None:
                for reg_type, reg_coeff in reg_dict.items():
                    reg = reg + model_ele.get_regularization(source = [reg_type], mode = "L1", **kwargs) * reg_coeff
        return reg

    # Normalize `model` to a list so the rest of the function can iterate uniformly:
    if not(isinstance(model, list) or isinstance(model, tuple)):
        model = [model]

    # Optimization settings (kwargs with defaults):
    epochs = kwargs["epochs"] if "epochs" in kwargs else 2000
    lr = kwargs["lr"] if "lr" in kwargs else 5e-3
    optim_type = kwargs["optim_type"] if "optim_type" in kwargs else "adam"
    optim_kwargs = kwargs["optim_kwargs"] if "optim_kwargs" in kwargs else {}
    loss_type = kwargs["loss_type"] if "loss_type" in kwargs else "mse"
    early_stopping_epsilon = kwargs["early_stopping_epsilon"] if "early_stopping_epsilon" in kwargs else 0
    patience = kwargs["patience"] if "patience" in kwargs else 40
    record_keys = kwargs["record_keys"] if "record_keys" in kwargs else ["loss", "mse", "data_DL", "model_DL"]
    scheduler_type = kwargs["scheduler_type"] if "scheduler_type" in kwargs else "ReduceLROnPlateau"
    loss_precision_floor = kwargs["loss_precision_floor"] if "loss_precision_floor" in kwargs else PrecisionFloorLoss
    data_record = {key: [] for key in record_keys}
    isplot = kwargs["isplot"] if "isplot" in kwargs else False
    if patience is not None:
        early_stopping = Early_Stopping(patience = patience, epsilon = early_stopping_epsilon)
    if validation_data is not None:
        X_valid, y_valid = validation_data
    else:
        X_valid, y_valid = X, y

    # Criteria: the training criterion plus two description-length criteria for inspection.
    criterion = get_criterion(loss_type, loss_precision_floor = loss_precision_floor)
    DL_criterion = Loss_Fun(core = "DLs", loss_precision_floor = loss_precision_floor, DL_sum = True)
    DL_criterion_absolute = Loss_Fun(core = "DLs", loss_precision_floor = PrecisionFloorLoss, DL_sum = True)

    def record_metrics(epoch, loss_val, pred_valid):
        # Record the standard set of metrics for one inspection step.
        # Which metrics are recorded is controlled by record_keys.
        if "loss" in record_keys:
            record_data(data_record, [epoch, loss_val], ["iter", "loss"])
        if "mse" in record_keys:
            record_data(data_record, [to_np_array(nn.MSELoss()(pred_valid, y_valid))], ["mse"])
        if "data_DL" in record_keys:
            record_data(data_record, [to_np_array(DL_criterion(pred_valid, y_valid))], ["data_DL"])
        if "data_DL_absolute" in record_keys:
            record_data(data_record, [to_np_array(DL_criterion_absolute(pred_valid, y_valid))], ["data_DL_absolute"])
        if "model_DL" in record_keys:
            record_data(data_record, [get_model_DL(model)], ["model_DL"])
        if "param" in record_keys:
            record_data(data_record, [model[0].get_weights_bias(W_source = "core", b_source = "core")], ["param"])
        if "param_grad" in record_keys:
            record_data(data_record, [model[0].get_weights_bias(W_source = "core", b_source = "core", is_grad = True)], ["param_grad"])
        if "param_collapse_layers" in record_keys:
            # NOTE: intentionally recorded under the "param" key (matches original behavior).
            record_data(data_record, [simplify(deepcopy(model[0]), X, y, "collapse_layers", verbose = 0)[0]
                                      .get_weights_bias(W_source = "core", b_source = "core")], ["param"])

    # Get original (pre-training) validation loss:
    pred_valid = forward(model, X_valid, **kwargs)
    loss_original = to_np_array(criterion(pred_valid, y_valid))
    record_metrics(-1, loss_original, pred_valid)

    # Setting up optimizer:
    parameters = itertools.chain(*[model_ele.parameters() for model_ele in model])
    num_params = np.sum([len(list(model_ele.parameters())) for model_ele in model])
    if num_params == 0:
        # Nothing to optimize: record the current state once and return immediately.
        print("No parameters to optimize!")
        pred_valid = forward(model, X_valid, **kwargs)
        loss_value = to_np_array(criterion(pred_valid, y_valid))
        record_metrics(0, loss_value, pred_valid)
        return loss_original, loss_value, data_record
    optimizer = get_optimizer(optim_type, lr, parameters, **optim_kwargs)

    # Set up learning rate scheduler:
    if scheduler_type is not None:
        if scheduler_type == "ReduceLROnPlateau":
            scheduler_patience = kwargs["scheduler_patience"] if "scheduler_patience" in kwargs else 10
            scheduler_factor = kwargs["scheduler_factor"] if "scheduler_factor" in kwargs else 0.1
            scheduler = ReduceLROnPlateau(optimizer, factor = scheduler_factor, patience = scheduler_patience)
        elif scheduler_type == "LambdaLR":
            scheduler_lr_lambda = kwargs["scheduler_lr_lambda"] if "scheduler_lr_lambda" in kwargs else (lambda epoch: 1 / (1 + 0.01 * epoch))
            scheduler = LambdaLR(optimizer, lr_lambda = scheduler_lr_lambda)
        else:
            # Was a bare `raise` (RuntimeError: no active exception); raise an informative error instead.
            raise Exception("scheduler_type {} not recognized!".format(scheduler_type))

    # Training:
    to_stop = False
    for i in range(epochs + 1):
        if optim_type != "LBFGS":
            optimizer.zero_grad()
            pred = forward(model, X, **kwargs)
            reg = get_regularization(model, **kwargs)
            loss = criterion(pred, y) + reg
            loss.backward()
            optimizer.step()
        else:
            # "LBFGS" is a second-order optimization algorithm that requires a closure:
            def closure():
                optimizer.zero_grad()
                pred = forward(model, X, **kwargs)
                reg = get_regularization(model, **kwargs)
                loss = criterion(pred, y) + reg
                loss.backward()
                return loss
            optimizer.step(closure)
        if i % inspect_interval == 0:
            pred_valid = forward(model, X_valid, **kwargs)
            loss_value = to_np_array(criterion(pred_valid, y_valid))
            if scheduler_type is not None:
                if scheduler_type == "ReduceLROnPlateau":
                    scheduler.step(loss_value)
                else:
                    scheduler.step()
            record_metrics(i, loss_value, pred_valid)
            if patience is not None:
                to_stop = early_stopping.monitor(loss_value)
            if to_stop:
                break

    # Final validation loss and optional plotting:
    pred_valid = forward(model, X_valid, **kwargs)
    loss_value = to_np_array(criterion(pred_valid, y_valid))
    if isplot:
        import matplotlib.pylab as plt
        if "mse" in data_record:
            plt.semilogy(data_record["iter"], data_record["mse"])
            plt.xlabel("epochs")
            plt.title("MSE")
            plt.show()
        if "loss" in data_record:
            plt.plot(data_record["iter"], data_record["loss"])
            plt.xlabel("epochs")
            plt.title("Loss")
            plt.show()
    return loss_original, loss_value, data_record
def load_model_dict_net(model_dict, is_cuda = False):
    """Reconstruct a single network from its serialized `model_dict`.

    Dispatches on model_dict["type"] and rebuilds the corresponding network class,
    restoring weights/state where the dict provides them.

    Args:
        model_dict: serialized model dictionary (must contain a "type" key).
        is_cuda: whether to place the rebuilt model on GPU.

    Returns:
        The reconstructed network instance.

    Raises:
        Exception: if model_dict["type"] is not a recognized network type.
    """
    net_type = model_dict["type"]
    if net_type.startswith("MLP"):
        return MLP(input_size = model_dict["input_size"],
                   struct_param = model_dict["struct_param"] if "struct_param" in model_dict else None,
                   W_init_list = model_dict["weights"] if "weights" in model_dict else None,
                   b_init_list = model_dict["bias"] if "bias" in model_dict else None,
                   settings = model_dict["settings"] if "settings" in model_dict else {},
                   is_cuda = is_cuda,
                   )
    elif net_type == "Labelmix_MLP":
        model = Labelmix_MLP(input_size=model_dict["input_size"],
                             struct_param=model_dict["struct_param"],
                             idx_label=model_dict["idx_label"] if "idx_label" in model_dict else None,
                             is_cuda=is_cuda,
                             )
        if "state_dict" in model_dict:
            model.load_state_dict(model_dict["state_dict"])
        return model
    elif net_type == "Multi_MLP":
        return Multi_MLP(input_size = model_dict["input_size"],
                         struct_param = model_dict["struct_param"],
                         W_init_list = model_dict["weights"] if "weights" in model_dict else None,
                         b_init_list = model_dict["bias"] if "bias" in model_dict else None,
                         settings = model_dict["settings"] if "settings" in model_dict else {},
                         is_cuda = is_cuda,
                         )
    elif net_type == "Branching_Net":
        return Branching_Net(net_base_model_dict = model_dict["net_base_model_dict"],
                             net_1_model_dict = model_dict["net_1_model_dict"],
                             net_2_model_dict = model_dict["net_2_model_dict"],
                             is_cuda = is_cuda,
                             )
    elif net_type == "Fan_in_MLP":
        return Fan_in_MLP(model_dict_branch1=model_dict["model_dict_branch1"],
                          model_dict_branch2=model_dict["model_dict_branch2"],
                          model_dict_joint=model_dict["model_dict_joint"],
                          is_cuda=is_cuda,
                          )
    elif net_type == "Net_reparam":
        return Net_reparam(model_dict=model_dict["model"],
                           reparam_mode=model_dict["reparam_mode"],
                           is_cuda=is_cuda,
                           )
    elif net_type == "Wide_ResNet":
        model = Wide_ResNet(depth=model_dict["depth"],
                            widen_factor=model_dict["widen_factor"],
                            input_channels=model_dict["input_channels"],
                            output_size=model_dict["output_size"],
                            dropout_rate=model_dict["dropout_rate"],
                            is_cuda=is_cuda,
                            )
        if "state_dict" in model_dict:
            model.load_state_dict(model_dict["state_dict"])
        return model
    elif net_type.startswith("ConvNet"):
        return ConvNet(input_channels = model_dict["input_channels"],
                       struct_param = model_dict["struct_param"],
                       W_init_list = model_dict["weights"] if "weights" in model_dict else None,
                       b_init_list = model_dict["bias"] if "bias" in model_dict else None,
                       settings = model_dict["settings"] if "settings" in model_dict else {},
                       return_indices = model_dict["return_indices"] if "return_indices" in model_dict else False,
                       is_cuda = is_cuda,
                       )
    elif net_type == "Conv_Autoencoder":
        model = Conv_Autoencoder(input_channels_encoder = model_dict["input_channels_encoder"],
                                 input_channels_decoder = model_dict["input_channels_decoder"],
                                 struct_param_encoder = model_dict["struct_param_encoder"],
                                 struct_param_decoder = model_dict["struct_param_decoder"],
                                 settings = model_dict["settings"],
                                 is_cuda = is_cuda,
                                 )
        # Encoder/decoder weights are stored as nested model_dicts:
        if "encoder" in model_dict:
            model.encoder.load_model_dict(model_dict["encoder"])
        if "decoder" in model_dict:
            model.decoder.load_model_dict(model_dict["decoder"])
        return model
    elif net_type == "Conv_Model":  # was `model_dict["type"]`; use net_type for consistency
        is_generative = model_dict["is_generative"] if "is_generative" in model_dict else False
        # A generative Conv_Model has no encoder:
        return Conv_Model(encoder_model_dict = model_dict["encoder_model_dict"] if not is_generative else None,
                          core_model_dict = model_dict["core_model_dict"],
                          decoder_model_dict = model_dict["decoder_model_dict"],
                          latent_size = model_dict["latent_size"],
                          # Simplified from `model_dict["is_generative"] if is_generative else False`,
                          # which always evaluated to the same value:
                          is_generative = is_generative,
                          is_res_block = model_dict["is_res_block"] if "is_res_block" in model_dict else False,
                          is_cuda = is_cuda,
                          )
    else:
        raise Exception("net_type {} not recognized!".format(net_type))
net_type not in ["Model_Ensemble", "LSTM", "Model_with_Uncertainty", "Mixture_Model", "Mixture_Gaussian"]: return load_model_dict_net(model_dict, is_cuda = is_cuda) elif net_type == "Model_Ensemble": if model_dict["model_type"] == "MLP": model_ensemble = Model_Ensemble( num_models = model_dict["num_models"], input_size = model_dict["input_size"], model_type = model_dict["model_type"], output_size = model_dict["output_size"], is_cuda = is_cuda, # Here we just create some placeholder network. The model will be overwritten in the next steps: struct_param = [[1, "Simple_Layer", {}]], ) elif model_dict["model_type"] == "LSTM": model_ensemble = Model_Ensemble( num_models = model_dict["num_models"], input_size = model_dict["input_size"], model_type = model_dict["model_type"], output_size = model_dict["output_size"], is_cuda = is_cuda, # Here we just create some placeholder network. The model will be overwritten in the next steps: hidden_size = 3, output_struct_param = [[1, "Simple_Layer", {}]], ) else: raise for k in range(model_ensemble.num_models): setattr(model_ensemble, "model_{}".format(k), load_model_dict(model_dict["model_{}".format(k)], is_cuda = is_cuda)) return model_ensemble elif net_type == "Model_with_Uncertainty": return Model_with_Uncertainty(model_pred = load_model_dict(model_dict["model_pred"], is_cuda = is_cuda), model_logstd = load_model_dict(model_dict["model_logstd"], is_cuda = is_cuda)) elif net_type == "Mixture_Model": return Mixture_Model(model_dict_list=model_dict["model_dict_list"], weight_logits_model_dict=model_dict["weight_logits_model_dict"], num_components=model_dict["num_components"], is_cuda=is_cuda, ) elif net_type == "Mixture_Gaussian": return load_model_dict_Mixture_Gaussian(model_dict, is_cuda = is_cuda) else: raise Exception("net_type {} not recognized!".format(net_type)) ## Helper functions: def get_accuracy(pred, target): """Get accuracy from prediction and target""" assert len(pred.shape) == len(target.shape) == 1 assert len(pred) 
== len(target) pred, target = to_np_array(pred, target) accuracy = ((pred == target).sum().astype(float) / len(pred)) return accuracy def flatten(*tensors): """Flatten the tensor except the first dimension""" new_tensors = [] for tensor in tensors: new_tensors.append(tensor.view(tensor.size(0), -1)) if len(new_tensors) == 1: new_tensors = new_tensors[0] return new_tensors def fill_triangular(vec, dim, mode = "lower"): """Fill an lower or upper triangular matrices with given vectors""" num_examples, size = vec.shape assert size == dim * (dim + 1) // 2 matrix = torch.zeros(num_examples, dim, dim).to(vec.device) idx = (torch.tril(torch.ones(dim, dim)) == 1).unsqueeze(0) idx = idx.repeat(num_examples,1,1) if mode == "lower": matrix[idx] = vec.contiguous().view(-1) elif mode == "upper": matrix[idx] = vec.contiguous().view(-1) else: raise Exception("mode {} not recognized!".format(mode)) return matrix def matrix_diag_transform(matrix, fun): """Return the matrices whose diagonal elements have been executed by the function 'fun'.""" num_examples = len(matrix) idx = torch.eye(matrix.size(-1)).bool().unsqueeze(0) idx = idx.repeat(num_examples, 1, 1) new_matrix = matrix.clone() new_matrix[idx] = fun(matrix.diagonal(dim1 = 1, dim2 = 2).contiguous().view(-1)) return new_matrix def Zip(*data, **kwargs): """Recursive unzipping of data structure Example: Zip(*[(('a',2), 1), (('b',3), 2), (('c',3), 3), (('d',2), 4)]) ==> [[['a', 'b', 'c', 'd'], [2, 3, 3, 2]], [1, 2, 3, 4]] Each subtree in the original data must be in the form of a tuple. In the **kwargs, you can set the function that is applied to each fully unzipped subtree. 
""" import collections function = kwargs["function"] if "function" in kwargs else None if len(data) == 1: return data[0] data = [list(element) for element in zip(*data)] for i, element in enumerate(data): if isinstance(element[0], tuple): data[i] = Zip(*element, **kwargs) elif isinstance(element, list): if function is not None: data[i] = function(element) return data def get_loss(model, data_loader=None, X=None, y=None, criterion=None, transform_label=None, **kwargs): """Get loss using the whole data or data_loader. Return the average validation loss with np.ndarray format""" max_validation_iter = kwargs["max_validation_iter"] if "max_validation_iter" in kwargs else None if transform_label is None: transform_label = Transform_Label() if "loader_process" in kwargs and kwargs["loader_process"] is not None: data_loader = kwargs["loader_process"]("test") if data_loader is not None: assert X is None and y is None loss_record = 0 count = 0 # Taking the average of all metrics: for j, data_batch in enumerate(data_loader): if isinstance(data_batch, tuple) or isinstance(data_batch, list): X_batch, y_batch = data_batch if "data_loader_apply" in kwargs and kwargs["data_loader_apply"] is not None: X_batch, y_batch = kwargs["data_loader_apply"](X_batch, y_batch) else: X_batch, y_batch = kwargs["data_loader_apply"](data_batch) loss_ele = to_np_array(model.get_loss(X_batch, transform_label(y_batch), criterion = criterion, **kwargs)) if j == 0: all_info_dict = {key: 0 for key in model.info_dict.keys()} loss_record = loss_record + loss_ele count += 1 for key in model.info_dict: all_info_dict[key] = all_info_dict[key] + model.info_dict[key] if max_validation_iter is not None and count > max_validation_iter: break for key in model.info_dict: all_info_dict[key] = all_info_dict[key] / count loss = loss_record / count model.info_dict = deepcopy(all_info_dict) else: assert X is not None and y is not None loss = to_np_array(model.get_loss(X, transform_label(y), criterion = criterion, 
**kwargs)) return loss def plot_model(model, data_loader=None, X=None, y=None, transform_label=None, **kwargs): data_loader_apply = kwargs["data_loader_apply"] if "data_loader_apply" in kwargs else None max_validation_iter = kwargs["max_validation_iter"] if "max_validation_iter" in kwargs else None if transform_label is None: transform_label = Transform_Label() if "loader_process" in kwargs and kwargs["loader_process"] is not None: data_loader = kwargs["loader_process"]("test") if data_loader is not None: assert X is None and y is None X_all = [] y_all = [] for i, data_batch in enumerate(data_loader): if isinstance(data_batch, tuple) or isinstance(data_batch, list): X_batch, y_batch = data_batch if data_loader_apply is not None: X_batch, y_batch = data_loader_apply(X_batch, y_batch) else: X_batch, y_batch = data_loader_apply(data_batch) X_all.append(X_batch) y_all.append(y_batch) if max_validation_iter is not None and i >= max_validation_iter: break if not isinstance(X_all[0], torch.Tensor): X_all = Zip(*X_all, function = torch.cat) else: X_all = torch.cat(X_all, 0) y_all = torch.cat(y_all) model.plot(X_all, transform_label(y_all)) else: assert X is not None and y is not None model.plot(X, transform_label(y)) def prepare_inspection(model, data_loader=None, X=None, y=None, transform_label=None, **kwargs): inspect_functions = kwargs["inspect_functions"] if "inspect_functions" in kwargs else None max_validation_iter = kwargs["max_validation_iter"] if "max_validation_iter" in kwargs else None verbose = kwargs["verbose"] if "verbose" in kwargs else False if transform_label is None: transform_label = Transform_Label() if "loader_process" in kwargs and kwargs["loader_process"] is not None: data_loader = kwargs["loader_process"]("test") if data_loader is None: assert X is not None and y is not None all_dict_summary = model.prepare_inspection(X, transform_label(y), **kwargs) if inspect_functions is not None: for inspect_function_key, inspect_function in 
def get_inspect_items_train(inspect_items):
    """Return the base names of all inspect items carrying a "_tr" suffix.

    Given e.g. ["loss_tr", "mse_val"], returns ["loss"]. A None input
    yields an empty list.
    """
    if inspect_items is None:
        return []
    return [
        "_".join(name.split("_")[:-1])
        for name in inspect_items
        if name.endswith("_tr")
    ]


def update_key_train(info_dict_train, inspect_items_train):
    """Re-key training metrics with a "_tr" suffix.

    Keeps only the entries whose key appears in inspect_items_train and
    returns a deep copy so callers cannot mutate the originals.
    """
    renamed = {
        key + "_tr": value
        for key, value in info_dict_train.items()
        if key in inspect_items_train
    }
    return deepcopy(renamed)
neural network model in various ways. "model" can be a single model or a ordered list of models""" verbose = kwargs["verbose"] if "verbose" in kwargs else 1 if validation_data is None: X_valid, y_valid = X, y else: X_valid, y_valid = validation_data simplify_criteria = kwargs["simplify_criteria"] if "simplify_criteria" in kwargs else ("DLs", 0.05, 3, "relative") # the first argument choose from "DL", "loss" simplify_epsilon = simplify_criteria[1] simplify_patience = simplify_criteria[2] simplify_compare_mode = simplify_criteria[3] performance_monitor = Performance_Monitor(patience = simplify_patience, epsilon = simplify_epsilon, compare_mode = simplify_compare_mode) record_keys = kwargs["record_keys"] if "record_keys" in kwargs else ["mse"] loss_precision_floor = kwargs["loss_precision_floor"] if "loss_precision_floor" in kwargs else PrecisionFloorLoss if X is not None: if y is None: y = Variable(forward(model, X, **kwargs).data, requires_grad = False) if not (isinstance(model, list) or isinstance(model, tuple)): model = [model] is_list = False else: is_list = True if mode == "full": mode = ["collapse_layers", "snap"] if not isinstance(mode, list): mode = [mode] # Obtain the original loss and setup criterion: loss_type = kwargs["loss_type"] if "loss_type" in kwargs else "mse" criterion = get_criterion(loss_type, loss_precision_floor = loss_precision_floor) DL_criterion = Loss_Fun(core = "DLs", loss_precision_floor = loss_precision_floor, DL_sum = True) loss_dict = OrderedDict() for mode_ele in mode: if verbose >= 1: print("\n" + "=" * 48 + "\nSimplifying mode: {}".format(mode_ele), end = "") if mode_ele == "snap": snap_mode = kwargs["snap_mode"] if "snap_mode" in kwargs else "integer" print(" {}".format(snap_mode), end = "") if target_name is not None: print(" for {}".format(target_name)) else: print() print("=" * 48) # Record the loss before simplification: if X is not None: pred_valid = forward(model, X_valid, **kwargs) loss_original = 
to_np_array(criterion(pred_valid, y_valid)) loss_list = [loss_original] if verbose >= 1: print("original_loss: {}".format(loss_original)) mse_record_whole = [to_np_array(nn.MSELoss()(pred_valid, y_valid))] data_DL_whole = [to_np_array(DL_criterion(pred_valid, y_valid))] model_DL_whole = [get_model_DL(model)] event_list = ["before simplification"] iter_end_whole = [1] is_accept_whole = [] if "param" in record_keys: param_record_whole = [model[0].get_weights_bias(W_source = "core", b_source = "core")] if "param_grad" in record_keys: param_grad_record_whole = [model[0].get_weights_bias(W_source = "core", b_source = "core", is_grad = True)] # Begin simplification: if mode_ele == "collapse_layers": all_collapse_dict = {} for model_id, model_ele in enumerate(model): # Obtain activations for each layer: activation_list = [] for k in range(len(model_ele.struct_param)): if "activation" in model_ele.struct_param[k][2]: activation_list.append(model_ele.struct_param[k][2]["activation"]) elif "activation" in model_ele.settings: activation_list.append(model_ele.settings["activation"]) else: activation_list.append("default") # Build the collapse_list that stipulates which layers to collapse: collapse_dict = {} current_start = None current_layer_type = None for k, activation in enumerate(activation_list): if activation == "linear" and k != len(activation_list) - 1: if k not in collapse_dict and current_start is None: # Create a new bunch: if model_ele.struct_param[k + 1][1] == model_ele.struct_param[k][1]: # The current layer must have the same layer_type as the next layer current_start = k collapse_dict[current_start] = [k] current_layer_type = model_ele.struct_param[k][1] else: # Adding to current bunch: if model_ele.struct_param[k + 1][1] == model_ele.struct_param[k][1] == current_layer_type: collapse_dict[current_start].append(k) else: collapse_dict[current_start].append(k) current_start = None else: if current_start is not None: collapse_dict[current_start].append(k) 
current_start = None # Build new layer: new_layer_info = {} for current_start, layer_ids in collapse_dict.items(): for i, layer_id in enumerate(layer_ids): layer = getattr(model_ele, "layer_{}".format(layer_id)) if i == 0: W_accum = layer.W_core b_accum = layer.b_core else: W_accum = torch.matmul(W_accum, layer.W_core) b_accum = torch.matmul(b_accum, layer.W_core) + layer.b_core if model_ele.is_cuda: W_accum = W_accum.cpu() b_accum = b_accum.cpu() last_layer_id = collapse_dict[current_start][-1] new_layer_info[current_start] = {"W_init": W_accum.data.numpy(), "b_init": b_accum.data.numpy(), "layer_struct_param": [b_accum.size(0), model_ele.struct_param[last_layer_id][1], deepcopy(model_ele.struct_param[last_layer_id][2])], } new_layer_info[current_start].pop("snap_dict", None) if verbose >= 1: print("model_id {}, layers collapsed: {}".format(model_id, collapse_dict)) # Rebuild the Net: if len(collapse_dict) > 0: all_collapse_dict[model_id] = {"collapse_dict": collapse_dict, "new_layer_info": new_layer_info, "collapse_layer_ids": [idx for item in collapse_dict.values() for idx in item], } # Rebuild the list of models: if len(all_collapse_dict) > 0: model_new = [] for model_id, model_ele in enumerate(model): if model_id in all_collapse_dict: W_list, b_list = model_ele.get_weights_bias(W_source = "core", b_source = "core") W_init_list = [] b_init_list = [] struct_param = [] for k in range(len(model_ele.struct_param)): if k not in all_collapse_dict[model_id]["collapse_layer_ids"]: struct_param.append(model_ele.struct_param[k]) W_init_list.append(W_list[k]) b_init_list.append(b_list[k]) else: if k in all_collapse_dict[model_id]["collapse_dict"].keys(): struct_param.append(all_collapse_dict[model_id]["new_layer_info"][k]["layer_struct_param"]) W_init_list.append(all_collapse_dict[model_id]["new_layer_info"][k]["W_init"]) b_init_list.append(all_collapse_dict[model_id]["new_layer_info"][k]["b_init"]) model_ele_new = MLP(input_size = model_ele.input_size, struct_param = 
struct_param, W_init_list = W_init_list, b_init_list = b_init_list, settings = model_ele.settings, is_cuda = model_ele.is_cuda, ) else: model_ele_new = model_ele model_new.append(model_ele_new) model = model_new # Calculate the loss again: pred_valid = forward(model, X_valid, **kwargs) loss_new = to_np_array(criterion(pred_valid, y_valid)) if verbose >= 1: print("after collapsing linear layers in all models, new loss {}".format(loss_new)) loss_list.append(loss_new) mse_record_whole.append(to_np_array(nn.MSELoss()(pred_valid, y_valid))) data_DL_whole.append(to_np_array(DL_criterion(pred_valid, y_valid))) model_DL_whole.append(get_model_DL(model)) if "param" in record_keys: param_record_whole.append(model[0].get_weights_bias(W_source = "core", b_source = "core")) if "param_grad" in record_keys: param_grad_record_whole.append(model[0].get_weights_bias(W_source = "core", b_source = "core", is_grad = True)) iter_end_whole.append(1) event_list.append({mode_ele: all_collapse_dict}) elif mode_ele in ["local", "snap"]: # 'local': greedily try reducing the input dimension by removing input dimension from the beginning; # 'snap': greedily snap each float parameter into an integer or rational number. Set argument 'snap_mode' == 'integer' or 'rational'. if mode_ele == "snap": target_params = [[(model_id, layer_id), "snap"] for model_id, model_ele in enumerate(model) for layer_id in range(len(model_ele.struct_param))] elif mode_ele == "local": for model_id, model_ele in enumerate(model): if len(model_ele.struct_param) > 0: first_model_id = model_id break first_layer = getattr(model[first_model_id], "layer_0") target_params = [[(first_model_id, 0), [[(("weight", (i, j)), 0.) 
for j in range(first_layer.output_size)] for i in range(first_layer.input_size)]]] else: raise excluded_idx_dict = {item[0]: [] for item in target_params} target_layer_ids_exclude = [] for (model_id, layer_id), target_list in target_params: layer = getattr(model[model_id], "layer_{}".format(layer_id)) if isinstance(target_list, list): max_passes = len(target_list) elif target_list == "snap": max_passes = (layer.input_size + 1) * layer.output_size if "max_passes" in kwargs: max_passes = min(max_passes, kwargs["max_passes"]) else: raise Exception("target_list {} not recognizable!".format(target_list)) if verbose >= 2: print("\n****starting model:****") model[model_id].get_weights_bias(W_source = "core", b_source = "core", verbose = True) print("********\n" ) performance_monitor.reset() criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) to_stop, pivot_dict, log, _, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result) for i in range(max_passes): # Perform tentative simplification if isinstance(target_list, list): info = layer.simplify(mode = "snap", excluded_idx = excluded_idx_dict[(model_id, layer_id)], snap_targets = target_list[i], **kwargs) else: info = layer.simplify(mode = "snap", excluded_idx = excluded_idx_dict[(model_id, layer_id)], **kwargs) if len(info) == 0: target_layer_ids_exclude.append((model_id, layer_id)) print("Pass {0}, (model {1}, layer {2}) has no parameters to snap. Revert to pivot model. 
Go to next layer".format(i, model_id, layer_id)) break excluded_idx_dict[(model_id, layer_id)] = excluded_idx_dict[(model_id, layer_id)] + info _, loss_new, data_record = train_simple(model, X, y, optim_type = "adam", validation_data = validation_data, **kwargs) if verbose >= 2: print("=" * 8) model[model_id].get_weights_bias(W_source = "core", b_source = "core", verbose = True) criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) to_stop, pivot_dict, log, is_accept, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result) is_accept_whole.append(is_accept) if is_accept: print('[Accepted] as pivot model!') print() # Check if the criterion after simplification and refit is worse. If it is worse than the simplify_epsilon, revert: if to_stop: target_layer_ids_exclude.append((model_id, layer_id)) if verbose >= 1: print("Pass {0}, loss: {1}\tDL: {2}. New snap {3} is do not improve by {4} = {5} for {6} steps. Revert the simplification to pivot model. Go to next layer.".format( i, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL")), info, simplify_criteria[0], simplify_epsilon, simplify_patience)) break mse_record_whole += data_record["mse"] data_DL_whole += data_record["data_DL"] model_DL_whole += data_record["model_DL"] if "param" in record_keys: param_record_whole += data_record["param"] if "param_grad" in record_keys: param_grad_record_whole += data_record["param_grad"] iter_end_whole.append(len(data_record["mse"])) model[model_id].reset_layer(layer_id, layer) loss_list.append(loss_new) event_list.append({mode_ele: ((model_id, layer_id), info)}) if verbose >= 1: print("Pass {0}, snap (model {1}, layer {2}), snap {3}. 
\tloss: {4}\tDL: {5}".format( i, model_id, layer_id, info, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL")))) # Update the whole model's struct_param and snap_dict: model[model_id].load_model_dict(pivot_dict["model_dict"]) model[model_id].synchronize_settings() if verbose >= 2: print("\n****pivot model at {}th transformation:****".format(pivot_id)) model[model_id].get_weights_bias(W_source = "core", b_source = "core", verbose = True) print("********\n" ) elif mode_ele == "pair_snap": model_new = [] for model_id, model_ele in enumerate(model): for layer_id, layer_struct_param in enumerate(model_ele.struct_param): if layer_struct_param[1] == "Symbolic_Layer": layer = getattr(model_ele, "layer_{}".format(layer_id)) max_passes = len(layer.get_param_dict()) - 1 if "max_passes" in kwargs: max_passes = min(max_passes, kwargs["max_passes"]) if verbose > 1: print("original:") print("symbolic_expression: ", layer.symbolic_expression) print("numerical_expression: ", layer.numerical_expression) print() performance_monitor.reset() criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) to_stop, pivot_dict, log, _, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result) for i in range(max_passes): info = layer.simplify(mode = "pair_snap", **kwargs) if len(info) == 0: target_layer_ids_exclude.append((model_id, layer_id)) print("Pass {0}, (model {1}, layer {2}) has no parameters to pair_snap. Revert to pivot model. 
Go to next layer".format(i, model_id, layer_id)) break _, loss, data_record = train_simple(model, X, y, optim_type = "adam", epochs = 1000, validation_data = validation_data, **kwargs) criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) to_stop, pivot_dict, log, is_accept, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result) is_accept_whole.append(is_accept) if to_stop: if verbose >= 1: print("\nPass {0}, loss: {1}\tDL: {2}. New snap {3} is do not improve by {4} = {5} for {6} steps. Revert the simplification to pivot model. Go to next layer.".format( i, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL")), info, simplify_criteria[0], simplify_epsilon, simplify_patience)) break mse_record_whole += data_record["mse"] data_DL_whole += data_record["data_DL"] model_DL_whole += data_record["model_DL"] if "param" in record_keys: param_record_whole += data_record["param"] if "param_grad" in record_keys: param_grad_record_whole += data_record["param_grad"] iter_end_whole.append(len(data_record["mse"])) model[model_id].reset_layer(layer_id, layer) loss_list.append(loss) event_list.append({mode_ele: ((model_id, layer_id), info)}) if verbose >= 1: print("\nPass {0}, snap (model {1}, layer {2}), snap {3}. 
\tloss: {4}\tDL: {5}".format( i, model_id, layer_id, info, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL")))) print("symbolic_expression: ", layer.symbolic_expression) print("numerical_expression: ", layer.numerical_expression) print() model[model_id].load_model_dict(pivot_dict["model_dict"]) print("final: \nsymbolic_expression: ", getattr(model[model_id], "layer_{0}".format(layer_id)).symbolic_expression) print("numerical_expression: ", getattr(model[model_id], "layer_{0}".format(layer_id)).numerical_expression) print() elif mode_ele[:11] == "to_symbolic": from sympy import Symbol force_simplification = kwargs["force_simplification"] if "force_simplification" in kwargs else False is_multi_model = True if len(model) > 1 else False for model_id, model_ele in enumerate(model): for layer_id, layer_struct_param in enumerate(model_ele.struct_param): prefix = "L{}_".format(layer_id) if layer_struct_param[1] == "Simple_Layer": # Obtain loss before simplification: layer = getattr(model_ele, "layer_{}".format(layer_id)) if X is not None: criteria_prev, criteria_result_prev = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) if mode_ele.split("_")[-1] == "separable": new_layer = Simple_2_Symbolic(layer, settings = model_ele.settings, mode = "separable", prefix = prefix) else: new_layer = Simple_2_Symbolic(layer, settings = model_ele.settings, prefix = prefix) model[model_id].reset_layer(layer_id, new_layer) if "snap_dict" in model_ele.settings and layer_id in model_ele.settings["snap_dict"]: subs_targets = [] for (pos, true_idx), item in model_ele.settings["snap_dict"][layer_id].items(): if pos == "weight": subs_targets.append((Symbol("W{0}{1}".format(true_idx[0], true_idx[1])), item["new_value"])) elif pos == "bias": subs_targets.append((Symbol("b{}".format(true_idx)), item["new_value"])) else: raise Exception("pos {} not recognized!".format(pos)) new_expression = 
[expression.subs(subs_targets) for expression in new_layer.symbolic_expression] new_layer.set_symbolic_expression(new_expression) model_ele.settings["snap_dict"].pop(layer_id) model_ele.struct_param[layer_id][2].update(new_layer.struct_param[2]) # Calculate the loss again: if X is not None: criteria_new, criteria_result_new = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) if verbose >= 1: print("Prev_loss: {0}, new loss: {1}\tprev_DL: {2:.9f}, new DL: {3:.9f}".format( criteria_result_prev["loss"], criteria_result_new["loss"], criteria_result_prev["DL"], criteria_result_new["DL"])) print() if criteria_new > criteria_prev * (1 + 0.05): print("to_symbolic DL increase more than 5%! ", end = "") if not force_simplification: print("Reset layer.") model[model_id].reset_layer(layer_id, layer) else: print("Nevertheless, force simplification.") loss_list.append(criteria_result_new["loss"]) print("{0} succeed. Prev_loss: {1}\tnew_loss: {2}\tprev_DL: {3:.9f}, new_DL: {4:.9f}".format( mode_ele, criteria_result_prev["loss"], criteria_result_new["loss"], criteria_result_prev["DL"], criteria_result_new["DL"])) else: print("{0} succeed.".format(mode_ele)) event_list.append({mode_ele: (model_id, layer_id)}) elif layer_struct_param[1] == "Sneuron_Layer": # Obtain loss before simplification: layer = getattr(model_ele, "layer_{0}".format(layer_id)) criteria_prev, criteria_result_prev = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) new_layer = Sneuron_2_Symbolic(layer, prefix = prefix) model[model_id].reset_layer(layer_id, new_layer) # Calculate the loss again: criteria_new, criteria_result_new = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) if verbose >= 1: print("Prev_loss: {0}, new loss: {1}\tprev_DL: {2:.9f}, new DL: {3:.9f}".format( criteria_result_prev["loss"], criteria_result_new["loss"], 
criteria_result_prev["DL"], criteria_result_new["DL"])) print() if criteria_new > criteria_prev * (1 + 0.05): print("to_symbolic DL increase more than 5%! ", end = "") if not force_simplification: print("Reset layer.") model[model_id].reset_layer(layer_id, layer) else: print("Nevertheless, force simplification.") loss_list.append(criteria_result_new["loss"]) event_list.append({mode_ele: (model_id, layer_id)}) print("{0} succeed. Prev_loss: {1}\tnew_loss: {2}\tprev_DL: {3:.9f}, new_DL: {4:.9f}".format( mode_ele, criteria_result_prev["loss"], criteria_result_new["loss"], criteria_result_prev["DL"], criteria_result_new["DL"])) if X is not None: mse_record_whole.append(to_np_array(nn.MSELoss()(pred_valid, y_valid))) data_DL_whole.append(to_np_array(DL_criterion(pred_valid, y_valid))) model_DL_whole.append(get_model_DL(model)) if "param" in record_keys: param_record_whole.append(model[0].get_weights_bias(W_source = "core", b_source = "core")) if "param_grad" in record_keys: param_grad_record_whole.append(model[0].get_weights_bias(W_source = "core", b_source = "core", is_grad = True)) iter_end_whole.append(1) elif mode_ele == "symbolic_simplification": """Collapse multi-layer symbolic expression""" from sympy import Symbol, Poly, expand, prod force_simplification = kwargs["force_simplification"] if "force_simplification" in kwargs else False numerical_threshold = kwargs["numerical_threshold"] if "numerical_threshold" in kwargs else None is_numerical = kwargs["is_numerical"] if "is_numerical" in kwargs else False max_poly_degree = kwargs["max_poly_degree"] if "max_poly_degree" in kwargs else None show_before_truncate = kwargs["show_before_truncate"] if "show_before_truncate" in kwargs else False for model_id, model_ele in enumerate(model): is_all_symbolic = True for layer_id, layer_struct_param in enumerate(model_ele.struct_param): if layer_struct_param[1] != "Symbolic_Layer": is_all_symbolic = False if is_all_symbolic: criteria_prev, criteria_result_prev = 
get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) variables = OrderedDict() for i in range(model[0].layer_0.input_size): variables["x{0}".format(i)] = Symbol("x{0}".format(i)) expression = list(variables.values()) param_dict_all = {} # Collapse multiple layers: for layer_id, layer_struct_param in enumerate(model_ele.struct_param): layer = getattr(model_ele, "layer_{0}".format(layer_id)) layer_expression = deepcopy(layer.numerical_expression) layer_expression_new = [] for expr in layer_expression: new_expr = expr.subs({"x{0}".format(i): "t{0}".format(i) for i in range(len(expression))}) # Use a temporary variable to prevent overriding new_expr = new_expr.subs({"t{0}".format(i): expression[i] for i in range(len(expression))}) layer_expression_new.append(expand(new_expr)) expression = layer_expression_new # Show full expression before performing truncation: if show_before_truncate: for i, expr in enumerate(expression): print("Full expression {0}:".format(i)) pp.pprint(Poly(expr, *list(variables.values()))) print() model_ele_candidate = MLP(input_size = model[0].layer_0.input_size, struct_param = [[layer.output_size, "Symbolic_Layer", {"symbolic_expression": "x0"}]], settings = {}, is_cuda = model_ele.is_cuda, ) # Setting maximul degree for polynomial: if max_poly_degree is not None: new_expression = [] for expr in expression: expr = Poly(expr, *list(variables.values())) degree_list = [] coeff_list = [] for degree, coeff in expr.terms(): # Only use monomials with degree not larger than max_poly_degree: if sum(degree) <= max_poly_degree: degree_list.append(degree) coeff_list.append(coeff) new_expr = 0 for degree, coeff in zip(degree_list, coeff_list): new_expr += prod([variables["x{0}".format(i)] ** degree[i] for i in range(len(degree))]) * coeff new_expression.append(new_expr) expression = new_expression # Update symbolic expression for model_ele_candidate: if not is_numerical: param_dict_all = {} expression_new_all 
= [] for expr in expression: expression_new, param_dict = numerical_2_parameter(expr, idx = len(param_dict_all), threshold = numerical_threshold) expression_new_all.append(expression_new) param_dict_all.update(param_dict) model_ele_candidate.layer_0.set_symbolic_expression(expression_new_all, p_init = param_dict_all) else: model_ele_candidate.layer_0.set_symbolic_expression(expression) model_ele_candidate.layer_0.set_numerical(True) criteria_new, criteria_result_new = get_criteria_value(model_ele_candidate, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) if criteria_new > criteria_prev * (1 + 0.05): print("to_symbolic DL increase more than 5%! ", end = "") if force_simplification: print("Nevertheless, force simplification.") model[model_id] = model_ele_candidate else: print("Revert.") else: model[model_id] = model_ele_candidate elif mode_ele == "activation_snap": from sympy import Function def get_sign_snap_candidate(layer, activation_source, excluded_neurons = None): coeff_dict = {} for i in range(len(layer.symbolic_expression)): current_expression = [layer.symbolic_expression[i]] func_names = layer.get_function_name_list(current_expression) if activation_source in func_names: coeff = [element for element in layer.get_param_name_list(current_expression) if element[0] == "W"] coeff_dict[i] = np.mean([np.abs(value) for key, value in layer.get_param_dict().items() if key in coeff]) best_idx = None best_value = 0 for key, value in coeff_dict.items(): if value > best_value and key not in excluded_neurons: best_value = value best_idx = key return best_idx, best_value activation_source = kwargs["activation_source"] if "activation_source" in kwargs else "sigmoid" activation_target = kwargs["activation_target"] if "activation_target" in kwargs else "heaviside" activation_fun_source = Function(activation_source) activation_fun_target = Function(activation_target) for model_id, model_ele in enumerate(model): for layer_id, layer_struct_param in 
enumerate(model_ele.struct_param): if layer_struct_param[1] == "Symbolic_Layer": layer = getattr(model_ele, "layer_{0}".format(layer_id)) excluded_neurons = [] if activation_source not in layer.get_function_name_list(): continue performance_monitor.reset() criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) to_stop, pivot_dict, log, _, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result) for i in range(layer_struct_param[0]): # Obtain loss before simplification: layer = getattr(model_ele, "layer_{0}".format(layer_id)) best_idx, _ = get_sign_snap_candidate(layer, activation_source, excluded_neurons = excluded_neurons) excluded_neurons.append(best_idx) new_expression = [expression.subs(activation_fun_source, activation_fun_target) if j == best_idx else expression for j, expression in enumerate(layer.symbolic_expression)] print("Pass {0}, candidate new expression: {1}".format(i, new_expression)) layer.set_symbolic_expression(new_expression) # Train: _, loss_new, data_record = train_simple(model, X, y, validation_data = validation_data, **kwargs) criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs) to_stop, pivot_dict, log, is_accept, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result) is_accept_whole.append(is_accept) # Check if the criterion after simplification and refit is worse. If it is worse than the simplify_epsilon, revert: if to_stop: model[model_id].load_model_dict(pivot_dict["model_dict"]) if verbose >= 1: print("Pass {0}, loss: {1}\tDL: {2}. New snap {3} is do not improve by {4} = {5} for {6} steps. Revert the simplification to pivot model. 
Continue".format( i, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL")), info, simplify_criteria[0], simplify_epsilon, simplify_patience)) continue mse_record_whole += data_record["mse"] data_DL_whole += data_record["data_DL"] model_DL_whole += data_record["model_DL"] if "param" in record_keys: param_record_whole += data_record["param"] if "param_grad" in record_keys: param_grad_record_whole += data_record["param_grad"] iter_end_whole.append(len(data_record["mse"])) loss_list.append(loss_new) event_list.append({mode_ele: (model_id, layer_id)}) if verbose >= 1: print("{0} succeed at (model {1}, layer {2}). loss: {3}\tDL: {4}".format( mode_ele, model_id, layer_id, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL")))) print("symbolic_expression: ", layer.symbolic_expression) print("numerical_expression: ", layer.numerical_expression) print() model[model_id].load_model_dict(pivot_dict["model_dict"]) elif mode_ele == "ramping-L1": loss_list_specific = [] ramping_L1_list = kwargs["ramping_L1_list"] if "ramping_L1_list" in kwargs else np.logspace(-7, -1, 30) ramping_mse_threshold = kwargs["ramping_mse_threshold"] if "ramping_mse_threshold" in kwargs else 1e-5 ramping_final_multiplier = kwargs["ramping_final_multiplier"] if "ramping_final_multiplier" in kwargs else 1e-2 layer_dict_dict = {} for i, L1_amp in enumerate(ramping_L1_list): reg_dict = {"weight": L1_amp, "bias": L1_amp, "param": L1_amp} _, loss_end, data_record = train_simple(model, X, y, reg_dict = reg_dict, patience = None, validation_data = validation_data, **kwargs) layer_dict_dict[i] = model[0].layer_0.layer_dict weight, bias = model[0].layer_0.get_weights_bias() print("L1-amp: {0}\tloss: {1}\tweight: {2}\tbias: {3}".format(L1_amp, loss_end, weight, bias)) loss_list_specific.append(loss_end) if "param" in record_keys: param_record_whole.append((weight, bias)) if loss_end > ramping_mse_threshold: if len(loss_list_specific) == 1: 
print("\nThe MSE after the first L1-amp={0} is already larger than the ramping_mse_threshold. Stop and use current L1-amp. The figures will look empty.".format(ramping_mse_threshold)) else: print("\nThe MSE {0} is larger than the ramping_mse_threshold {1}, stop ramping-L1 simplification".format(loss_end, ramping_mse_threshold)) break mse_record_whole.append(data_record["mse"][-1]) data_DL_whole.append(data_record["data_DL"][-1]) model_DL_whole.append(data_record["model_DL"][-1]) iter_end_whole.append(1) final_L1_amp = L1_amp * ramping_final_multiplier final_L1_idx = np.argmin(np.abs(np.array(ramping_L1_list) - final_L1_amp)) layer_dict_final = layer_dict_dict[final_L1_idx] print("Final L1_amp used: {0}".format(ramping_L1_list[final_L1_idx])) if "param" in record_keys: print("Final param value:\nweights: {0}\nbias{1}".format(param_record_whole[final_L1_idx][0], param_record_whole[final_L1_idx][1])) model[0].layer_0.load_layer_dict(layer_dict_final) mse_record_whole = mse_record_whole[: final_L1_idx + 2] data_DL_whole = data_DL_whole[: final_L1_idx + 2] model_DL_whole = model_DL_whole[: final_L1_idx + 2] iter_end_whole = iter_end_whole[: final_L1_idx + 2] if isplot: def dict_to_list(Dict): return np.array([value for value in Dict.values()]) weights_list = [] bias_list = [] for element in param_record_whole: if isinstance(element[0], dict): element_core = dict_to_list(element[0]) weights_list.append(element_core) else: element_core = to_np_array(element[0]).squeeze(1) weights_list.append(element_core) bias_list.append(to_np_array(element[1])) weights_list = np.array(weights_list) bias_list = np.array(bias_list).squeeze(1) import matplotlib.pylab as plt plt.figure(figsize = (7,5)) plt.loglog(ramping_L1_list[: len(loss_list_specific)], loss_list_specific) plt.xlabel("L1 amp", fontsize = 16) plt.ylabel("mse", fontsize = 16) plt.show() plt.figure(figsize = (7,5)) plt.semilogx(ramping_L1_list[: len(loss_list_specific)], loss_list_specific) plt.xlabel("L1 amp", fontsize = 
class MLP(nn.Module):
    """Multi-layer perceptron built from pluggable layer types (via get_Layer).

    `struct_param` is a list of per-layer triples [num_neurons, layer_type,
    layer_settings]; `settings` supplies defaults merged into each layer's own
    settings. Besides the usual forward pass, the class supports structural
    surgery (insert/remove layers, prune/add neurons), (de)serialization via
    `model_dict`, and symbolic simplification of Symbolic_Layer layers.
    """
    def __init__(
        self,
        input_size,
        struct_param = None,
        W_init_list = None,  # initialization for weights
        b_init_list = None,  # initialization for bias
        settings = {},  # Default settings for each layer, if the settings for the layer is not provided in struct_param
        is_cuda = False,
    ):
        # NOTE(review): mutable default `settings={}` is deepcopied immediately
        # below, so it is never mutated across calls; a None default would
        # still be safer style.
        super(MLP, self).__init__()
        self.input_size = input_size
        self.is_cuda = is_cuda
        self.settings = deepcopy(settings)
        if struct_param is not None:
            self.num_layers = len(struct_param)
            self.W_init_list = W_init_list
            self.b_init_list = b_init_list
            self.info_dict = {}
            self.init_layers(deepcopy(struct_param))
        else:
            # Empty shell; layers may be attached later.
            self.num_layers = 0

    @property
    def struct_param(self):
        # Recomputed from the live layers so structural surgery stays in sync.
        return [getattr(self, "layer_{0}".format(i)).struct_param for i in range(self.num_layers)]

    @property
    def output_size(self):
        return self.get_layer(-1).output_size

    @property
    def structure(self):
        """Summary dict of input/output sizes and per-layer struct_param."""
        structure = OrderedDict()
        structure["input_size"] = self.input_size
        structure["output_size"] = self.output_size
        structure["struct_param"] = self.struct_param if hasattr(self, "struct_param") else None
        return structure

    def init_layers(self, struct_param):
        """Construct layer_0..layer_{n-1} from struct_param.

        With settings["res_forward"] each layer (after the first) also sees the
        raw input concatenated to the previous layer's output, so its input
        width grows by `input_size`.
        """
        res_forward = self.settings["res_forward"] if "res_forward" in self.settings else False
        for k, layer_struct_param in enumerate(struct_param):
            if res_forward:
                num_neurons_prev = struct_param[k - 1][0] + self.input_size if k > 0 else self.input_size
            else:
                num_neurons_prev = struct_param[k - 1][0] if k > 0 else self.input_size
            num_neurons = layer_struct_param[0]
            W_init = self.W_init_list[k] if self.W_init_list is not None else None
            b_init = self.b_init_list[k] if self.b_init_list is not None else None
            # Get settings for the current layer:
            layer_settings = deepcopy(self.settings) if bool(self.settings) else {}
            layer_settings.update(layer_struct_param[2])
            # Construct layer:
            layer = get_Layer(layer_type = layer_struct_param[1],
                              input_size = num_neurons_prev,
                              output_size = num_neurons,
                              W_init = W_init,
                              b_init = b_init,
                              settings = layer_settings,
                              is_cuda = self.is_cuda,
                             )
            setattr(self, "layer_{}".format(k), layer)

    def forward(self, *input, p_dict=None, **kwargs):
        """Forward pass; multiple positional inputs are concatenated on the last dim.

        p_dict, if given, is indexed per layer. settings["res_forward"] feeds the
        raw input to every layer; settings["is_res_block"] adds the input to the
        final output (residual block).
        """
        kwargs = filter_kwargs(kwargs, ["res_forward", "is_res_block", "act_noise_scale"])  # only allow certain kwargs to be passed
        # `input` from *input is always a tuple, so this cat always fires:
        if isinstance(input, tuple):
            input = torch.cat(input, -1)
        output = input
        res_forward = self.settings["res_forward"] if "res_forward" in self.settings else False
        is_res_block = self.settings["is_res_block"] if "is_res_block" in self.settings else False
        for k in range(len(self.struct_param)):
            p_dict_ele = p_dict[k] if p_dict is not None else None
            if res_forward and k > 0:
                output = getattr(self, "layer_{}".format(k))(torch.cat([output, input], -1), p_dict=p_dict_ele, **kwargs)
            else:
                output = getattr(self, "layer_{}".format(k))(output, p_dict=p_dict_ele, **kwargs)
        if is_res_block:
            output = output + input
        return output

    def copy(self):
        return deepcopy(self)

    def simplify(self, X=None, y=None, mode="full", isplot=False, target_name=None, validation_data = None, **kwargs):
        """Simplify this net in place via the module-level simplify()."""
        new_model, _ = simplify(self, X, y, mode=mode, isplot=isplot, target_name=target_name, validation_data=validation_data, **kwargs)
        self.__dict__.update(new_model.__dict__)

    def snap(self, snap_mode="integer", top=5, **kwargs):
        """Generate a set of new models whose parameters are snapped, each model with a different number of snapped parameters."""
        # Only implemented for single-layer nets; otherwise report failure.
        if not hasattr(self, "num_layers") or self.num_layers != 1:
            return False, [self]
        else:
            model_list = []
            top = top if snap_mode != "unsnap" else 1
            for top_ele in range(1, top + 1):
                new_model = self.copy()
                layer = new_model.layer_0
                info_list = layer.simplify(mode="snap", top=top_ele, snap_mode=snap_mode)
                if len(info_list) > 0:
                    new_model.reset_layer(0, layer)
                    model_list.append(new_model)
            is_succeed = len(model_list) > 0
            return is_succeed, model_list

    def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
        """Sum the given regularization over all layers."""
        reg = to_Variable([0], is_cuda=self.is_cuda)
        for k in range(len(self.struct_param)):
            layer = getattr(self, "layer_{}".format(k))
            reg = reg + layer.get_regularization(mode = mode, source = source)
        return reg

    def get_layer(self, layer_id):
        # Negative ids index from the end, list-style.
        if layer_id < 0:
            layer_id += self.num_layers
        return getattr(self, "layer_{}".format(layer_id))

    def reset_layer(self, layer_id, layer):
        setattr(self, "layer_{}".format(layer_id), layer)

    def insert_layer(self, layer_id, layer):
        """Insert `layer` at position layer_id, shifting later layers up."""
        if layer_id < 0:
            layer_id += self.num_layers
        if layer_id < self.num_layers - 1:
            next_layer = getattr(self, "layer_{}".format(layer_id + 1))
            if next_layer.struct_param[1] == "Simple_Layer":
                assert next_layer.input_size == layer.output_size, "The inserted layer's output_size {0} must be compatible with next layer_{1}'s input_size {2}!" .format(layer.output_size, layer_id + 1, next_layer.input_size)
        for i in range(self.num_layers - 1, layer_id - 1, -1):
            setattr(self, "layer_{}".format(i + 1), getattr(self, "layer_{}".format(i)))
        setattr(self, "layer_{}".format(layer_id), layer)
        self.num_layers += 1

    def remove_layer(self, layer_id):
        """Remove layer layer_id, shifting later layers down."""
        if layer_id < 0:
            layer_id += self.num_layers
        if layer_id < self.num_layers - 1:
            num_neurons_prev = self.struct_param[layer_id - 1][0] if layer_id > 0 else self.input_size
            replaced_layer = getattr(self, "layer_{}".format(layer_id + 1))
            if replaced_layer.struct_param[1] == "Simple_Layer":
                assert replaced_layer.input_size == num_neurons_prev, "After deleting layer_{0}, the replaced layer's input_size {1} must be compatible with previous layer's output neurons {2}!" .format(layer_id, replaced_layer.input_size, num_neurons_prev)
        for i in range(layer_id, self.num_layers - 1):
            setattr(self, "layer_{}".format(i), getattr(self, "layer_{}".format(i + 1)))
        self.num_layers -= 1

    def prune_neurons(self, layer_id, neuron_ids):
        """Remove neurons; layer_id may be "input" to prune input features."""
        if layer_id == "input":
            layer = self.get_layer(0)
            layer.prune_input_neurons(neuron_ids)
            self.input_size = layer.input_size
        else:
            if layer_id < 0:
                layer_id = self.num_layers + layer_id
            layer = getattr(self, "layer_{}".format(layer_id))
            layer.prune_output_neurons(neuron_ids)
            self.reset_layer(layer_id, layer)
            # Keep the next layer's input width consistent:
            if layer_id < self.num_layers - 1:
                next_layer = getattr(self, "layer_{}".format(layer_id + 1))
                next_layer.prune_input_neurons(neuron_ids)
                self.reset_layer(layer_id + 1, next_layer)

    def add_neurons(self, layer_id, num_neurons, mode = ("imitation", "zeros")):
        """Add neurons to a layer; mode = (output-side init, next-layer input init)."""
        if not isinstance(mode, list) and not isinstance(mode, tuple):
            mode = (mode, mode)
        if layer_id < 0:
            layer_id = self.num_layers + layer_id
        layer = getattr(self, "layer_{}".format(layer_id))
        layer.add_output_neurons(num_neurons, mode = mode[0])
        self.reset_layer(layer_id, layer)
        if layer_id < self.num_layers - 1:
            next_layer = getattr(self, "layer_{}".format(layer_id + 1))
            next_layer.add_input_neurons(num_neurons, mode = mode[1])
            self.reset_layer(layer_id + 1, next_layer)
        if layer_id == 0:
            self.input_size = self.get_layer(0).input_size

    def inspect_operation(self, input, operation_between, p_dict = None, **kwargs):
        """Run only layers in range(*operation_between); mirrors forward()'s flow."""
        output = input
        res_forward = self.settings["res_forward"] if "res_forward" in self.settings else False
        is_res_block = self.settings["is_res_block"] if "is_res_block" in self.settings else False
        for k in range(*operation_between):
            p_dict_ele = p_dict[k] if p_dict is not None else None
            if res_forward and k > 0:
                output = getattr(self, "layer_{}".format(k))(torch.cat([output, input], -1), p_dict = p_dict_ele)
            else:
                output = getattr(self, "layer_{}".format(k))(output, p_dict = p_dict_ele)
        if is_res_block:
            output = output + input
        return output

    def get_weights_bias(self, W_source = "core", b_source = "core", layer_ids = None, is_grad = False, isplot = False, verbose = False, raise_error = True):
        """Collect per-layer weights/biases (or their grads when is_grad).

        Failed layers yield np.NaN placeholders when raise_error is False.
        Returns (W_list, b_list).
        """
        if not hasattr(self, "struct_param"):
            return None, None
        layer_ids = range(len(self.struct_param)) if layer_ids is None else layer_ids
        W_list = []
        b_list = []
        if W_source is not None:
            for k in range(len(self.struct_param)):
                if k in layer_ids:
                    if W_source == "core":
                        try:
                            W, _ = getattr(self, "layer_{}".format(k)).get_weights_bias(is_grad = is_grad)
                        except Exception as e:
                            if raise_error:
                                raise
                            else:
                                print(e)
                                W = np.array([np.NaN])
                    else:
                        raise Exception("W_source '{}' not recognized!".format(W_source))
                    W_list.append(W)
        if b_source is not None:
            for k in range(len(self.struct_param)):
                if k in layer_ids:
                    if b_source == "core":
                        try:
                            _, b = getattr(self, "layer_{}".format(k)).get_weights_bias(is_grad = is_grad)
                        except Exception as e:
                            if raise_error:
                                raise
                            else:
                                print(e)
                                b = np.array([np.NaN])
                    else:
                        raise Exception("b_source '{}' not recognized!".format(b_source))
                    b_list.append(b)
        if verbose:
            import pprint as pp
            if W_source is not None:
                print("weight:")
                pp.pprint(W_list)
            if b_source is not None:
                print("bias:")
                pp.pprint(b_list)
        if isplot:
            if W_source is not None:
                print("weight {}:".format(W_source))
                plot_matrices(W_list)
            if b_source is not None:
                print("bias {}:".format(b_source))
                plot_matrices(b_list)
        return W_list, b_list

    def split_to_model_ensemble(self, mode = "standardize"):
        """Split the last layer's output neurons into an ensemble of one-output nets."""
        num_models = self.struct_param[-1][0]
        model_core = deepcopy(self)
        if mode == "standardize":
            last_layer = getattr(model_core, "layer_{}".format(model_core.num_layers - 1))
            last_layer.standardize(mode = "b_mean_zero")
        else:
            raise Exception("mode {} not recognized!".format(mode))
        model_list = [deepcopy(model_core) for i in range(num_models)]
        for i, model in enumerate(model_list):
            to_prune = list(range(num_models))
            to_prune.pop(i)
            model.prune_neurons(-1, to_prune)
        return construct_model_ensemble_from_nets(model_list)

    @property
    def model_dict(self):
        """Serializable snapshot (architecture + weights + settings)."""
        model_dict = {"type": self.__class__.__name__}
        model_dict["input_size"] = self.input_size
        model_dict["struct_param"] = get_full_struct_param(self.struct_param, self.settings)
        model_dict["weights"], model_dict["bias"] = self.get_weights_bias(W_source = "core", b_source = "core")
        model_dict["settings"] = deepcopy(self.settings)
        model_dict["net_type"] = self.__class__.__name__
        return model_dict

    @property
    def DL(self):
        # Total description length = sum of per-layer DLs.
        return np.sum([getattr(self, "layer_{}".format(i)).DL for i in range(self.num_layers)])

    def load_model_dict(self, model_dict):
        # Rebuild from a model_dict and adopt the new net's state wholesale.
        new_net = load_model_dict_net(model_dict, is_cuda = self.is_cuda)
        self.__dict__.update(new_net.__dict__)

    def load(self, filename):
        # Format inferred from extension: .json -> json, else pickle.
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)

    def save(self, filename):
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)

    def get_loss(self, input, target, criterion, **kwargs):
        y_pred = self(input, **kwargs)
        return criterion(y_pred, target)

    def prepare_inspection(self, X, y, **kwargs):
        # Hook for subclasses/trainer; base MLP reports nothing.
        return {}

    def set_cuda(self, is_cuda):
        for k in range(self.num_layers):
            getattr(self, "layer_{}".format(k)).set_cuda(is_cuda)
        self.is_cuda = is_cuda

    def set_trainable(self, is_trainable):
        for k in range(self.num_layers):
            getattr(self, "layer_{}".format(k)).set_trainable(is_trainable)

    def get_snap_dict(self):
        """Collect each layer's snapped-parameter new values, keyed by layer id."""
        snap_dict = {}
        for k in range(len(self.struct_param)):
            layer = getattr(self, "layer_{}".format(k))
            if hasattr(layer, "snap_dict"):
                recorded_layer_snap_dict = {}
                for key, item in layer.snap_dict.items():
                    recorded_layer_snap_dict[key] = {"new_value": item["new_value"]}
                if len(recorded_layer_snap_dict) > 0:
                    snap_dict[k] = recorded_layer_snap_dict
        return snap_dict

    def synchronize_settings(self):
        # Fold the current snap state back into self.settings and return it.
        snap_dict = self.get_snap_dict()
        if len(snap_dict) > 0:
            self.settings["snap_dict"] = snap_dict
        return self.settings

    def get_sympy_expression(self, verbose = True):
        """Return per-layer sympy expressions for Symbolic_Layer layers (None otherwise)."""
        expressions = {i: {} for i in range(self.num_layers)}
        for i in range(self.num_layers):
            layer = getattr(self, "layer_{}".format(i))
            if layer.struct_param[1] == "Symbolic_Layer":
                if verbose:
                    print("Layer {}, symbolic_expression: {}".format(i, layer.symbolic_expression))
                    print("         numerical_expression: {}".format(layer.numerical_expression))
                expressions[i]["symbolic_expression"] = layer.symbolic_expression
                expressions[i]["numerical_expression"] = layer.numerical_expression
                expressions[i]["param_dict"] = layer.get_param_dict()
                expressions[i]["DL"] = layer.DL
            else:
                if verbose:
                    print("Layer {} is not a symbolic layer.".format(i))
                expressions[i] = None
        return expressions
class Labelmix_MLP(nn.Module):
    """MLP whose activations are modulated per-layer by "label" input columns.

    The input columns listed in idx_label are split off and, at every layer,
    produce a multiplicative gate A_mul and an additive shift A_add applied to
    the main path's output (FiLM-style conditioning — TODO confirm intent).
    """
    def __init__(
        self,
        input_size,
        struct_param,
        idx_label=None,
        is_cuda=False,
    ):
        super(Labelmix_MLP, self).__init__()
        self.input_size = input_size
        self.struct_param = struct_param
        self.num_layers = len(struct_param)
        # If every column is a label there is nothing left for the main path;
        # treat as unconditioned:
        if idx_label is not None and len(idx_label) == input_size:
            idx_label = None
        if idx_label is not None:
            self.idx_label = torch.LongTensor(idx_label)
            idx_main = list(set(range(input_size)) - set(to_np_array(idx_label).astype(int).tolist()))
            self.idx_main = torch.LongTensor(idx_main)
        else:
            self.idx_label = None
            self.idx_main = torch.LongTensor(list(range(input_size)))
        num_neurons_prev = len(self.idx_main)
        for i, layer_struct_param in enumerate(struct_param):
            num_neurons = layer_struct_param[0]
            # Main-path affine parameters for layer i:
            setattr(self, "W_{}_main".format(i), nn.Parameter(torch.randn(num_neurons_prev, num_neurons)))
            setattr(self, "b_{}_main".format(i), nn.Parameter(torch.zeros(num_neurons)))
            init_weight(getattr(self, "W_{}_main".format(i)), init=None)
            num_neurons_prev = num_neurons
            if self.idx_label is not None:
                # Label-conditioned gate (mul) and shift (add) parameters:
                setattr(self, "W_{}_mul".format(i), nn.Parameter(torch.randn(len(self.idx_label), num_neurons)))
                setattr(self, "W_{}_add".format(i), nn.Parameter(torch.randn(len(self.idx_label), num_neurons)))
                init_weight(getattr(self, "W_{}_mul".format(i)), init=None)
                init_weight(getattr(self, "W_{}_add".format(i)), init=None)
                setattr(self, "b_{}_mul".format(i), nn.Parameter(torch.zeros(num_neurons)))
                setattr(self, "b_{}_add".format(i), nn.Parameter(torch.zeros(num_neurons)))
        self.set_cuda(is_cuda)

    def forward(self, input):
        # Split the input into main features and (optionally) label features.
        output = input[:, self.idx_main]
        if self.idx_label is not None:
            labels = input[:, self.idx_label]
        for i, layer_struct_param in enumerate(self.struct_param):
            output = torch.matmul(output, getattr(self, "W_{}_main".format(i))) + getattr(self, "b_{}_main".format(i))
            if "activation" in layer_struct_param[2]:
                output = get_activation(layer_struct_param[2]["activation"])(output)
            if self.idx_label is not None:
                A_mul = torch.matmul(labels, getattr(self, "W_{}_mul".format(i))) + getattr(self, "b_{}_mul".format(i))
                A_add = torch.matmul(labels, getattr(self, "W_{}_add".format(i))) + getattr(self, "b_{}_add".format(i))
                output = output * A_mul + A_add
        return output

    def get_loss(self, X, y, criterion, **kwargs):
        y_pred = self(X)
        return criterion(y_pred, y)

    def set_cuda(self, is_cuda):
        # is_cuda may be a device string (e.g. "cuda:1") or a bool.
        if isinstance(is_cuda, str):
            self.cuda(is_cuda)
        else:
            if is_cuda:
                self.cuda()
            else:
                self.cpu()
        self.is_cuda = is_cuda

    def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
        # No regularization implemented for this class; always returns 0.
        reg = to_Variable([0], is_cuda=self.is_cuda)
        return reg

    @property
    def model_dict(self):
        """Serializable snapshot; parameters are stored via state_dict."""
        model_dict = {"type": "Labelmix_MLP"}
        model_dict["input_size"] = self.input_size
        model_dict["struct_param"] = self.struct_param
        if self.idx_label is not None:
            model_dict["idx_label"] = to_np_array(self.idx_label).astype(int)
        model_dict["state_dict"] = to_cpu_recur(self.state_dict())
        return model_dict
class Multi_MLP(nn.Module):
    """Several MLP blocks chained in series (output of block i feeds block i+1)."""
    def __init__(
        self,
        input_size,
        struct_param,
        W_init_list = None,  # initialization for weights
        b_init_list = None,  # initialization for bias
        settings = None,  # Default settings for each layer, if the settings for the layer is not provided in struct_param
        is_cuda = False,
    ):
        super(Multi_MLP, self).__init__()
        self.input_size = input_size
        self.num_layers = len(struct_param)
        self.W_init_list = W_init_list
        self.b_init_list = b_init_list
        self.settings = deepcopy(settings)
        self.num_blocks = len(struct_param)
        self.is_cuda = is_cuda
        for i, struct_param_ele in enumerate(struct_param):
            # Each block's input width is the previous block's last-layer width:
            input_size_block = input_size if i == 0 else struct_param[i - 1][-1][0]
            setattr(self, "block_{0}".format(i), MLP(input_size = input_size_block,
                                                     struct_param = struct_param_ele,
                                                     W_init_list = W_init_list[i] if W_init_list is not None else None,
                                                     b_init_list = b_init_list[i] if b_init_list is not None else None,
                                                     settings = self.settings[i] if self.settings is not None else {},
                                                     is_cuda = self.is_cuda,
                                                    ))

    def forward(self, input):
        output = input
        for i in range(self.num_blocks):
            output = getattr(self, "block_{0}".format(i))(output)
        return output

    def get_loss(self, input, target, criterion, **kwargs):
        y_pred = self(input, **kwargs)
        return criterion(y_pred, target)

    def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
        """Sum the blocks' regularization terms."""
        reg = Variable(torch.FloatTensor([0]), requires_grad = False)
        if self.is_cuda:
            reg = reg.cuda()
        for i in range(self.num_blocks):
            reg = reg + getattr(self, "block_{0}".format(i)).get_regularization(mode = mode, source = source)
        return reg

    @property
    def struct_param(self):
        return [getattr(self, "block_{0}".format(i)).struct_param for i in range(self.num_blocks)]

    @property
    def model_dict(self):
        """Serializable snapshot (architecture + weights + settings)."""
        model_dict = {"type": self.__class__.__name__}
        model_dict["input_size"] = self.input_size
        model_dict["struct_param"] = self.struct_param
        model_dict["weights"], model_dict["bias"] = self.get_weights_bias(W_source = "core", b_source = "core")
        model_dict["settings"] = deepcopy(self.settings)
        model_dict["net_type"] = self.__class__.__name__
        return model_dict

    def load_model_dict(self, model_dict):
        # Rebuild from a model_dict and adopt the new net's state wholesale.
        new_net = load_model_dict_Multi_MLP(model_dict, is_cuda = self.is_cuda)
        self.__dict__.update(new_net.__dict__)

    def load(self, filename):
        # Format inferred from extension: .json -> json, else pickle.
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)

    def save(self, filename):
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)

    def get_weights_bias(self, W_source = "core", b_source = "core"):
        """Return (deep-copied) per-block weight and bias lists."""
        W_list = []
        b_list = []
        for i in range(self.num_blocks):
            W, b = getattr(self, "block_{0}".format(i)).get_weights_bias(W_source = W_source, b_source = b_source)
            W_list.append(W)
            b_list.append(b)
        return deepcopy(W_list), deepcopy(b_list)

    def prepare_inspection(self, X, y, **kwargs):
        # Hook for the trainer; nothing to report here.
        return {}

    def set_cuda(self, is_cuda):
        for i in range(self.num_blocks):
            getattr(self, "block_{0}".format(i)).set_cuda(is_cuda)
        self.is_cuda = is_cuda

    def set_trainable(self, is_trainable):
        for i in range(self.num_blocks):
            getattr(self, "block_{0}".format(i)).set_trainable(is_trainable)
class Branching_Net(nn.Module):
    """An MLP that consists of a base network, and net_1 and net_2 that branches off from the output of the base network."""
    def __init__(
        self,
        net_base_model_dict,
        net_1_model_dict,
        net_2_model_dict,
        is_cuda = False,
    ):
        super(Branching_Net, self).__init__()
        # All three sub-nets are rebuilt from serialized model_dicts:
        self.net_base = load_model_dict(net_base_model_dict, is_cuda = is_cuda)
        self.net_1 = load_model_dict(net_1_model_dict, is_cuda = is_cuda)
        self.net_2 = load_model_dict(net_2_model_dict, is_cuda = is_cuda)
        self.info_dict = {}

    def forward(self, X, **kwargs):
        """Run the base net, max-pool over the first (example) dim, then both heads.

        Returns the tuple (net_1 output[0], net_2 output[0]).
        """
        shared = self.net_base(X)
        shared = shared.max(0, keepdim = True)[0]
        return self.net_1(shared)[0], self.net_2(shared)[0]

    def get_regularization(self, source = ("weight", "bias"), mode = "L1"):
        """Sum the sub-nets' regularization terms.

        Fix: the default source used to be ["weights", "bias"]; every other
        get_regularization in this file uses the singular "weight" key, so the
        plural form matched nothing and the weight penalty was silently zero.
        Also use an immutable tuple instead of a mutable list default.
        """
        reg = self.net_base.get_regularization(source = source, mode = mode) \
            + self.net_1.get_regularization(source = source, mode = mode) \
            + self.net_2.get_regularization(source = source, mode = mode)
        return reg

    def set_trainable(self, is_trainable):
        self.net_base.set_trainable(is_trainable)
        self.net_1.set_trainable(is_trainable)
        self.net_2.set_trainable(is_trainable)

    def prepare_inspection(self, X, y, **kwargs):
        # Report a copy so callers cannot mutate the live info_dict.
        return deepcopy(self.info_dict)

    @property
    def model_dict(self):
        """Serializable snapshot of all three sub-nets."""
        model_dict = {"type": "Branching_Net"}
        model_dict["net_base_model_dict"] = self.net_base.model_dict
        model_dict["net_1_model_dict"] = self.net_1.model_dict
        model_dict["net_2_model_dict"] = self.net_2.model_dict
        return model_dict

    def load(self, filename):
        # Format inferred from extension: .json -> json, else pickle.
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)

    def save(self, filename):
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
class Fan_in_MLP(nn.Module):
    """Two optional branch nets whose outputs are broadcast, concatenated, and fed to a joint net."""
    def __init__(
        self,
        model_dict_branch1,
        model_dict_branch2,
        model_dict_joint,
        is_cuda=False,
    ):
        super(Fan_in_MLP, self).__init__()
        # A None model_dict means that branch passes its input through unchanged.
        if model_dict_branch1 is not None:
            self.net_branch1 = load_model_dict(model_dict_branch1, is_cuda=is_cuda)
        else:
            self.net_branch1 = None
        if model_dict_branch2 is not None:
            self.net_branch2 = load_model_dict(model_dict_branch2, is_cuda=is_cuda)
        else:
            self.net_branch2 = None
        self.net_joint = load_model_dict(model_dict_joint, is_cuda=is_cuda)
        self.is_cuda = is_cuda
        self.info_dict = {}

    def forward(self, X1, X2, is_outer=False):
        # is_outer inserts a broadcast dim into X2 so every X2 row pairs with every X1 row.
        if is_outer:
            X2 = X2[...,None,:]
        if self.net_branch1 is not None:
            X1 = self.net_branch1(X1)
        if self.net_branch2 is not None:
            X2 = self.net_branch2(X2)
        X1, X2 = broadcast_all(X1, X2)
        out = torch.cat([X1, X2], -1)
        # if is_outer=True, then output dimension: [..., X2dim, X1dim, out_dim]:
        return self.net_joint(out).squeeze(-1)

    def get_loss(self, input, target, criterion, **kwargs):
        # `input` is the pair (X1, X2).
        X1, X2 = input
        y_pred = self(X1, X2)
        return criterion(y_pred, target)

    def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
        """Sum the branch nets' regularization (the joint net is not regularized here)."""
        reg = Variable(torch.FloatTensor([0]), requires_grad = False)
        if self.is_cuda:
            reg = reg.cuda()
        if self.net_branch1 is not None:
            reg = reg + self.net_branch1.get_regularization(source=source, mode=mode)
        if self.net_branch2 is not None:
            reg = reg + self.net_branch2.get_regularization(source=source, mode=mode)
        return reg

    def prepare_inspection(self, X, y, **kwargs):
        # Report a copy so callers cannot mutate the live info_dict.
        return deepcopy(self.info_dict)

    @property
    def model_dict(self):
        """Serializable snapshot of the branch and joint nets."""
        model_dict = {'type': self.__class__.__name__}
        model_dict["model_dict_branch1"] = self.net_branch1.model_dict if self.net_branch1 is not None else None
        model_dict["model_dict_branch2"] = self.net_branch2.model_dict if self.net_branch2 is not None else None
        model_dict["model_dict_joint"] = self.net_joint.model_dict
        return model_dict

    def load(self, filename):
        # Format inferred from extension: .json -> json, else pickle.
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)

    def save(self, filename):
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
class Mixture_Model(nn.Module):
    """A bank of component models plus a net producing per-component weight logits.

    forward returns the stacked component outputs and the (unnormalized) logits;
    combining them (e.g. softmax mixture) is left to the caller.
    """
    def __init__(
        self,
        model_dict_list,
        weight_logits_model_dict,
        num_components,
        is_cuda=False,
    ):
        super(Mixture_Model, self).__init__()
        self.num_components = num_components
        for i in range(self.num_components):
            if isinstance(model_dict_list, list):
                # One model_dict per component:
                setattr(self, "model_{}".format(i), load_model_dict(model_dict_list[i], is_cuda=is_cuda))
            else:
                # A single model_dict used as the template for every component:
                assert isinstance(model_dict_list, dict)
                setattr(self, "model_{}".format(i), load_model_dict(model_dict_list, is_cuda=is_cuda))
        self.weight_logits_model = load_model_dict(weight_logits_model_dict, is_cuda=is_cuda)
        self.is_cuda = is_cuda

    def forward(self, input):
        output_list = []
        for i in range(self.num_components):
            output = getattr(self, "model_{}".format(i))(input)
            output_list.append(output)
        # Components stacked on a new trailing dim:
        output_list = torch.stack(output_list, -1)
        weight_logits = self.weight_logits_model(input)
        return output_list, weight_logits

    @property
    def model_dict(self):
        """Serializable snapshot of all components and the weight-logits net."""
        model_dict = {"type": "Mixture_Model",
                      "model_dict_list": [getattr(self, "model_{}".format(i)).model_dict for i in range(self.num_components)],
                      "weight_logits_model_dict": self.weight_logits_model.model_dict,
                      "num_components": self.num_components,
                     }
        return model_dict
elif net_type == "ConvNet": net = ConvNet(input_channels = self.input_size, struct_param = deepcopy(struct_param_model), settings = deepcopy(settings_model), is_cuda = is_cuda, ) else: raise Exception("Net_type {0} not recognized!".format(net_type)) setattr(self, "model_{0}".format(i), net) @property def struct_param(self): return tuple(getattr(self, "model_{0}".format(i)).struct_param for i in range(self.num_models)) @property def settings(self): return [getattr(self, "model_{0}".format(i)).settings for i in range(self.num_models)] def get_all_models(self): return [getattr(self, "model_{0}".format(i)) for i in range(self.num_models)] def init_bias_with_input(self, input, mode = "std_sqrt", neglect_last_layer = True): for i in range(self.num_models): model = getattr(self, "model_{0}".format(i)) model.init_bias_with_input(input, mode = mode, neglect_last_layer = neglect_last_layer) def initialize_param_freeze(self, update_values = True): for i in range(self.num_models): model = getattr(self, "model_{0}".format(i)) model.initialize_param_freeze(update_values = update_values) def apply_model(self, input, model_id): return fetch_model(self, model_id)(input) def fetch_model(self, model_id): return getattr(self, "model_{0}".format(model_id)) def set_trainable(self, is_trainable): for i in range(self.num_models): getattr(self, "model_{0}".format(i)).set_trainable(is_trainable) def forward(self, input): output_list = [] for i in range(self.num_models): if self.net_type == "MLP": output = getattr(self, "model_{0}".format(i))(input) elif self.net_type == "ConvNet": output = getattr(self, "model_{0}".format(i))(input)[0] else: raise Exception("Net_type {0} not recognized!".format(self.net_type)) output_list.append(output) return torch.stack(output_list, 1) def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs): if not isinstance(source, list): source = [source] reg = Variable(torch.FloatTensor([0]), requires_grad = False) if self.is_cuda: reg = 
reg.cuda() model0 = self.model_0 # Elastic_weight_reg: if "elastic_weight" in source or "elastic_bias" in source: # Setting up excluded layer: excluded_layer = kwargs["excluded_layer"] if "excluded_layer" in kwargs else [-1] if not isinstance(excluded_layer, list): excluded_layer = [excluded_layer] excluded_layer = [element + model0.num_layers if element < 0 else element for element in excluded_layer] elastic_mode = kwargs["elastic_mode"] if "elastic_mode" in kwargs else "var" # Compute the elastic_weight_reg: for k in range(model0.num_layers): if k in excluded_layer: continue W_accum_k = [] b_accum_k = [] num_neurons_prev = model0.struct_param[k - 1][0] if k > 0 else self.input_size num_neurons = model0.struct_param[k][0] for i in range(self.num_models): model = getattr(self, "model_{0}".format(i)) assert model0.num_layers == model.num_layers assert num_neurons_prev == model.struct_param[k - 1][0] if k > 0 else model.input_size, "all models' input/output size at each layer must be identical!" assert num_neurons == model.struct_param[k][0], "all models' input/output size at each layer must be identical!" 
layer_k = getattr(model, "layer_{0}".format(k)) if "elastic_weight" in source: W_accum_k.append(layer_k.W_core) if "elastic_bias" in source: b_accum_k.append(layer_k.b_core) if "elastic_weight" in source: if elastic_mode == "var": reg = reg + torch.stack(W_accum_k, -1).var(-1).sum() elif elastic_mode == "std": reg = reg + torch.stack(W_accum_k, -1).std(-1).sum() else: raise if "elastic_bias" in source: if elastic_mode == "var": reg = reg + torch.stack(b_accum_k, -1).var(-1).sum() elif elastic_mode == "std": reg = reg + torch.stack(b_accum_k, -1).std(-1).sum() else: raise source_core = deepcopy(source) if "elastic_weight" in source_core: source_core.remove("elastic_weight") if "elastic_bias" in source_core: source_core.remove("elastic_bias") else: source_core = source # Other regularizations: for k in range(self.num_models): reg = reg + getattr(self, "model_{0}".format(k)).get_regularization(source = source_core, mode = mode, **kwargs) return reg def get_weights_bias(self, W_source = None, b_source = None, verbose = False, isplot = False): W_list_dict = {} b_list_dict = {} for i in range(self.num_models): if verbose: print("\nmodel {0}:".format(i)) W_list_dict[i], b_list_dict[i] = getattr(self, "model_{0}".format(i)).get_weights_bias( W_source = W_source, b_source = b_source, verbose = verbose, isplot = isplot) return W_list_dict, b_list_dict def combine_to_net(self, mode = "mean", last_layer_mode = "concatenate"): model0 = self.model_0 if mode == "mean": struct_param = deepcopy(model0.struct_param) settings = deepcopy(model0.settings) W_init_list = [] b_init_list = [] for k in range(model0.num_layers): num_neurons_prev = model0.struct_param[k - 1][0] if k > 0 else self.input_size num_neurons = model0.struct_param[k][0] W_accum_k = [] b_accum_k = [] for i in range(self.num_models): model = getattr(self, "model_{0}".format(i)) assert model0.num_layers == model.num_layers assert num_neurons_prev == model.struct_param[k - 1][0] if k > 0 else model.input_size, "If mode 
== 'mean', all models' input/output size at each layer must be identical!" assert num_neurons == model.struct_param[k][0], "If mode == 'mean', all models' input/output size at each layer must be identical!" layer_k = getattr(model, "layer_{0}".format(k)) W_accum_k.append(layer_k.W_core) b_accum_k.append(layer_k.b_core) if k == model0.num_layers - 1: current_mode = last_layer_mode else: current_mode = mode if current_mode == "mean": W_accum_k = torch.stack(W_accum_k, -1).mean(-1) b_accum_k = torch.stack(b_accum_k, -1).mean(-1) elif current_mode == "concatenate": W_accum_k = torch.cat(W_accum_k, -1) b_accum_k = torch.cat(b_accum_k, -1) struct_param[-1][0] = sum([self.struct_param[i][-1][0] for i in range(self.num_models)]) else: raise Exception("mode {0} not recognized!".format(last_layer_mode)) W_init_list.append(W_accum_k.data.numpy()) b_init_list.append(b_accum_k.data.numpy()) # Build the net: net = MLP(input_size = self.input_size, struct_param = struct_param, W_init_list = W_init_list, b_init_list = b_init_list, settings = settings, ) else: raise Exception("mode {0} not recognized!".format(mode)) return net def remove_models(self, model_ids): if not isinstance(model_ids, list): model_ids = [model_ids] model_list = [] k = 0 for i in range(self.num_models): if i not in model_ids: if k != i: setattr(self, "model_{0}".format(k), getattr(self, "model_{0}".format(i))) k += 1 num_models_new = k for i in range(num_models_new, self.num_models): delattr(self, "model_{0}".format(i)) self.num_models = num_models_new def add_models(self, models): if not isinstance(models, list): models = [models] for i, model in enumerate(models): setattr(self, "model_{0}".format(i + self.num_models), model) self.num_models += len(models) def simplify(self, X, y, idx, mode = "full", validation_data = None, isplot = False, **kwargs): def process_idx(idx): idx = idx.byte() if len(idx.size()) == 1: idx = idx.unqueeze(1) if idx.size(1) == 1: idx = idx.repeat(1, self.num_models) return idx idx = 
process_idx(idx) if validation_data is not None: X_valid, y_valid, idx_valid = validation_data idx_valid = process_idx(idx_valid) loss_dict = {} for i in range(self.num_models): model = getattr(self, "model_{0}".format(i)) X_chosen = torch.masked_select(X, idx[:, i:i+1]).view(-1, X.size(1)) y_chosen = torch.masked_select(y, idx[:, i:i+1]).view(-1, y.size(1)) if validation_data is not None: X_valid_chosen = torch.masked_select(X_valid, idx_valid[:, i:i+1]).view(-1, X_valid.size(1)) y_valid_chosen = torch.masked_select(y_valid, idx_valid[:, i:i+1]).view(-1, y_valid.size(1)) if len(X_valid_chosen) == 0: validation_data_chosen = None else: validation_data_chosen = (X_valid_chosen, y_valid_chosen) else: validation_data_chosen = None if len(X_chosen) == 0: print("The {0}'th model has no corresponding data to simplify with, skip.".format(i)) else: new_model, loss_dict["model_{0}".format(i)] = simplify(model, X_chosen, y_chosen, mode = mode, validation_data = validation_data_chosen, isplot = isplot, target_name = "model_{0}".format(i), **kwargs) setattr(self, "model_{0}".format(i), new_model) return loss_dict def get_sympy_expression(self): expressions = {} for k in range(self.num_models): print("\nmodel {0}:".format(k)) expressions["model_{0}".format(k)] = getattr(self, "model_{0}".format(k)).get_sympy_expression() return expressions @property def DL(self): return np.sum([getattr(self, "model_{0}".format(i)).DL for i in range(self.num_models)]) def get_weights_bias(self, W_source = None, b_source = None, verbose = False, isplot = False): W_list_dict = {} b_list_dict = {} for i in range(self.num_models): if verbose: print("\nmodel {0}:".format(i)) W_list_dict[i], b_list_dict[i] = getattr(self, "model_{0}".format(i)).get_weights_bias(W_source = W_source, b_source = b_source, verbose = verbose, isplot = isplot) return W_list_dict, b_list_dict @property def model_dict(self): model_dict = {"type": "Model_Ensemble"} for i in range(self.num_models): 
model_dict["model_{0}".format(i)] = getattr(self, "model_{0}".format(i)).model_dict model_dict["input_size"] = self.input_size model_dict["struct_param"] = self.struct_param model_dict["num_models"] = self.num_models model_dict["net_type"] = self.net_type return model_dict def load_model_dict(self, model_dict): new_model_ensemble = load_model_dict(model_dict, is_cuda = self.is_cuda) self.__dict__.update(new_model_ensemble.__dict__) def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) def load_model_dict_model_ensemble(model_dict, is_cuda = False): num_models = len([model_name for model_name in model_dict if model_name[:6] == "model_"]) return Model_Ensemble(num_models = num_models, input_size = model_dict["input_size"], struct_param = tuple([deepcopy(model_dict["model_{0}".format(i)]["struct_param"]) for i in range(num_models)]), W_init_list = [deepcopy(model_dict["model_{0}".format(i)]["weights"]) for i in range(num_models)], b_init_list = [deepcopy(model_dict["model_{0}".format(i)]["bias"]) for i in range(num_models)], settings = [deepcopy(model_dict["model_{0}".format(i)]["settings"]) for i in range(num_models)], net_type = model_dict["net_type"] if "net_type" in model_dict else "MLP", is_cuda = is_cuda, ) def combine_model_ensembles(model_ensembles, input_size): model_ensembles = deepcopy(model_ensembles) model_ensemble_combined = None model_id = 0 for k, model_ensemble in enumerate(model_ensembles): if model_ensemble.input_size == input_size: if model_ensemble_combined is None: model_ensemble_combined = model_ensemble else: continue for i in range(model_ensemble.num_models): model = getattr(model_ensemble, "model_{0}".format(i)) setattr(model_ensemble_combined, "model_{0}".format(model_id), model) model_id += 1 
model_ensemble_combined.num_models = model_id return model_ensemble_combined def construct_model_ensemble_from_nets(nets): num_models = len(nets) if num_models is None: return None input_size = nets[0].input_size struct_param = tuple(net.struct_param for net in nets) is_cuda = False for net in nets: if net.input_size != input_size: raise Exception("The input_size for all nets must be the same!") if net.is_cuda: is_cuda = True model_ensemble = Model_Ensemble(num_models = num_models, input_size = input_size, struct_param = struct_param, is_cuda = is_cuda) for i, net in enumerate(nets): setattr(model_ensemble, "model_{0}".format(i), net) return model_ensemble # In[ ]: class Model_with_uncertainty(nn.Module): def __init__( self, model_pred, model_logstd, ): super(Model_with_uncertainty, self).__init__() self.model_pred = model_pred self.model_logstd = model_logstd def forward(self, input, noise_amp = None, **kwargs): return self.model_pred(input, noise_amp = noise_amp, **kwargs), self.model_logstd(input, **kwargs) def get_loss(self, input, target, criterion, noise_amp = None, **kwargs): pred, log_std = self(input, noise_amp = noise_amp, **kwargs) return criterion(pred = pred, target = target, log_std = log_std) def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs): return self.model_pred.get_regularization(source = source, mode = mode, **kwargs) + self.model_logstd.get_regularization(source = source, mode = mode, **kwargs) @property def model_dict(self): model_dict = {} model_dict["type"] = "Model_with_Uncertainty" model_dict["model_pred"] = self.model_pred.model_dict model_dict["model_logstd"] = self.model_logstd.model_dict return model_dict def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) def 
set_cuda(self, is_cuda): self.model_pred.set_cuda(is_cuda) self.model_logstd.set_cuda(is_cuda) def set_trainable(self, is_trainable): self.model_pred.set_trainable(is_trainable) self.model_logstd.set_trainable(is_trainable) # ## RNN: # In[ ]: class RNNCellBase(nn.Module): def extra_repr(self): s = '{input_size}, {hidden_size}' if 'bias' in self.__dict__ and self.bias is not True: s += ', bias={bias}' if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh": s += ', nonlinearity={nonlinearity}' return s.format(**self.__dict__) def check_forward_input(self, input): if input.size(1) != self.input_size: raise RuntimeError( "input has inconsistent input_size: got {}, expected {}".format( input.size(1), self.input_size)) def check_forward_hidden(self, input, hx, hidden_label=''): if input.size(0) != hx.size(0): raise RuntimeError( "Input batch size {} doesn't match hidden{} batch size {}".format( input.size(0), hidden_label, hx.size(0))) if hx.size(1) != self.hidden_size: raise RuntimeError( "hidden{} has inconsistent hidden_size: got {}, expected {}".format( hidden_label, hx.size(1), self.hidden_size)) # ### LSTM: # In[ ]: class LSTM(RNNCellBase): """a LSTM class""" def __init__( self, input_size, hidden_size, output_struct_param, output_settings = {}, bias = True, is_cuda = False, ): super(LSTM, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.W_ih = nn.Parameter(torch.Tensor(4 * hidden_size, input_size)) self.W_hh = nn.Parameter(torch.Tensor(4 * hidden_size, hidden_size)) self.output_net = MLP(input_size = self.hidden_size, struct_param = output_struct_param, settings = output_settings, is_cuda = is_cuda) if bias: self.b_ih = nn.Parameter(torch.Tensor(4 * hidden_size)) self.b_hh = nn.Parameter(torch.Tensor(4 * hidden_size)) else: self.register_parameter('b_ih', None) self.register_parameter('b_hh', None) self.reset_parameters() self.is_cuda = is_cuda self.device = torch.device(self.is_cuda if 
isinstance(self.is_cuda, str) else "cuda" if self.is_cuda else "cpu") self.to(self.device) def reset_parameters(self): stdv = 1.0 / np.sqrt(self.hidden_size) for weight in self.parameters(): weight.data.uniform_(-stdv, stdv) def forward_one_step(self, input, hx): self.check_forward_input(input) self.check_forward_hidden(input, hx[0], '[0]') self.check_forward_hidden(input, hx[1], '[1]') return self._backend.LSTMCell( input, hx, self.W_ih, self.W_hh, self.b_ih, self.b_hh, ) def forward(self, input, hx = None): if hx is None: hx = [torch.randn(input.size(0), self.hidden_size).to(self.device), torch.randn(input.size(0), self.hidden_size).to(self.device), ] hhx, ccx = hx for i in range(input.size(1)): hhx, ccx = self.forward_one_step(input[:, i], (hhx, ccx)) output = self.output_net(hhx) return output def get_regularization(self, source, mode = "L1", **kwargs): if not isinstance(source, list): source = [source] reg = self.output_net.get_regularization(source = source, mode = mode) for source_ele in source: if source_ele == "weight": if mode == "L1": reg = reg + self.W_ih.abs().sum() + self.W_hh.abs().sum() elif mode == "L2": reg = reg + (self.W_ih ** 2).sum() + (self.W_hh ** 2).sum() else: raise Exception("mode {0} not recognized!".format(mode)) elif source_ele == "bias": if self.bias: if mode == "L1": reg = reg + self.b_ih.abs().sum() + self.b_hh.abs().sum() elif mode == "L2": reg = reg + (self.b_ih ** 2).sum() + (self.b_hh ** 2).sum() else: raise Exception("mode {0} not recognized!".format(mode)) else: raise Exception("source {0} not recognized!".format(source_ele)) return reg def get_weights_bias(self, W_source = None, b_source = None, verbose = False, isplot = False): W_dict = OrderedDict() b_dict = OrderedDict() W_o, b_o = self.output_net.get_weights_bias(W_source = W_source, b_source = b_source) if W_source == "core": W_dict["W_ih"] = self.W_ih.cpu().detach().numpy() W_dict["W_hh"] = self.W_hh.cpu().detach().numpy() W_dict["W_o"] = W_o if isplot: print("W_ih, 
W_hh:") plot_matrices([W_dict["W_ih"], W_dict["W_hh"]]) print("W_o:") plot_matrices(W_o) if self.bias and b_source == "core": b_dict["b_ih"] = self.b_ih.cpu().detach().numpy() b_dict["b_hh"] = self.b_hh.cpu().detach().numpy() b_dict["b_o"] = b_o if isplot: print("b_ih, b_hh:") plot_matrices([b_dict["b_ih"], b_dict["b_hh"]]) print("b_o:") plot_matrices(b_o) return W_dict, b_dict def get_loss(self, input, target, criterion, hx = None, **kwargs): y_pred = self(input, hx = hx) return criterion(y_pred, target) def prepare_inspection(self, X, y, **kwargs): return {} def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) # ## Wide ResNet: # In[ ]: def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True) def conv_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: init.xavier_uniform_(m.weight, gain=np.sqrt(2)) init.constant_(m.bias, 0) elif classname.find('BatchNorm') != -1: init.constant_(m.weight, 1) init.constant_(m.bias, 0) class wide_basic(nn.Module): def __init__(self, in_planes, planes, dropout_rate=None, stride=1): super(wide_basic, self).__init__() self.bn1 = nn.BatchNorm2d(in_planes) self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True) if dropout_rate is not None: self.dropout = nn.Dropout(p=dropout_rate) else: self.dropout = None self.bn2 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True) self.shortcut = nn.Sequential() if stride != 1 or in_planes != planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True), ) def forward(self, x): out = self.conv1(F.relu(self.bn1(x))) if self.dropout 
is not None: out = self.dropout(out) out = self.conv2(F.relu(self.bn2(out))) out += self.shortcut(x) return out class Wide_ResNet(nn.Module): """Adapted from https://github.com/meliketoy/wide-resnet.pytorch/blob/master/networks/wide_resnet.py""" def __init__( self, depth, widen_factor, input_channels, output_size, dropout_rate=None, is_cuda=False, ): super(Wide_ResNet, self).__init__() self.depth = depth self.widen_factor = widen_factor self.input_channels = input_channels self.dropout_rate = dropout_rate self.output_size = output_size assert ((depth-4)%6 ==0), 'Wide-resnet depth should be 6n+4' n = (depth-4)//6 k = widen_factor nStages = [16*k, 16*k, 32*k, 64*k] self.in_planes = nStages[0] self.conv1 = conv3x3(self.input_channels,nStages[0]) self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1) self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2) self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2) self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9) self.linear = nn.Linear(nStages[3], output_size) self.set_cuda(is_cuda) def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride): strides = [stride] + [1]*(int(num_blocks)-1) layers = [] for stride in strides: layers.append(block(self.in_planes, planes, dropout_rate, stride)) self.in_planes = planes return nn.Sequential(*layers) def forward(self, x): out = self.conv1(x) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = F.relu(self.bn1(out)) out = out.mean((-1,-2)) # replacing the out= F.avg_pool2d(out, 8) which is sensitive to the input shape. 
out = out.view(out.size(0), -1) out = self.linear(out) return out def set_cuda(self, is_cuda): if isinstance(is_cuda, str): self.cuda(is_cuda) else: if is_cuda: self.cuda() else: self.cpu() self.is_cuda = is_cuda @property def model_dict(self): model_dict = {"type": "Wide_ResNet"} model_dict["state_dict"] = to_cpu_recur(self.state_dict()) model_dict["depth"] = self.depth model_dict["widen_factor"] = self.widen_factor model_dict["input_channels"] = self.input_channels model_dict["output_size"] = self.output_size model_dict["dropout_rate"] = self.dropout_rate return model_dict def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) def get_regularization(self, *args, **kwargs): return to_Variable([0], is_cuda = self.is_cuda) def prepare_inspection(self, *args, **kwargs): return {} # ## CNN: # In[ ]: class ConvNet(nn.Module): def __init__( self, input_channels, struct_param=None, W_init_list=None, b_init_list=None, settings={}, return_indices=False, is_cuda=False, ): super(ConvNet, self).__init__() self.input_channels = input_channels if struct_param is not None: self.struct_param = struct_param self.W_init_list = W_init_list self.b_init_list = b_init_list self.settings = settings self.num_layers = len(struct_param) self.info_dict = {} self.param_available = ["Conv2d", "ConvTranspose2d", "BatchNorm2d", "Simple_Layer"] self.return_indices = return_indices for i in range(len(self.struct_param)): if i > 0: k = 1 while self.struct_param[i - k][0] is None: k += 1 num_channels_prev = self.struct_param[i - k][0] else: num_channels_prev = input_channels k = 0 if self.struct_param[i - k][1] == "Simple_Layer" and isinstance(num_channels_prev, tuple) and len(num_channels_prev) == 3: num_channels_prev = num_channels_prev[0] num_channels = 
self.struct_param[i][0] layer_type = self.struct_param[i][1] layer_settings = self.struct_param[i][2] if "layer_input_size" in layer_settings and isinstance(layer_settings["layer_input_size"], tuple): num_channels_prev = layer_settings["layer_input_size"][0] if layer_type == "Conv2d": layer = nn.Conv2d(num_channels_prev, num_channels, kernel_size = layer_settings["kernel_size"], stride = layer_settings["stride"] if "stride" in layer_settings else 1, padding = layer_settings["padding"] if "padding" in layer_settings else 0, dilation = layer_settings["dilation"] if "dilation" in layer_settings else 1, ) elif layer_type == "ConvTranspose2d": layer = nn.ConvTranspose2d(num_channels_prev, num_channels, kernel_size = layer_settings["kernel_size"], stride = layer_settings["stride"] if "stride" in layer_settings else 1, padding = layer_settings["padding"] if "padding" in layer_settings else 0, output_padding = layer_settings["output_padding"] if "output_padding" in layer_settings else 0, dilation = layer_settings["dilation"] if "dilation" in layer_settings else 1, ) elif layer_type == "Simple_Layer": layer = get_Layer(layer_type = layer_type, input_size = layer_settings["layer_input_size"], output_size = num_channels, W_init = W_init_list[i] if self.W_init_list is not None and self.W_init_list[i] is not None else None, b_init = b_init_list[i] if self.b_init_list is not None and self.b_init_list[i] is not None else None, settings = layer_settings, is_cuda = is_cuda, ) elif layer_type == "MaxPool2d": layer = nn.MaxPool2d(kernel_size = layer_settings["kernel_size"], stride = layer_settings["stride"] if "stride" in layer_settings else None, padding = layer_settings["padding"] if "padding" in layer_settings else 0, return_indices = layer_settings["return_indices"] if "return_indices" in layer_settings else False, ) elif layer_type == "MaxUnpool2d": layer = nn.MaxUnpool2d(kernel_size = layer_settings["kernel_size"], stride = layer_settings["stride"] if "stride" in layer_settings 
else None, padding = layer_settings["padding"] if "padding" in layer_settings else 0, ) elif layer_type == "Upsample": layer = nn.Upsample(scale_factor = layer_settings["scale_factor"], mode = layer_settings["mode"] if "mode" in layer_settings else "nearest", ) elif layer_type == "BatchNorm2d": layer = nn.BatchNorm2d(num_features = num_channels) elif layer_type == "Dropout2d": layer = nn.Dropout2d(p = 0.5) elif layer_type == "Flatten": layer = Flatten() else: raise Exception("layer_type {0} not recognized!".format(layer_type)) # Initialize using provided initial values: if self.W_init_list is not None and self.W_init_list[i] is not None and layer_type not in ["Simple_Layer"]: layer.weight.data = torch.FloatTensor(self.W_init_list[i]) layer.bias.data = torch.FloatTensor(self.b_init_list[i]) setattr(self, "layer_{0}".format(i), layer) self.set_cuda(is_cuda) def forward(self, input, indices_list = None, **kwargs): return self.inspect_operation(input, operation_between = (0, self.num_layers), indices_list = indices_list) def inspect_operation(self, input, operation_between, indices_list = None): output = input if indices_list is None: indices_list = [] start_layer, end_layer = operation_between if end_layer < 0: end_layer += self.num_layers for i in range(start_layer, end_layer): if "layer_input_size" in self.struct_param[i][2]: output_size_last = output.shape[0] layer_input_size = self.struct_param[i][2]["layer_input_size"] if not isinstance(layer_input_size, tuple): layer_input_size = (layer_input_size,) output = output.view(-1, *layer_input_size) assert output.shape[0] == output_size_last, "output_size reshaped to different length. Check shape!" 
if "Unpool" in self.struct_param[i][1]: output_tentative = getattr(self, "layer_{0}".format(i))(output, indices_list.pop(-1)) else: output_tentative = getattr(self, "layer_{0}".format(i))(output) if isinstance(output_tentative, tuple): output, indices = output_tentative indices_list.append(indices) else: output = output_tentative if "activation" in self.struct_param[i][2]: activation = self.struct_param[i][2]["activation"] else: if "activation" in self.settings: activation = self.settings["activation"] else: activation = "linear" if "Pool" in self.struct_param[i][1] or "Unpool" in self.struct_param[i][1] or "Upsample" in self.struct_param[i][1]: activation = "linear" output = get_activation(activation)(output) if self.return_indices: return output, indices_list else: return output def get_loss(self, input, target, criterion, **kwargs): y_pred = self(input, **kwargs) if self.return_indices: y_pred = y_pred[0] return criterion(y_pred, target) def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs): if not isinstance(source, list): source = [source] reg = Variable(torch.FloatTensor([0]), requires_grad = False) if self.is_cuda: reg = reg.cuda() for k in range(self.num_layers): if self.struct_param[k][1] not in self.param_available: continue layer = getattr(self, "layer_{0}".format(k)) for source_ele in source: if source_ele == "weight": if self.struct_param[k][1] not in ["Simple_Layer"]: item = layer.weight else: item = layer.W_core elif source_ele == "bias": if self.struct_param[k][1] not in ["Simple_Layer"]: item = layer.bias else: item = layer.b_core if mode == "L1": reg = reg + item.abs().sum() elif mode == "L2": reg = reg + (item ** 2).sum() else: raise Exception("mode {0} not recognized!".format(mode)) return reg def get_weights_bias(self, W_source = "core", b_source = "core"): W_list = [] b_list = [] for k in range(self.num_layers): if self.struct_param[k][1] == "Simple_Layer": layer = getattr(self, "layer_{0}".format(k)) if W_source == 
"core": W_list.append(to_np_array(layer.W_core)) if b_source == "core": b_list.append(to_np_array(layer.b_core)) elif self.struct_param[k][1] in self.param_available: layer = getattr(self, "layer_{0}".format(k)) if W_source == "core": W_list.append(to_np_array(layer.weight)) if b_source == "core": b_list.append(to_np_array(layer.bias, full_reduce = False)) else: if W_source == "core": W_list.append(None) if b_source == "core": b_list.append(None) return W_list, b_list @property def model_dict(self): model_dict = {"type": self.__class__.__name__} model_dict["net_type"] = self.__class__.__name__ model_dict["input_channels"] = self.input_channels model_dict["struct_param"] = self.struct_param model_dict["settings"] = self.settings model_dict["weights"], model_dict["bias"] = self.get_weights_bias(W_source = "core", b_source = "core") model_dict["return_indices"] = self.return_indices return model_dict @property def output_size(self): return self.struct_param[-1][0] @property def structure(self): structure = OrderedDict() structure["input_channels"] = self.input_channels structure["output_size"] = self.output_size structure["struct_param"] = self.struct_param if hasattr(self, "struct_param") else None return structure def get_sympy_expression(self, verbose=True): expressions = {i: None for i in range(self.num_layers)} return expressions def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) def DL(self): DL = 0 for k in range(self.num_layers): layer_type = self.struct_param[k][1] if layer_type in self.param_available: layer = getattr(self, "layer_{0}".format(k)) if layer_type == "Simple_Layer": DL += layer.DL else: DL += get_list_DL(to_np_array(layer.weight), "non-snapped") DL += get_list_DL(to_np_array(layer.bias), "non-snapped") 
        return DL

    def load_model_dict(self, model_dict):
        # Rebuild a net from the dict and adopt its attributes wholesale.
        new_net = load_model_dict_net(model_dict, is_cuda = self.is_cuda)
        self.__dict__.update(new_net.__dict__)

    def prepare_inspection(self, X, y, **kwargs):
        # Runs a forward pass; accuracy bookkeeping is currently disabled, so
        # `pred` is computed but unused and info_dict is returned unchanged.
        pred_prob = self(X)
        if self.return_indices:
            pred_prob = pred_prob[0]
        pred = pred_prob.max(1)[1]
        # self.info_dict["accuracy"] = get_accuracy(pred, y)
        return deepcopy(self.info_dict)

    def set_cuda(self, is_cuda):
        # is_cuda may be a device string (e.g. "cuda:1") or a bool.
        if isinstance(is_cuda, str):
            self.cuda(is_cuda)
        else:
            if is_cuda:
                self.cuda()
            else:
                self.cpu()
        self.is_cuda = is_cuda

    def set_trainable(self, is_trainable):
        # Freeze/unfreeze every parameterized layer.
        for k in range(self.num_layers):
            layer = getattr(self, "layer_{0}".format(k))
            if self.struct_param[k][1] == "Simple_Layer":
                layer.set_trainable(is_trainable)
            elif self.struct_param[k][1] in self.param_available:
                for param in layer.parameters():
                    param.requires_grad = is_trainable


class Conv_Model(nn.Module):
    def __init__(
        self,
        encoder_model_dict,
        core_model_dict,
        decoder_model_dict,
        latent_size = 2,
        is_generative = True,
        is_res_block = True,
        is_cuda = False,
        ):
        """Conv_Model consists of an encoder, a core and a decoder.

        In generative mode the encoder is not built and the core maps a latent
        vector directly; otherwise the encoder's output feeds the core via p_dict.
        Sub-models are reconstructed from their serialized model_dicts.
        """
        super(Conv_Model, self).__init__()
        self.latent_size = latent_size
        self.is_generative = is_generative
        if not is_generative:
            self.encoder = load_model_dict(encoder_model_dict, is_cuda = is_cuda)
        self.core = load_model_dict(core_model_dict, is_cuda = is_cuda)
        self.decoder = load_model_dict(decoder_model_dict, is_cuda = is_cuda)
        self.is_res_block = is_res_block
        self.is_cuda = is_cuda
        self.info_dict = {}

    @property
    def num_layers(self):
        # Generative mode treats the core as a single layer.
        if self.is_generative:
            return 1
        else:
            return len(self.core.model_dict["struct_param"])

    def forward(
        self,
        X,
        latent = None,
        **kwargs
        ):
        # latent: conditioning vector; broadcast across the batch if 1-D.
        if self.is_generative:
            if len(latent.shape) == 1:
                latent = latent.repeat(len(X), 1)
            latent = self.core(latent)
        else:
            # Inject the latent only at the core's first layer.
            p_dict = {k: latent if k == 0 else None for k in range(self.num_layers)}
            latent = self.encoder(X)
            latent = self.core(latent, p_dict = p_dict)
        output = self.decoder(latent)
        if self.is_res_block:
            # Residual output, squashed and clamped to valid image range.
            output = (X + 
nn.Sigmoid()(output)).clamp(0, 1) return output def forward_multistep(self, X, latents, isplot = False, num_images = 1): assert len(latents.shape) == 1 length = int(len(latents) / 2) output = X for i in range(length - 1): latent = latents[i * self.latent_size: (i + 2) * self.latent_size] output = self(output, latent = latent) if isplot: plot_matrices(output[:num_images,0]) return output def get_loss(self, X, y, criterion, **kwargs): return criterion(self(X = X[0], latent = X[1]), y) def plot(self, X, y, num_images = 1): y_pred = self(X[0], latent = X[1]) idx_list = np.random.choice(len(X[0]), num_images) for idx in idx_list: matrix = torch.cat([X[0][idx], y[idx], y_pred[idx]]) plot_matrices(matrix, images_per_row = 8) def get_regularization(self, source = ["weights", "bias"], mode = "L1"): if self.is_generative: return self.core.get_regularization(source = source, mode = mode) + self.decoder.get_regularization(source = source, mode = mode) else: return self.encoder.get_regularization(source = source, mode = mode) + self.core.get_regularization(source = source, mode = mode) + self.decoder.get_regularization(source = source, mode = mode) def prepare_inspection(self, X, y, **kwargs): return deepcopy(self.info_dict) def set_trainable(self, is_trainable): if not self.is_generative: self.encoder.set_trainable(is_trainable) self.core.set_trainable(is_trainable) self.decoder.set_trainable(is_trainable) @property def model_dict(self): model_dict = {"type": "Conv_Model"} if not self.is_generative: model_dict["encoder_model_dict"] = self.encoder.model_dict model_dict["latent_size"] = self.latent_size model_dict["core_model_dict"] = self.core.model_dict model_dict["decoder_model_dict"] = self.decoder.model_dict model_dict["is_generative"] = self.is_generative model_dict["is_res_block"] = self.is_res_block return model_dict def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) 
self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) class Conv_Autoencoder(nn.Module): def __init__( self, input_channels_encoder, input_channels_decoder, struct_param_encoder, struct_param_decoder, latent_size = (1,2), share_model_among_steps = False, settings = {}, is_cuda = False, ): """Conv_Autoencoder consists of an encoder and a decoder""" super(Conv_Autoencoder, self).__init__() self.input_channels_encoder = input_channels_encoder self.input_channels_decoder = input_channels_decoder self.struct_param_encoder = struct_param_encoder self.struct_param_decoder = struct_param_decoder self.share_model_among_steps = share_model_among_steps self.settings = settings self.encoder = ConvNet(input_channels = input_channels_encoder, struct_param = struct_param_encoder, settings = settings, is_cuda = is_cuda) self.decoder = ConvNet(input_channels = input_channels_decoder, struct_param = struct_param_decoder, settings = settings, is_cuda = is_cuda) self.is_cuda = is_cuda def encode(self, input): if self.share_model_among_steps: latent = [] for i in range(input.shape[1]): latent_step = self.encoder(input[:, i:i+1]) latent.append(latent_step) return torch.cat(latent, 1) else: return self.encoder(input) def decode(self, latent): if self.share_model_among_steps: latent_size = self.struct_param_encoder[-1][0] latent = latent.view(latent.size(0), -1, latent_size) output = [] for i in range(latent.shape[1]): output_step = self.decoder(latent[:, i].contiguous()) output.append(output_step) return torch.cat(output, 1) else: return self.decoder(latent) def set_trainable(self, is_trainable): self.encoder.set_trainable(is_trainable) self.decoder.set_trainable(is_trainable) def forward(self, input): return self.decode(self.encode(input)) def get_loss(self, input, target, criterion, **kwargs): return criterion(self(input), target) def get_regularization(self, source = 
["weight", "bias"], mode = "L1"): return self.encoder.get_regularization(source = source, mode = mode) + self.decoder.get_regularization(source = source, mode = mode) @property def model_dict(self): model_dict = {"type": "Conv_Autoencoder"} model_dict["net_type"] = "Conv_Autoencoder" model_dict["input_channels_encoder"] = self.input_channels_encoder model_dict["input_channels_decoder"] = self.input_channels_decoder model_dict["struct_param_encoder"] = self.struct_param_encoder model_dict["struct_param_decoder"] = self.struct_param_decoder model_dict["share_model_among_steps"] = self.share_model_among_steps model_dict["settings"] = self.settings model_dict["encoder"] = self.encoder.model_dict model_dict["decoder"] = self.decoder.model_dict return model_dict def load_model_dict(self, model_dict): model = load_model_dict(model_dict, is_cuda = self.is_cuda) self.__dict__.update(model.__dict__) def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) def DL(self): return self.encoder.DL + self.decoder.DL class Flatten(nn.Module): def __init__(self): super(Flatten, self).__init__() def forward(self, x): return x.view(x.size(0), -1) # ## VAE: # In[ ]: class VAE(nn.Module): def __init__( self, encoder_model_dict, decoder_model_dict, is_cuda = False, ): super(VAE, self).__init__() self.encoder = load_model_dict(encoder_model_dict, is_cuda = is_cuda) self.decoder = load_model_dict(decoder_model_dict, is_cuda = is_cuda) self.is_cuda = is_cuda self.info_dict = {} def encode(self, X): Z = self.encoder(X) latent_size = int(Z.shape[-1] / 2) mu = Z[..., :latent_size] logvar = Z[..., latent_size:] return mu, logvar def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) def 
decode(self, Z): return self.decoder(Z) def forward(self, X): mu, logvar = self.encode(X) Z = self.reparameterize(mu, logvar) return self.decode(Z), mu, logvar def get_loss(self, X, y = None, **kwargs): recon_X, mu, logvar = self(X) BCE = F.binary_cross_entropy(recon_X.view(recon_X.shape[0], -1), X.view(X.shape[0], -1), reduction='sum') # see Appendix B from VAE paper: # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 # https://arxiv.org/abs/1312.6114 # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) loss = (BCE + KLD) / len(X) self.info_dict["KLD"] = KLD.item() / len(X) self.info_dict["BCE"] = BCE.item() / len(X) return loss def model_dict(self): model_dict = {"type": "VAE"} model_dict["encoder_model_dict"] = self.encoder.model_dict model_dict["decoder_model_dict"] = self.decoder.model_dict return model_dict def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) def get_regularization(self, source = ["weight", "bias"], mode = "L1"): return self.encoder.get_regularization(source = source, mode = mode) + self.decoder.get_regularization(source = source, mode = mode) def prepare_inspection(self, X, y, **kwargs): return deepcopy(self.info_dict) # ## Reparameterization toolkit: # In[ ]: class Net_reparam(nn.Module): """Module that uses reparameterization to take into two inputs and gets a scaler""" def __init__( self, model_dict, reparam_mode, is_cuda=False, ): super(Net_reparam, self).__init__() self.model = load_model_dict(model_dict, is_cuda=is_cuda) self.reparam_mode = reparam_mode def forward(self, X, Z, is_outer=False): """ Obtaining single value using reparameterization. Args: X shape: [Bx, ...] 
Z shape: [S, Bz, Z] is_outer: whether to use outer product to get a tensor with shape [S, Bz, Bx]. Returns: If is_outer==True, return log_prob of shape [S, Bz, Bx] If is_outer==False, return log_prob of shape [S, Bz] (where Bz=Bx) """ dist, _ = reparameterize(self.model, X, mode=self.reparam_mode) if is_outer: log_prob = dist.log_prob(Z[...,None,:]) else: log_prob = dist.log_prob(Z) if self.reparam_mode == 'diag': log_prob = log_prob.sum(-1) return log_prob def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs): return self.model.get_regularization(source=source, model=mode, **kwargs) def prepare_inspection(self, X, y, **kwargs): return {} @property def model_dict(self): model_dict = {"type": "Net_reparam"} model_dict["model"] = self.model.model_dict model_dict["reparam_mode"] = self.reparam_mode return model_dict def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) def reparameterize(model, input, mode="full", size=None): if mode.startswith("diag"): if model is not None and model.__class__.__name__ == "Mixture_Model": return reparameterize_mixture_diagonal(model, input, mode=mode) else: return reparameterize_diagonal(model, input, mode=mode) elif mode == "full": return reparameterize_full(model, input, size=size) else: raise Exception("Mode {} is not valid!".format(mode)) def reparameterize_diagonal(model, input, mode): if model is not None: mean_logit = model(input) else: mean_logit = input if mode.startswith("diagg"): if isinstance(mean_logit, tuple): mean = mean_logit[0] else: mean = mean_logit std = torch.ones(mean.shape).to(mean.device) dist = Normal(mean, std) return dist, (mean, std) elif mode.startswith("diag"): if isinstance(mean_logit, tuple): mean_logit = mean_logit[0] size = 
int(mean_logit.size(-1) / 2) mean = mean_logit[:, :size] std = F.softplus(mean_logit[:, size:], beta=1) + 1e-10 dist = Normal(mean, std) return dist, (mean, std) else: raise Exception("mode {} is not valid!".format(mode)) def reparameterize_mixture_diagonal(model, input, mode): mean_logit, weight_logits = model(input) if mode.startswith("diagg"): mean_list = mean_logit scale_list = torch.ones(mean_list.shape).to(mean_list.device) else: size = int(mean_logit.size(-2) / 2) mean_list = mean_logit[:, :size] scale_list = F.softplus(mean_logit[:, size:], beta=1) + 0.01 # Avoid the std to go to 0 dist = Mixture_Gaussian_reparam(mean_list=mean_list, scale_list=scale_list, weight_logits=weight_logits, ) return dist, (mean_list, scale_list) def reparameterize_full(model, input, size=None): if model is not None: mean_logit = model(input) else: mean_logit = input if isinstance(mean_logit, tuple): mean_logit = mean_logit[0] if size is None: dim = mean_logit.size(-1) size = int((np.sqrt(9 + 8 * dim) - 3) / 2) mean = mean_logit[:, :size] scale_tril = fill_triangular(mean_logit[:, size:], size) scale_tril = matrix_diag_transform(scale_tril, F.softplus) dist = MultivariateNormal(mean, scale_tril = scale_tril) return dist, (mean, scale_tril) def sample(dist, n=None): """Sample n instances from distribution dist""" if n is None: return dist.rsample() else: return dist.rsample((n,)) # ## Probability models: # ### Mixture of Gaussian: # In[ ]: class Mixture_Gaussian(nn.Module): def __init__( self, num_components, dim, param_mode = "full", is_cuda = False, ): super(Mixture_Gaussian, self).__init__() self.num_components = num_components self.dim = dim self.param_mode = param_mode self.is_cuda = is_cuda self.device = torch.device(self.is_cuda if isinstance(self.is_cuda, str) else "cuda" if self.is_cuda else "cpu") self.info_dict = {} def initialize(self, model_dict = None, input = None, num_samples = 100, verbose = False): if input is not None: neg_log_prob_min = np.inf loc_init_min = 
None scale_init_min = None for i in range(num_samples): neg_log_prob, loc_init_list, scale_init_list = self.initialize_ele(input) if verbose: print("{0}: neg_log_prob: {1:.4f}".format(i, neg_log_prob)) if neg_log_prob < neg_log_prob_min: neg_log_prob_min = neg_log_prob loc_init_min = self.loc_list.detach() scale_init_min = self.scale_list.detach() self.loc_list = nn.Parameter(loc_init_min.to(self.device)) self.scale_list = nn.Parameter(scale_init_min.to(self.device)) print("min neg_log_prob: {0:.6f}".format(to_np_array(neg_log_prob_min))) else: if model_dict is None: self.weight_logits = nn.Parameter((torch.randn(self.num_components) * np.sqrt(2 / (1 + self.dim))).to(self.device)) else: self.weight_logits = nn.Parameter((torch.FloatTensor(model_dict["weight_logits"])).to(self.device)) if self.param_mode == "full": size = self.dim * (self.dim + 1) // 2 elif self.param_mode == "diag": size = self.dim else: raise if model_dict is None: self.loc_list = nn.Parameter(torch.randn(self.num_components, self.dim).to(self.device)) self.scale_list = nn.Parameter((torch.randn(self.num_components, size) / self.dim).to(self.device)) else: self.loc_list = nn.Parameter(torch.FloatTensor(model_dict["loc_list"]).to(self.device)) self.scale_list = nn.Parameter(torch.FloatTensor(model_dict["scale_list"]).to(self.device)) def initialize_ele(self, input): if self.param_mode == "full": size = self.dim * (self.dim + 1) // 2 elif self.param_mode == "diag": size = self.dim else: raise length = len(input) self.weight_logits = nn.Parameter(torch.zeros(self.num_components).to(self.device)) self.loc_list = nn.Parameter(input[torch.multinomial(torch.ones(length) / length, self.num_components)].detach()) self.scale_list = nn.Parameter((torch.randn(self.num_components, size).to(self.device) * input.std() / 5).to(self.device)) neg_log_prob = self.get_loss(input) return neg_log_prob def prob(self, input): if len(input.shape) == 1: input = input.unsqueeze(1) assert len(input.shape) in [0, 2, 3] input 
= input.unsqueeze(-2) if self.param_mode == "diag": scale_list = F.softplus(self.scale_list) logits = (- (input - self.loc_list) ** 2 / 2 / scale_list ** 2 - torch.log(scale_list * np.sqrt(2 * np.pi))).sum(-1) else: raise prob = torch.matmul(torch.exp(logits), nn.Softmax(dim = 0)(self.weight_logits)) # prob_list = [] # for i in range(self.num_components): # if self.param_mode == "full": # scale_tril = fill_triangular(getattr(self, "scale_{0}".format(i)), self.dim) # scale_tril = matrix_diag_transform(scale_tril, F.softplus) # dist = MultivariateNormal(getattr(self, "loc_{0}".format(i)), scale_tril = scale_tril) # log_prob = dist.log_prob(input) # elif self.param_mode == "diag": # dist = Normal(getattr(self, "loc_{0}".format(i)).unsqueeze(0), F.softplus(getattr(self, "scale_{0}".format(i)))) # mu = getattr(self, "loc_{0}".format(i)).unsqueeze(0) # sigma = F.softplus(getattr(self, "scale_{0}".format(i))) # log_prob = (- (input - mu) ** 2 / 2 / sigma ** 2 - torch.log(sigma * np.sqrt(2 * np.pi))).sum(-1) # else: # raise # setattr(self, "component_{0}".format(i), dist) # prob = torch.exp(log_prob) # prob_list.append(prob) # prob_list = torch.stack(prob_list, -1) # prob = torch.matmul(prob_list, nn.Softmax(dim = 0)(self.weight_logits)) return prob def log_prob(self, input): return torch.log(self.prob(input) + 1e-45) def get_loss(self, X, y = None, **kwargs): """Optimize negative log-likelihood""" neg_log_prob = - self.log_prob(X).mean() / np.log(2) self.info_dict["loss"] = to_np_array(neg_log_prob) return neg_log_prob def prepare_inspection(X, y, criterion, **kwargs): return deepcopy(self.info_dict) @property def model_dict(self): model_dict = {"type": "Mixture_Gaussian"} model_dict["num_components"] = self.num_components model_dict["dim"] = self.dim model_dict["param_mode"] = self.param_mode model_dict["weight_logits"] = to_np_array(self.weight_logits) model_dict["loc_list"] = to_np_array(self.loc_list) model_dict["scale_list"] = to_np_array(self.scale_list) return 
model_dict def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) def get_param(self): weights = to_np_array(nn.Softmax(dim = 0)(self.weight_logits)) loc_list = to_np_array(self.loc_list) scale_list = to_np_array(self.scale_list) print("weights: {0}".format(weights)) print("loc:") pp.pprint(loc_list) print("scale:") pp.pprint(scale_list) return weights, loc_list, scale_list def visualize(self, input): import scipy import matplotlib.pylab as plt std = to_np_array(input.std()) X = np.arange(to_np_array(input.min()) - 0.2 * std, to_np_array(input.max()) + 0.2 * std, 0.1) Y_dict = {} weights = nn.Softmax(dim = 0)(self.weight_logits) plt.figure(figsize=(10, 4), dpi=100).set_facecolor('white') for i in range(self.num_components): Y_dict[i] = weights[0].item() * scipy.stats.norm.pdf((X - self.loc_list[i].item()) / self.scale_list[i].item()) plt.plot(X, Y_dict[i]) Y = np.sum([item for item in Y_dict.values()], 0) plt.plot(X, Y, 'k--') plt.plot(input.data.numpy(), np.zeros(len(input)), 'k*') plt.title('Density of {0}-component mixture model'.format(self.num_components)) plt.ylabel('probability density'); def get_regularization(self, source = ["weights", "bias"], mode = "L1", **kwargs): reg = to_Variable([0], requires_grad = False).to(self.device) return reg # ### Mixture_Gaussian for reparameterization: # In[ ]: class Mixture_Gaussian_reparam(nn.Module): def __init__( self, # Use as reparamerization: mean_list=None, scale_list=None, weight_logits=None, # Use as prior: Z_size=None, n_components=None, mean_scale=0.1, scale_scale=0.1, # Mode: is_reparam=True, reparam_mode="diag", device= torch.device("cpu"), ): super(Mixture_Gaussian_reparam, self).__init__() self.is_reparam = is_reparam self.reparam_mode = reparam_mode # self.is_cuda = 
is_cuda self.device = device if self.is_reparam: self.mean_list = mean_list # size: [B, Z, k] self.scale_list = scale_list # size: [B, Z, k] self.weight_logits = weight_logits # size: [B, k] self.n_components = self.weight_logits.shape[-1] self.Z_size = self.mean_list.shape[-2] else: self.n_components = n_components self.Z_size = Z_size self.mean_list = nn.Parameter((torch.rand(1, Z_size, n_components) - 0.5) * mean_scale) self.scale_list = nn.Parameter(torch.log(torch.exp((torch.rand(1, Z_size, n_components) * 0.2 + 0.9) * scale_scale) - 1)) self.weight_logits = nn.Parameter(torch.zeros(1, n_components)) if mean_list is not None: self.mean_list.data = to_Variable(mean_list) self.scale_list.data = to_Variable(scale_list) self.weight_logits.data = to_Variable(weight_logits) self.to(self.device) def log_prob(self, input): """Obtain the log_prob of the input.""" input = input.unsqueeze(-1) # [S, B, Z, 1] if self.reparam_mode == "diag": if self.is_reparam: # logits: [S, B, Z, k] logits = - (input - self.mean_list) ** 2 / 2 / self.scale_list ** 2 - torch.log(self.scale_list * np.sqrt(2 * np.pi)) else: scale_list = F.softplus(self.scale_list, beta=1) logits = - (input - self.mean_list) ** 2 / 2 / scale_list ** 2 - torch.log(scale_list * np.sqrt(2 * np.pi)) else: raise # log_softmax(weight_logits): [B, k] # logits: [S, B, Z, k] # log_prob: [S, B, Z] log_prob = torch.logsumexp(logits + F.log_softmax(self.weight_logits, -1).unsqueeze(-2), axis=-1) # F(...).unsqueeze(-2): [B, 1, k] return log_prob def prob(self, Z): return torch.exp(self.log_prob(Z)) def sample(self, n=None): if n is None: n_core = 1 else: assert isinstance(n, tuple) n_core = n[0] weight_probs = F.softmax(self.weight_logits, -1) # size: [B, m] idx = torch.multinomial(weight_probs, n_core, replacement=True).unsqueeze(-2).expand(-1, self.mean_list.shape[-2], -1) # multinomial result: [B, S]; result: [B, Z, S] mean_list = torch.gather(self.mean_list, dim=-1, index=idx) # [B, Z, S] if self.is_reparam: scale_list 
= torch.gather(self.scale_list, dim=-1, index=idx) # [B, Z, S] else: scale_list = F.softplus(torch.gather(self.scale_list, dim=-1, index=idx), beta=1) # [B, Z, S] Z = torch.normal(mean_list, scale_list).permute(2, 0, 1) if n is None: Z = Z.squeeze(0) return Z def rsample(self, n=None): return self.sample(n=n) def __repr__(self): return "Mixture_Gaussian_reparam({}, Z_size={})".format(self.n_components, self.Z_size) @property def model_dict(self): model_dict = {"type": "Mixture_Gaussian_reparam"} model_dict["is_reparam"] = self.is_reparam model_dict["reparam_mode"] = self.reparam_mode model_dict["Z_size"] = self.Z_size model_dict["n_components"] = self.n_components model_dict["mean_list"] = to_np_array(self.mean_list) model_dict["scale_list"] = to_np_array(self.scale_list) model_dict["weight_logits"] = to_np_array(self.weight_logits) return model_dict # ### Triangular distribution: # In[ ]: class Triangular_dist(Distribution): """Probability distribution with a Triangular shape.""" def __init__(self, loc, a, b, validate_args=None): self.loc, self.a, self.b = broadcast_all(loc, a, b) batch_shape = torch.Size() if isinstance(loc, Number) else self.loc.size() super(Triangular_dist, self).__init__(batch_shape, validate_args=validate_args) @property def mean(self): return self.loc + (self.b - self.a) / 3 @property def variance(self): return (self.a ** 2 + self.b ** 2 + self.a * self.b) / 18 @property def stddev(self): return torch.sqrt(self.variance) def expand(self, batch_shape, _instance=None): new = self._get_checked_instance(PieceWise, _instance) batch_shape = torch.Size(batch_shape) new.loc = self.loc.expand(batch_shape) new.a = self.a.expand(batch_shape) new.b = self.b.expand(batch_shape) super(Triangular_dist, new).__init__(batch_shape, validate_args=False) new._validate_args = self._validate_args return new @constraints.dependent_property def support(self): return constraints.interval(self.loc - self.a, self.loc + self.b) def sample(self, 
sample_shape=torch.Size()): shape = self._extended_shape(sample_shape) rand = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) with torch.no_grad(): return self.icdf(rand) def rsample(self, sample_shape=torch.Size()): """Sample with reparameterization.""" shape = self._extended_shape(sample_shape) rand = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) return self.icdf(rand) def icdf(self, value): """Inverse cdf.""" if self._validate_args: self._validate_sample(value) assert value.min() >= 0 and value.max() <= 1 value, loc, a, b = broadcast_all(value, self.loc, self.a, self.b) a_plus_b = a + b idx = value < a / a_plus_b iidx = ~idx out = torch.ones_like(value) out[idx] = loc[idx] - a[idx] + torch.sqrt(a[idx] * a_plus_b[idx] * value[idx]) out[iidx] = loc[iidx] + b[iidx] - torch.sqrt(b[iidx] * a_plus_b[iidx] * (1 - value[iidx]) ) return out def prob(self, value): """Get probability.""" if self._validate_args: self._validate_sample(value) # compute the variance value, loc, a, b = broadcast_all(value, self.loc, self.a, self.b) idx1 = (loc - a <= value) & (value <= loc) idx2 = (loc < value) & (value <= loc + b) a_plus_b = a + b out = torch.zeros_like(value) out[idx1] = 2 * (value[idx1] - loc[idx1] + a[idx1]) / a[idx1] / a_plus_b[idx1] out[idx2] = -2 * (value[idx2] - loc[idx2] - b[idx2]) / b[idx2] / a_plus_b[idx2] return out def log_prob(self, value): """Get log probability.""" return torch.log(self.prob(value)) @property def model_dict(self): model_dict = {"type": "Triangular_dist"} model_dict["loc"] = to_np_array(self.loc) model_dict["a"] = to_np_array(self.a) model_dict["b"] = to_np_array(self.b) return model_dict def load(self, filename): mode = "json" if filename.endswith(".json") else "pickle" model_dict = load_model(filename, mode=mode) self.load_model_dict(model_dict) def save(self, filename): mode = "json" if filename.endswith(".json") else "pickle" save_model(self.model_dict, filename, mode=mode) # In[ ]: def 
load_model_dict_distribution(model_dict, is_cuda = False): if model_dict["type"] == "Mixture_Gaussian": model = Mixture_Gaussian( num_components=model_dict["num_components"], dim=model_dict["dim"], param_mode=model_dict["param_mode"], is_cuda=is_cuda, ) model.initialize(model_dict = model_dict) elif model_dict["type"] == "Mixture_Gaussian_reparam": model = Mixture_Gaussian_reparam( is_reparam=model_dict["is_reparam"], reparam_mode=model_dict["reparam_mode"], mean_list=model_dict["mean_list"], scale_list=model_dict["scale_list"], weight_logits=model_dict["weight_logits"], Z_size=model_dict["Z_size"], n_components=model_dict["n_components"], is_cuda=is_cuda, ) elif model_dict["type"] == "Triangular_dist": model = Triangular_dist( loc=model_dict["loc"], a=model_dict["a"], b=model_dict["b"], ) else: raise Exception("Type {} is not valid!".format(model_dict["type"])) return model
208,307
46.428962
300
py
theedhum-nandrum
theedhum-nandrum-master/src/__init__.py
""" Package Initialization file. """ import os import logging from logging import StreamHandler from logging.handlers import RotatingFileHandler # Create the Handler for logging data to a file logger_handler = RotatingFileHandler(os.path.join(os.path.dirname(__file__), '../logs/tn.log'), maxBytes=1024, backupCount=5) logger_handler.setLevel(logging.INFO) #Create the Handler for logging data to console. console_handler = StreamHandler() console_handler.setLevel(logging.INFO) # Create a Formatter for formatting the log messages logger_formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') # Add the Formatter to the Handler logger_handler.setFormatter(logger_formatter) console_handler.setFormatter(logger_formatter) root_logger = logging.getLogger() root_logger.setLevel(logging.INFO) root_logger.addHandler(logger_handler) root_logger.addHandler(console_handler)
892
33.346154
125
py
theedhum-nandrum
theedhum-nandrum-master/src/playground/classify.py
# Load and prepare the dataset import nltk from nltk.corpus import movie_reviews from nltk.util import ngrams import random import sys import re from emoji import UNICODE_EMOJI from bisect import bisect_left import math from sklearn.metrics import classification_report from nltk.classify.scikitlearn import SklearnClassifier from sklearn.naive_bayes import MultinomialNB,BernoulliNB from sklearn.linear_model import LogisticRegression,SGDClassifier from sklearn.svm import SVC, LinearSVC, NuSVC # Appeding our src directory to sys path so that we can import modules. sys.path.append('../..') from src.tn.lib.sentimoji import get_emoji_sentiment_rank nltk.download('movie_reviews') #nltk_documents = [(list(movie_reviews.words(fileid)), category) # for category in movie_reviews.categories() # for fileid in movie_reviews.fileids(category)] def load_docs(source): documents = [] with open(source, 'r', encoding='utf-8') as inf: # skipping header row next(inf) for line in inf: (review, cat) = re.split('\t', line.strip()) words = review.split() document = (list(words), cat) documents.append(document) # Commenting this because this might bias the distribution # if cat != 'Positive': # for i in range(5): # # oversampling to correct bias # documents.append(document) return documents # Define the feature extractor def document_features(document, feature_sets): document_words = set(document) # TODO: use bigrams in both training and testing # document_bigrams = set(list(nltk.bigrams(document))) features = {} if ('bag_of_words' in feature_sets): document_bag_of_words_feature(document_words, features) if ('emojis' in feature_sets): document_emoji_feature(document_words, features) if ('length' in feature_sets): document_length_feature(document_words, features) if ('ngram' in feature_sets): for size in feature_sets['ngram']: document_ngram_feature(document, features, size) return(features) def get_bag_of_all_words(): if not hasattr(get_bag_of_all_words, "bag_of_words"): 
get_bag_of_all_words.bag_of_words = {} imdb_words = list(nltk.FreqDist(w.lower() for w in movie_reviews.words()))[:1000] training_words = nltk.FreqDist(w.lower() for d in training_documents for w in d[0]) training_words = list(training_words)[:3000] all_words = imdb_words + training_words word_features = all_words for word in word_features: get_bag_of_all_words.bag_of_words['contains({})'.format( word)] = (False) return get_bag_of_all_words.bag_of_words # The bag of Words Feature Classifier. Marks occurance of words from the universal # dictonary def document_bag_of_words_feature(document_words, features): bag_of_words = get_bag_of_all_words() features.update(bag_of_words) for word in document_words: features['contains({})'.format(word)] = (True) def get_all_emojis(): if not hasattr(get_all_emojis, "all_emojis"): get_all_emojis.all_emojis = {} for c in UNICODE_EMOJI: get_all_emojis.all_emojis['has-emoji({})'.format(c)] = (False) return get_all_emojis.all_emojis # The emoji feature classifier def document_emoji_feature(document_words, features): all_emojis = get_all_emojis() features.update(all_emojis) allchars = set(''.join(document_words)) score = 0.0 for c in allchars: features['has-emoji({})'.format(c)] = (True) sentiment = get_emoji_sentiment_rank(c) if sentiment is not False: score += sentiment['sentiment_score'] features['emoji-positive'] = (False) features['emoji-negative'] = (False) features['emoji-neutral'] = (False) if score > 0.2: features['emoji-positive'] = (True) elif score < -0.2: features['emoji-negative'] = (True) else: features['emoji-neutral'] = (True) def document_length_feature(document_words, features): features['word-count'] = len(document_words) # doclen = sum(len(word) for word in document_words) # features['doc-length'] = get_range(doclen) # features['avg-word-length'] = int(round(features['doc-length']/len(document_words))) def get_range(doclen): ranges = ["1-10", "11-20", "21-30", "31-40", "41-50", "51-60", "61-70", "71-80", "81-90", 
"91-100", "101-110", "111-120", "121-130", "131-140", "141-150", "151-160", "161-170", "171-180", "181-190", "191-200", ">200"] breakpoints = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, math.inf] index = bisect_left(breakpoints, doclen) return ranges[index] # Similar to bag of words filter, but for N grams def get_all_ngrams(n): if not hasattr(get_all_ngrams, "all_ngrams"): get_all_ngrams.all_ngrams = {} imdb_ngrams = list(ngrams(movie_reviews.words(), n))[:1000] training_ngrams = [] for d in training_documents: training_ngrams.extend(ngrams(d[0], n)) training_ngrams = training_ngrams[:3000] total_ngrams = imdb_ngrams + training_ngrams for ngram in total_ngrams: get_all_ngrams.all_ngrams['contains({})'.format( "-".join(ngram))] = (False) return get_all_ngrams.all_ngrams def document_ngram_feature(doc, features, n): all_ngrams = get_all_ngrams(n) doc_ngrams = list(ngrams(doc, n)) features.update(all_ngrams) for ngram in doc_ngrams: features['contains({})'.format("-".join(ngram))] = (True) # Output classification in sklearn report format - # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html # The inputset is the documents and not the document features def get_classifier_metrics_report(classifier, inputset, features): refset, guesset= [], [] for (d,c) in inputset: refset.append(c) guesset.append(classifier.classify(document_features(d, features))) return classification_report(refset, guesset) training_documents = load_docs("../../resources/data/tamil_train.tsv") testing_documents = load_docs("../../resources/data/tamil_dev.tsv") # random.shuffle(documents) # test_size = int(len(documents)/20.0) feature_filters = [{'length': 1}, {'bag_of_words': 1}, {'length': 1, 'ngram': [5]}, {'length': 1, 'ngram': [4]}, {'emojis': 1}, {'emojis': 1, 'ngram': [2, 3, 4]}, {'bag_of_words': 1, 'ngram': [2, 3, 4], 'length': 1, 'emojis': 1}] # feature_filters = [{'length': 1}, {'bag_of_words': 1}] for 
filter in feature_filters: # Train Naive Bayes classifier train_set = [ (document_features(d, filter), c) for (d, c) in training_documents] test_set = testing_documents[2000:] # classifier = nltk.NaiveBayesClassifier.train(train_set) print(filter) NB_classifier = nltk.NaiveBayesClassifier.train(train_set) report = get_classifier_metrics_report(NB_classifier, test_set, filter) print("Classification report for NaiveBayesian classifier %s\n" % (report)) MNB_classifier = SklearnClassifier(MultinomialNB()) MNB_classifier.train(train_set) report = get_classifier_metrics_report(MNB_classifier, test_set, filter) print("Classification report for MNB classifier %s\n" % (report)) BNB_classifier = SklearnClassifier(BernoulliNB()) BNB_classifier.train(train_set) report = get_classifier_metrics_report(BNB_classifier, test_set, filter) print("Classification report for BNB classifier %s\n" % (report)) LogisticRegression_classifier = SklearnClassifier(LogisticRegression()) LogisticRegression_classifier.train(train_set) report = get_classifier_metrics_report(LogisticRegression_classifier, test_set, filter) print("Classification report for LR classifier %s\n" % (report)) SGDClassifier_classifier = SklearnClassifier(SGDClassifier()) SGDClassifier_classifier.train(train_set) report = get_classifier_metrics_report(SGDClassifier_classifier, test_set, filter) print("Classification report for SGD classifier %s\n" % (report)) SVC_classifier = SklearnClassifier(SVC()) SVC_classifier.train(train_set) report = get_classifier_metrics_report(SVC_classifier, test_set, filter) print("Classification report for SVC classifier %s\n" % (report)) LinearSVC_classifier = SklearnClassifier(LinearSVC()) LinearSVC_classifier.train(train_set) report = get_classifier_metrics_report(LinearSVC_classifier, test_set, filter) print("Classification report for LSVC classifier %s\n" % (report)) # Test the classifier # print("{} -> {}". 
format(str(filter), # nltk.classify.accuracy(classifier, test_set))) # Classify a few docs and check # for(d, c) in documents[:100]: # guess = classifier.classify(document_features( # d, {'length' : 1 ,'ngram': 4})) # if(guess != c): # print('Got It Wrong correct={} guess={} comment={}'.format( # c, guess, ' '.join(d))) # else: # print('Got It Right guess={} comment={}'.format( # guess, ' '.join(d).strip()))
9,391
38.79661
147
py
theedhum-nandrum
theedhum-nandrum-master/src/playground/emoji_sentiment.py
import linecache import sys import emoji import re import csv from collections import Counter # Appeding our src directory to sys path so that we can import modules. sys.path.append('../..') from src.tn.lib.sentimoji import get_emoji_sentiment_rank def extract_emojis(s): return [c for c in s if c in emoji.UNICODE_EMOJI] matchedFn = "../../resources/data/matched_emojis.txt" unmatchedFn = "../../resources/data/unmatched_emojis.txt" matched = Counter() unmatched = Counter() occurences = Counter() # Get the list of unmatched emojis and put it in a file so that we can process it. fileName = "../../resources/data/all_records.tsv" with open (fileName, "r", encoding="UTF-8") as mainFile, open(matchedFn, "w", encoding="UTF-8") as matchedFile, open(unmatchedFn, "w", encoding="UTF-8") as unmatchedFile: readTsv = csv.reader(mainFile, delimiter="\t") for row in readTsv: if len(row) == 2: txt, emotion = row emojiji = extract_emojis(txt) for em in emojiji: sentiment = get_emoji_sentiment_rank(em) if sentiment == False: unmatched[(em,emotion)] += 1 occurences[em] += 1 else: matched[(em,emotion)] += sentiment['sentiment_score'] for em, tot in occurences.items(): pos = unmatched[(em, 'Positive')] neg = unmatched[(em, 'Negative')] neu = tot - (pos +neg) assert(neu>=0) unmatchedFile.write(",".join((em,'-',str(tot),'1',str(neg),str(neu),str(pos),'-','Unknown')) + '\n') matchedFile.writelines("\n".join([elem[0] + ',' + elem[1] + ',' + str(cnt) for elem, cnt in matched.items()])) #unmatchedFile.writelines("\n".join([elem[0] + ',' + elem[1] + ',' + str(cnt) for elem, cnt in unmatched.items()])) sys.exit() ''' lineNum = 626 line = linecache.getline(fileName, lineNum) text = line.split("\t")[0].strip() emojiji = extract_emojis(text) for em in emojiji: sentiment = get_emoji_sentiment_rank(em) print (sentiment) '''
2,049
36.962963
170
py
theedhum-nandrum
theedhum-nandrum-master/src/playground/collect_emojis.py
''' @author mojosaurus This script scrapes all the files under ../resources/data/*.tsv, collects emojis and checks which of these emojis do we have sentimant analysis for by src.tn.lib.sentimoji. Output of the script is two files - ../../resources/data/matched_emojis.txt and ../../resources/data/unmatched_emojis.txt ''' import linecache import sys import emoji import csv # Appeding our src directory to sys path so that we can import modules. sys.path.append('../..') from src.tn.lib.sentimoji import get_emoji_sentiment_rank def extract_emojis(s): return [c for c in s if c in emoji.UNICODE_EMOJI] files = [ '../../resources/data/tamil_dev.tsv', '../../resources/data/tamil_train.tsv', '../../resources/data/tamil_trial.tsv', '../../resources/data/malayalam_dev.tsv', '../../resources/data/malayalam_train.tsv', '../../resources/data/malayalam_trial.tsv', ] matchedFn = "../../resources/data/matched_emojis.txt" unmatchedFn = "../../resources/data/unmatched_emojis.txt" matched = [] unmatched = [] with open(matchedFn, "w", encoding="UTF-8") as matchedFile, open(unmatchedFn, "w", encoding="UTF-8") as unmatchedFile: for fn in files: with open(fn, "r", encoding="utf-8") as mainFile: readTsv = csv.reader(mainFile, delimiter="\t") for row in readTsv: txt, emotion = row emojiji = extract_emojis(txt) for em in emojiji: sentiment = get_emoji_sentiment_rank(em) if sentiment == False: if [em, emotion] not in unmatched: unmatched.append([em, emotion]) else: if [em, emotion] not in matched: matched.append([em, emotion]) matchedFile.writelines("\n".join([",".join(mat) for mat in matched])) unmatchedFile.writelines("\n".join([",".join(unmat) for unmat in unmatched]))
1,963
37.509804
121
py
theedhum-nandrum
theedhum-nandrum-master/src/playground/test_cld2.py
"""Playground: run cld2 language detection on one line of the Tamil training
data and print the per-span detection vectors it reports."""
import cld2
import linecache
import sys

fileName = "resources/data/tamil_train.tsv"

# Earlier line numbers tried during exploration; the final assignment wins.
lineNum = 11106  # Russian
lineNum = 11046  # tamil
lineNum = 8423  # telugu
lineNum = 7922  # tamil
# lineNum = 7787 # telugu
# lineNum = 7607 # telugu
lineNum = 570  # kannada
lineNum = 611  # kannada

line = linecache.getline(fileName, lineNum)
# Keep only the text column and work in bytes, as cld2 expects.
text = line.split("\t")[0].strip().encode("utf-8")
print(len(text))

isReliable, textBytesFound, details, vectors = cld2.detect(text, returnVectors=True)
print(' reliable: %s' % (isReliable != 0))
print(' textBytes: %s' % textBytesFound)
print(' details: %s' % str(details))

# Each vector is (byte offset, byte length, ...); slice the raw bytes back out.
for i, vector in enumerate(vectors):
    print("*************")
    # print (vector)
    print(details[i])
    start = vector[0]
    end = vector[1]
    print("Start : {}, end : {}".format(start, start + end))
    print(vector)
    print(text[start:start + end].decode("utf-8"))
    print("*************")
910
25.794118
84
py
theedhum-nandrum
theedhum-nandrum-master/src/playground/plot_document_classification.py
#!/usr/bin/env python
# coding: utf-8

# Adapted the original for our requirement.
#
# Classification of text documents using sparse features
#
# This is an example showing how scikit-learn can be used to classify documents
# by topics using a bag-of-words approach. This example uses a scipy.sparse
# matrix to store the features and demonstrates various classifiers that can
# efficiently handle sparse matrices.
#
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#         Olivier Grisel <olivier.grisel@ensta.org>
#         Mathieu Blondel <mathieu@mblondel.org>
#         Lars Buitinck
# License: BSD 3 clause

import logging
import numpy as np
from optparse import OptionParser
import sys
import re
from time import time

import matplotlib.pyplot as plt

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, ComplementNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics

from src.playground.feature_utils import load_docs

# Display progress logs on stdout.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# ---- command-line options ------------------------------------------------
op = OptionParser()
op.add_option("--report",
              action="store_true", dest="print_report",
              help="Print a detailed classification report.")
op.add_option("--chi2_select",
              action="store", type="int", dest="select_chi2",
              help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
              action="store_true", dest="print_cm",
              help="Print the confusion matrix.")
op.add_option("--top10",
              action="store_true", dest="print_top10",
              help="Print ten most discriminative terms per class"
                   " for every classifier.")
op.add_option("--all_categories",
              action="store_true", dest="all_categories",
              help="Whether to use all categories or not.")
op.add_option("--use_hashing",
              action="store_true",
              help="Use a hashing vectorizer.")
op.add_option("--n_features",
              action="store", type=int, default=2 ** 16,
              help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
              action="store_true",
              help="Remove newsgroup information that is easily overfit: "
                   "headers, signatures, and quoting.")


def is_interactive():
    """True when running under Jupyter/IPython (no __file__ on __main__)."""
    return not hasattr(sys.modules['__main__'], '__file__')


# Work-around for Jupyter notebook and IPython console.
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
    op.error("this script takes no arguments.")
    sys.exit(1)

print(__doc__)
op.print_help()
print()

# ---- load data from the training set -------------------------------------
data_train = load_docs("../../resources/data/tamil_train.tsv")
data_test = load_docs("../../resources/data/tamil_dev.tsv")
print(data_train['data'][5])
print('data loaded')

# Order of labels in `target_names` can be different from `categories`.
target_names = data_train['target_names']


def size_mb(docs):
    """Total UTF-8 encoded size of *docs* in megabytes."""
    return sum(len(s.encode('utf-8')) for s in docs) / 1e6


data_train_size_mb = size_mb(data_train['data'])
data_test_size_mb = size_mb(data_test['data'])

print("%d documents - %0.3fMB (training set)" % (
    len(data_train['data']), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
    len(data_test['data']), data_test_size_mb))
print("%d categories" % len(target_names))
print()

# Split a training set and a test set.
y_train, y_test = data_train['target_names'], data_test['target_names']

print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False,
                                   n_features=opts.n_features)
    X_train = vectorizer.transform(data_train['data'])
else:
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    X_train = vectorizer.fit_transform(data_train['data'])
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()

print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test['data'])
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()

# Mapping from integer feature name to original token string.
if opts.use_hashing:
    feature_names = None
else:
    feature_names = vectorizer.get_feature_names()

if opts.select_chi2:
    print("Extracting %d best features by a chi-squared test" %
          opts.select_chi2)
    t0 = time()
    ch2 = SelectKBest(chi2, k=opts.select_chi2)
    X_train = ch2.fit_transform(X_train, y_train)
    X_test = ch2.transform(X_test)
    if feature_names:
        # Keep selected feature names.
        feature_names = [feature_names[i]
                         for i in ch2.get_support(indices=True)]
    print("done in %fs" % (time() - t0))
    print()

if feature_names:
    feature_names = np.asarray(feature_names)


def trim(s):
    """Trim string to fit on terminal (assuming 80-column display)"""
    return s if len(s) <= 80 else s[:77] + "..."


# ---- benchmark classifiers ------------------------------------------------
# We train and test the datasets with 15 different classification models
# and get performance results for each model.
def benchmark(clf):
    """Fit *clf* on the train split, score on test, and report timings."""
    print('_' * 80)
    print("Training: ")
    print(clf)
    t0 = time()
    clf.fit(X_train, y_train)
    train_time = time() - t0
    print("train time: %0.3fs" % train_time)

    t0 = time()
    pred = clf.predict(X_test)
    test_time = time() - t0
    print("test time: %0.3fs" % test_time)

    score = metrics.accuracy_score(y_test, pred)
    print("accuracy: %0.3f" % score)

    if hasattr(clf, 'coef_'):
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))
        if opts.print_top10 and feature_names is not None:
            print("top 10 keywords per class:")
            for i, label in enumerate(target_names):
                top10 = np.argsort(clf.coef_[i])[-10:]
                print(trim("%s: %s" % (label, " ".join(feature_names[top10]))))
        print()

    if opts.print_report:
        print("classification report:")
        print(metrics.classification_report(y_test, pred,
                                            target_names=target_names))

    if opts.print_cm:
        print("confusion matrix:")
        print(metrics.confusion_matrix(y_test, pred))

    print()
    clf_descr = str(clf).split('(')[0]
    return clf_descr, score, train_time, test_time


results = []
for clf, name in (
        (RidgeClassifier(tol=1e-2, solver="sag"), "Ridge Classifier"),
        (Perceptron(max_iter=50), "Perceptron"),
        (PassiveAggressiveClassifier(max_iter=50), "Passive-Aggressive"),
        (KNeighborsClassifier(n_neighbors=10), "kNN"),
        (RandomForestClassifier(), "Random forest")):
    print('=' * 80)
    print(name)
    results.append(benchmark(clf))

for penalty in ["l2", "l1"]:
    print('=' * 80)
    print("%s penalty" % penalty.upper())
    # Train Liblinear model.
    results.append(benchmark(LinearSVC(penalty=penalty, dual=False,
                                       tol=1e-3)))
    # Train SGD model.
    results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
                                           penalty=penalty)))

# Train SGD with Elastic Net penalty.
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
                                       penalty="elasticnet")))

# Train NearestCentroid without threshold.
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))

# Train sparse Naive Bayes classifiers.
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
results.append(benchmark(ComplementNB(alpha=.1)))

print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
    ('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
                                                    tol=1e-3))),
    ('classification', LinearSVC(penalty="l2"))])))

# ---- plots ----------------------------------------------------------------
# The bar plot indicates the accuracy, training time (normalized) and test
# time (normalized) of each classifier.
indices = np.arange(len(results))

results = [[x[i] for x in results] for i in range(4)]

clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)

plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time", color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)

for i, c in zip(indices, clf_names):
    plt.text(-.3, i, c)

plt.show()
10,396
31.28882
79
py
theedhum-nandrum
theedhum-nandrum-master/src/tn/sentiment_classifier.py
""" @author sanjeethr, oligoglot Implements SGDClassifier using FeatureUnions for Sentiment Classification of text It also has code to experiment with hyper tuning parameters of the classifier """ from __future__ import print_function import numpy as np import pickle import json from pprint import pprint from time import time import sys, os from sklearn.base import BaseEstimator, TransformerMixin from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer, CountVectorizer from sklearn.metrics import classification_report from sklearn.pipeline import FeatureUnion from sklearn.pipeline import Pipeline from sklearn.svm import SVC from sklearn.linear_model import SGDClassifier from sklearn.model_selection import RandomizedSearchCV from scipy.stats import uniform from sklearn.model_selection import GridSearchCV from libindic.soundex import Soundex from lib.feature_utils import load_docs, get_emojis_from_text, get_doc_len_range sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'extern', 'indic_nlp_library')) from indicnlp.normalize.indic_normalize import BaseNormalizer sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'extern')) import deepchar sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'extern', 'solthiruthi-sothanaikal')) from symspellpy import SymSpell, Verbosity try: from indictrans import Transliterator except ImportError: print('Please install indic-trans from git: https://github.com/libindic/indic-trans') class ItemSelector(BaseEstimator, TransformerMixin): """For data grouped by feature, select subset of data at a provided key. The data is expected to be stored in a 2D data structure, where the first index is over features and the second is over samples. i.e. 
>> len(data[key]) == n_samples Please note that this is the opposite convention to scikit-learn feature matrixes (where the first index corresponds to sample). ItemSelector only requires that the collection implement getitem (data[key]). Examples include: a dict of lists, 2D numpy array, Pandas DataFrame, numpy record array, etc. >> data = {'a': [1, 5, 2, 5, 2, 8], 'b': [9, 4, 1, 4, 1, 3]} >> ds = ItemSelector(key='a') >> data['a'] == ds.transform(data) ItemSelector is not designed to handle data grouped by sample. (e.g. a list of dicts). If your data is structured this way, consider a transformer along the lines of `sklearn.feature_extraction.DictVectorizer`. Parameters ---------- key : hashable, required The key corresponding to the desired value in a mappable. """ def __init__(self, key): self.key = key def fit(self, x, y=None): return self def transform(self, data_dict): return data_dict[self.key] class TextStats(BaseEstimator, TransformerMixin): """Extract features from each document for DictVectorizer""" def fit(self, x, y=None): return self def transform(self, reviews): return [{'length': len(text), 'num_sentences': text.count('.')} for text in reviews] class FeatureExtractor(BaseEstimator, TransformerMixin): """Extract review text, emojis and emoji sentiment. Takes a sequence of strings and produces a dict of values. Keys are `review`, `emojis`, and `emoji-sentiment`. """ def __init__(self, lang = 'ta'): self.lang = lang self.normalizer = BaseNormalizer(lang) # This language map was created using Google's googletrans module. 
Create the file alltextlang.txt by calling # detect_lang_and_store in feature_utils.py self.lmap = self.load_language_maps( os.path.join(os.path.dirname(sys.path[0]),'../resources/data/alltextslang.txt')) self.soundexer = Soundex() self.ta_trans = Transliterator(source='eng', target='tam', build_lookup=True) self.ml_trans = Transliterator(source='eng', target='mal', build_lookup=True) self.sym_spell = SymSpell(max_dictionary_edit_distance=2, prefix_length=7) self.sym_spell.load_dictionary('../../src/extern/data/etymdict.csv.vocab.tsv.gz', term_index=0, count_index=1, separator="\t") super().__init__() def load_language_maps(self, mapfile): lmap = {} with open(mapfile, 'r') as mapf: for line in mapf: text, lang, conf = line.rstrip().split('\t') lmap[text] = (lang, float(conf)) return lmap def get_language_tag(self, text): return self.lmap.get(text, ('unknown', 0.0)) def fit(self, x, y=None): return self def transform(self, reviews): features = np.recarray(shape=(len(reviews),), dtype=[('review', object), ('emojis', object), ('emoji_sentiment', object), ('lang_tag', object), ('len_range', object), ('soundexes', object),],) for i, review in enumerate(reviews): features['review'][i] = self.normalizer.normalize(text = review) emojis, sentiment = get_emojis_from_text(review) features['emojis'][i] = ' '.join(emojis) features['emoji_sentiment'][i] = sentiment lang, conf = self.get_language_tag(review.strip()) if lang == self.lang or lang == (self.lang + 'en'): # google agrees with some confidence agreement = 1 elif conf < 0.5: # google says not-tamil, but weakly agreement = 0.5 else: # google clearly says not-tamil agreement = 0 features['lang_tag'][i] = {'lang': lang, 'agreement': agreement} features['len_range'][i] = get_doc_len_range(review) if self.lang == 'ta': review_trans = self.ta_trans.transform(review) for word in review_trans.split(): suggestions = self.sym_spell.lookup(word, Verbosity.CLOSEST, max_edit_distance=2, include_unknown=True) if len(suggestions) > 0 
and suggestions[0].distance < 3: print(word, suggestions[0].term) # no match with dictionary, we need a more comprehensive dictionary plus phonetic similarity elif self.lang == 'ml': review_trans = self.ml_trans.transform(review) else: review_trans = review # TODO: introduce spell correct here for added normalisation # print(lang, review_trans) features['soundexes'][i] = ' '.join([self.soundexer.soundex(word) for word in review_trans.split()]) return features def fit_predict_measure(mode, train_file, test_file, inputfile, lang = 'ta'): print(train_file, test_file) data_train = load_docs(train_file, mode='train') data_test = load_docs(test_file, mode=mode) print('Data Loaded') target_names = data_train['target_names'] if mode == 'experiment': perform_hyper_param_tuning(data_train, data_test, inputfile, lang) if mode == 'test': pipeline = get_pipeline(lang, len(data_train['data'])) pipeline.fit(data_train['data'], data_train['target_names']) """ params = pipeline.get_params(deep=True) print(params['rsrch__estimator__alpha'], params['rsrch__estimator__penalty']) """ y = pipeline.predict(data_test['data']) print(len(y)) assert(len(data_test['data'])==len(y)) # TODO: TypeError: can't pickle module objects. 
# pickle.dump(pipeline, open(inputfile, 'wb')) idx = 0 for v in data_test['data']: if (y[idx] == data_test['target_names'][idx]): print("Right : {} -> Prediction : {} -> Original : {}".format(v, y[idx], data_test['target_names'][idx])) else: print("Wrong : {} -> Prediction : {} -> Original : {}".format(v, y[idx], data_test['target_names'][idx])) idx += 1 print(classification_report(y, data_test['target_names'])) if mode == 'predict': pipeline = pickle.load(open(inputfile, 'rb')) pipeline.fit(data_train['data'], data_train['target_names']) """ params = pipeline.get_params(deep=True) print(params['rsrch__estimator__alpha'], params['rsrch__estimator__penalty']) """ y = pipeline.predict(data_test['data']) print(len(y)) assert(len(data_test['data'])==len(y)) with open(f'theedhumnandrum_{lang}.tsv', 'w') as outf: outf.write('id\ttext\tlabel\n') for idx, review, label in zip(data_test['ids'], data_test['data'], y): print(idx) outf.write('\t'.join((idx, review, label)) + '\n') print(f'predict data written to theedhumnandrum_{lang}.tsv') # Perform tuning of hyper parameters by passing in the field you want to # tune as a json input file. 
You can find sample files in the config directory def perform_hyper_param_tuning(data_train, data_test, input_file, lang = 'ta'): pipeline = get_pipeline(lang, len(data_train['data'])) # parameters = { # 'sgd__loss' : ["hinge", "log", "squared_hinge", "modified_huber"], # 'sgd__alpha' : [0.0001, 0.001, 0.01, 0.1], # 'sgd__penalty' : ["l2", "l1", "none"], # } with open(input_file) as f: parameters = json.load(f) grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='accuracy') print("Performing grid search...") print("pipeline:", [name for name, _ in pipeline.steps]) print("parameters:") pprint(parameters) t0 = time() grid_search.fit(data_train['data'], data_train['target_names']) print("done in %0.3fs" % (time() - t0)) print() print("Best score: %0.3f" % grid_search.best_score_) print("Best parameters set:") best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name])) print("Grid scores on development set:") print() means = grid_search.cv_results_['mean_test_score'] stds = grid_search.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, grid_search.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print() print("Detailed classification report:") print() print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.") print() y_true, y_pred = data_test["target_names"], grid_search.predict(data_test["data"]) print(classification_report(y_true, y_pred)) print() # Get the tranformer weights for a language. 
Use the experiment mode of the script # to find the right hypertuning parameters def get_transformer_weights(lang = 'ta'): lang_weights = { 'ta' : { 'emoji_sentiment': 0.6, 'emojis': 0.8, #higher value seems to improve negative ratings 'review_bow': 0.0, 'review_ngram': 1.0, 'lang_tag': 0.6, 'len_range': 0.0, 'soundexes_bow': 0.5, }, 'ml' : { 'emoji_sentiment': 0.6, 'emojis': 0.8, #higher value seems to improve negative ratings 'review_bow': 0.0, 'review_ngram': 1.0, 'lang_tag': 0.7, 'len_range': 0.5, 'soundexes_bow': 0.5, } } return lang_weights[lang] # The core function that returns the Pipeline. This is a FeatureUnion of # a SGD Classifier. Borrows from https://scikit-learn.org/0.18/auto_examples/hetero_feature_union.html def get_pipeline(lang = 'ta', datalen = 1000): chosen_weights = get_transformer_weights(lang) print(chosen_weights) """ distributions = dict( penalty=['l1', 'l2', 'elasticnet'], alpha=uniform(loc=1e-6, scale=1e-4) ) """ pipeline = Pipeline([ # Extract the review text & emojis ('reviewfeatures', FeatureExtractor(lang)), # Use FeatureUnion to combine the features from emojis and text ('union', FeatureUnion( transformer_list=[ # Pipeline for emojis handled like a bag of words ('emojis', Pipeline([ ('selector', ItemSelector(key='emojis')), ('tfidf', TfidfVectorizer(token_pattern=r'[^\s]+', stop_words=None, max_df=0.4, min_df=2, max_features=10)), ])), # Pipeline for pulling features from the post's emoji sentiment ('emoji_sentiment', Pipeline([ ('selector', ItemSelector(key='emoji_sentiment')), ('vect', HashingVectorizer()), ])), # Pipeline for length of doc feature ('len_range', Pipeline([ ('selector', ItemSelector(key='len_range')), ('vect', HashingVectorizer()), ])), # Pipeline for standard bag-of-words model for soundexes ('soundexes_bow', Pipeline([ ('selector', ItemSelector(key='soundexes')), # Best Tamil Configuration # ('tfidf', TfidfVectorizer( input='content', stop_words=None, sublinear_tf=True, max_df=0.4, min_df=1, max_features=200)) 
('tfidf', TfidfVectorizer(token_pattern=r'[^\s]+', input='content', stop_words=None, sublinear_tf=True, max_df=0.4, min_df=1, max_features=200)), ('best', TruncatedSVD(n_components=50)), ])), # Pipeline for standard bag-of-words model for review ('review_bow', Pipeline([ ('selector', ItemSelector(key='review')), # Best Tamil Configuration # ('tfidf', TfidfVectorizer( input='content', stop_words=None, sublinear_tf=True, max_df=0.4, min_df=1, max_features=200)) ('tfidf', TfidfVectorizer( input='content', stop_words=None, sublinear_tf=True, max_df=0.4, min_df=1, max_features=200)), ('best', TruncatedSVD(n_components=50)), ])), # Pipeline for pulling ad hoc features from review text ('review_stats', Pipeline([ ('selector', ItemSelector(key='review')), ('stats', TextStats()), # returns a list of dicts ('vect', DictVectorizer()), # list of dicts -> feature matrix ])), # Pipeline for ngram model for review ('review_ngram', Pipeline([ ('selector', ItemSelector(key='review')), #tamil - best config # ('tfidf', CountVectorizer(ngram_range=(1, 4))), ('tfidf', CountVectorizer(ngram_range=(1, 4))), #('tfidf', TfidfVectorizer(ngram_range=(2, 4), max_df=0.4, min_df=2, norm='l2', sublinear_tf=True)), ])), # Pipeline for pulling langtag features ('lang_tag', Pipeline([ ('selector', ItemSelector(key='lang_tag')), ('vect', DictVectorizer()), # list of dicts -> feature matrix ])), ], # weight components in FeatureUnion transformer_weights=chosen_weights, )), # Use an SVC/SGD classifier on the combined features # the value for max_iter(np.ceil(10**6/datalen)) is based on suggestion here - https://scikit-learn.org/stable/modules/sgd.html#tips-on-practical-use # ('sgd', SGDClassifier(loss="modified_huber", penalty="elasticnet", max_iter=np.ceil(10**6/datalen), random_state=0, alpha = 0.0001)), ('sgd', SGDClassifier(loss="modified_huber", penalty="elasticnet", max_iter=np.ceil(10**6/datalen), random_state=0, alpha = 0.0001)), # ('rsrch', RandomizedSearchCV(estimator=clf, 
param_distributions=distributions, cv=5, n_iter=5)), ]) return pipeline if __name__ == "__main__": args = sys.argv if len(args) < 6: print('Your command should be:') print('python sentiment_classifier.py <mode> <language code> <training file path> <test file path> <inputfilepath>') print('mode:predict/test/experiment, language: ta/ml') print('Input file path is the pickle file path for train and predict, and json file path for experiment') sys.exit() mode, lang, train_file, test_file, inputfile = args[1:6] fit_predict_measure(mode, train_file, test_file, inputfile, lang = lang)
16,938
43.459318
165
py
theedhum-nandrum
theedhum-nandrum-master/src/tn/__init__.py
""" Package Initialization file. """
36
36
36
py
theedhum-nandrum
theedhum-nandrum-master/src/tn/multiclassrnnclassifier.py
""" @author sanjeethr, oligoglot Thanks to Susan Li for this step by step guide: https://towardsdatascience.com/multi-class-text-classification-with-lstm-1590bee1bd17 """ import pandas as pd import matplotlib.pyplot as plt import numpy as np import sys, os from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras import Sequential from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense from keras.callbacks import EarlyStopping from keras.optimizers import Adam from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer from sklearn.metrics import classification_report from libindic.soundex import Soundex from lib.feature_utils import load_docs, get_emojis_from_text, get_doc_len_range sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'extern', 'indic_nlp_library')) from indicnlp.normalize.indic_normalize import BaseNormalizer try: from indictrans import Transliterator except ImportError: print('Please install indic-trans from git: https://github.com/libindic/indic-trans') ta_trans = Transliterator(source='eng', target='tam', build_lookup=True) ml_trans = Transliterator(source='eng', target='mal', build_lookup=True) # The maximum number of words to be used. (most frequent) MAX_NB_WORDS = 50000 # Max number of words in each review. MAX_SEQUENCE_LENGTH = 150 # This is fixed. 
EMBEDDING_DIM = 100 tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True) soundexer = Soundex() def load_language_maps(mapfile): lmap = {} with open(mapfile, 'r') as mapf: for line in mapf: text, lang, conf = line.rstrip().split('\t') lmap[text] = (lang, float(conf)) return lmap def get_language_tag(text): return lmap.get(text, ('unknown', 0.0)) def append_language_tag(text): p_lang, conf = get_language_tag(text) if p_lang == lang or p_lang == (lang + 'en'): # google agrees with some confidence agreement = 1 elif conf < 0.5: # google says not-tamil, but weakly agreement = 0.5 else: # google clearly says not-tamil agreement = 0 return ' '.join((' ', text, p_lang, lang, str(agreement), ' ')) def append_emoji_sentiment(text): emojis, sentiment = get_emojis_from_text(text) return ' '.join((' ', text, str(emojis), sentiment, ' ')) def append_soundex(text): if lang == 'ta': text = ta_trans.transform(text) if lang == 'ml': text = ml_trans.transform(text) soundexes = [soundexer.soundex(word) for word in text.split()] return ' ' + text + ' ' + ' '.join(soundexes) + ' ' def append_doc_len_range(text): return ' ' + get_doc_len_range(text) + ' ' def load_data(df, mode, lb = None): df.info() df = df.reset_index(drop=True) df['text'] = df['text'].apply(append_emoji_sentiment) df['text'] = df['text'].apply(append_language_tag) df['text'] = df['text'].apply(append_soundex) df['text'] = df['text'].apply(append_doc_len_range) tokenizer.fit_on_texts([normalizer.normalize (text) for text in df.text.values]) word_index = tokenizer.word_index print('Found %s unique tokens.' 
% len(word_index)) X = tokenizer.texts_to_sequences(df.text.values) X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH) print('Shape of data tensor:', X.shape) if mode == 'pred': Y = df.id.values else: print(df.category.value_counts()) if lb is None: lb = LabelBinarizer() Y = lb.fit_transform(df.category.values.reshape(-1, 1)) else: Y = lb.transform(df.category.values.reshape(-1, 1)) print('Shape of label tensor:', Y.shape) return (X, Y, lb) lang, train_file, test_file, predict_file, outfile = sys.argv[1:6] normalizer = BaseNormalizer(lang) lmap = load_language_maps('../../resources/data/alltextslang.txt') #train_file = '../../resources/data/tamil_train.tsv' train_df = pd.read_csv(train_file, sep='\t') X_train, Y_train, lb = load_data(train_df, 'train') #test_file = '../../resources/data/tamil_dev.tsv' test_df = pd.read_csv(test_file, sep='\t') X_test, Y_test, lb = load_data(test_df, 'test', lb) # X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.10, random_state = 42) print(X_train.shape,Y_train.shape) print(X_test.shape,Y_test.shape) if lang == 'ta': model = Sequential() model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1])) model.add(SpatialDropout1D(0.8)) model.add(LSTM(100, dropout=0.7, recurrent_dropout=0.5)) model.add(Dense(5, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.0001), metrics=['accuracy']) epochs = 15 batch_size = 64 if lang == 'ml': model = Sequential() model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1])) model.add(SpatialDropout1D(0.5)) #model.add(LSTM(100, dropout=0.3, recurrent_dropout=0.3, return_sequences=True)) model.add(LSTM(100, dropout=0.3, recurrent_dropout=0.3)) model.add(Dense(5, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.0001), metrics=['accuracy']) epochs = 10 batch_size = 64 history = model.fit(X_train, Y_train, epochs=epochs, 
batch_size=batch_size,validation_split=0.1,callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)]) # accr = model.evaluate(X_test,Y_test) # print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0],accr[1])) Y_test_idx = np.argmax(Y_test, axis=1) # Convert one-hot to index Y_pred = model.predict_classes(X_test) print(classification_report(Y_test_idx, Y_pred)) new_review = ['Thalaiva superstar Rajinikanth number one mass Hero'] seq = tokenizer.texts_to_sequences(new_review) padded = pad_sequences(seq, maxlen=MAX_SEQUENCE_LENGTH) pred = model.predict(padded) print(pred, lb.inverse_transform(pred)) with open(outfile, 'w') as outf: test_df = pd.read_csv(predict_file, sep='\t') X_pred, ID_pred, lb = load_data(test_df, 'pred', lb) Y_pred = lb.inverse_transform(model.predict(X_pred)).flatten() outf.write('id\ttext\tlabel\n') for idx, text, pred_category in zip(ID_pred, test_df.text.values, Y_pred): #print(idx, text, pred_category) outf.write('\t'.join((idx, text, pred_category)) + '\n')
6,528
38.331325
172
py
theedhum-nandrum
theedhum-nandrum-master/src/tn/document/document.py
'''
@author mojosaurus
This OM represents the document that will be passed around in the docproc pipeline.
'''
import json


# Inputs to this class can be various, but it always returns a JSON object.
class Document:
    """Mutable JSON-backed document passed along the docproc pipeline.

    Keys maintained in ``self.js``:
        original -- the untouched input text, kept for reference
        text     -- the working copy that pipeline stages modify
        tagged   -- list whose first element mirrors ``text``
    """

    def __init__(self, text: str = ""):
        # BUG FIX: `js` used to be a class attribute (`js = {}` at class
        # level), so every Document instance shared and clobbered a single
        # dict. It must be created per instance.
        self.js = {}
        self.js["original"] = text  # Keep the original text for reference
        self.js["text"] = text      # This is the field that will be modified
        self.js["tagged"] = [text]

    # Sets `key` to `value`; keeps tagged[0] in sync with the working text.
    def set(self, key: str, value: str):
        self.js[key] = value
        if key == "text":
            self.js["tagged"][0] = value

    def get(self, key: str):
        """Return the value stored under `key` (raises KeyError if absent)."""
        return self.js[key]

    # Overload __str__ so the document can be used directly in print().
    def __str__(self):
        return json.dumps(self.js, indent=4, ensure_ascii=False)


if __name__ == "__main__":
    doc = Document("Fellow world")
    print(doc)
947
30.6
83
py