repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
EDGY
EDGY-master/Training/VQ-VAE/Preprocessing/preprocess.py
import hydra
from hydra import utils
from pathlib import Path
import librosa
import scipy
import json
import numpy as np
from multiprocessing import cpu_count
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from tqdm import tqdm


def preemphasis(x, preemph):
    # First-order pre-emphasis filter: y[n] = x[n] - preemph * x[n-1].
    return scipy.signal.lfilter([1, -preemph], [1], x)


def mulaw_encode(x, mu):
    # Mu-law compand x in [-1, 1] into integer codes in [0, mu - 1].
    mu = mu - 1
    fx = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return np.floor((fx + 1) / 2 * mu + 0.5)


def mulaw_decode(y, mu):
    # Inverse mu-law expansion for y in [-1, 1] (counterpart of mulaw_encode).
    mu = mu - 1
    x = np.sign(y) / mu * ((1 + mu) ** np.abs(y) - 1)
    return x


def process_wav(wav_path, out_path, sr=160000, preemph=0.97, n_fft=2048, n_mels=80,
                hop_length=160, win_length=400, fmin=50, top_db=80, bits=8,
                offset=0.0, duration=None):
    """Load one wav and save its mu-law codes and normalised log-mel spectrogram.

    Writes <out_path>.wav.npy (mu-law encoded audio) and <out_path>.mel.npy
    (log-mel scaled to roughly [0, 1]); returns (out_path, n_frames).
    """
    # NOTE(review): default sr=160000 looks like a typo for 16000
    # (hop_length=160 would then be a 10 ms frame shift). In practice the
    # caller always overrides it via **cfg.preprocessing -- confirm.
    wav, _ = librosa.load(wav_path.with_suffix(".wav"), sr=sr, offset=offset, duration=duration)
    wav = wav / np.abs(wav).max() * 0.999  # peak-normalise just below 1.0
    mel = librosa.feature.melspectrogram(preemphasis(wav, preemph), sr=sr, n_fft=n_fft,
                                         n_mels=n_mels, hop_length=hop_length,
                                         win_length=win_length, fmin=fmin, power=1)
    logmel = librosa.amplitude_to_db(mel, top_db=top_db)
    logmel = logmel / top_db + 1  # map [-top_db, 0] dB onto [0, 1]
    wav = mulaw_encode(wav, mu=2**bits)
    np.save(out_path.with_suffix(".wav.npy"), wav)
    np.save(out_path.with_suffix(".mel.npy"), logmel)
    return out_path, logmel.shape[-1]


@hydra.main(config_path="Training/VQ-VAE/Configuration_files/Preprocessing/preprocessing.yaml")
def preprocess_dataset(cfg):
    """Extract features for every utterance listed in the train/test metadata,
    fanning the per-file work out over a process pool."""
    in_dir = Path(utils.to_absolute_path(cfg.in_dir))
    out_dir = Path(utils.to_absolute_path("datasets")) / str(cfg.dataset.dataset)
    out_dir.mkdir(parents=True, exist_ok=True)
    executor = ProcessPoolExecutor(max_workers=cpu_count())
    for split in ["train", "test"]:
        print("Extracting features for {} set".format(split))
        futures = []
        split_path = out_dir / cfg.dataset.language / split
        # Each metadata row is (in_path, start_seconds, duration_seconds, out_path).
        with open(split_path.with_suffix(".json")) as file:
            metadata = json.load(file)
            for in_path, start, duration, out_path in metadata:
                wav_path = in_dir / in_path
                out_path = out_dir / out_path
                out_path.parent.mkdir(parents=True, exist_ok=True)
                futures.append(executor.submit(partial(process_wav, wav_path, out_path,
                                                       **cfg.preprocessing, offset=start,
                                                       duration=duration)))
        results = [future.result() for future in tqdm(futures)]
        lengths = [x[-1] for x in results]
        frames = sum(lengths)
        # hop_length / sr is the frame shift in seconds (name says ms, value is s).
        frame_shift_ms = cfg.preprocessing.hop_length / cfg.preprocessing.sr
        hours = frames * frame_shift_ms / 3600
        print("Wrote {} utterances, {} frames ({:.2f} hours)".format(len(lengths), frames, hours))


if __name__ == "__main__":
    preprocess_dataset()
3,174
35.918605
99
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/utils_incremental/incremental_train_and_eval_AMR_LF.py
#!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *

# Module-level buffers overwritten by the forward hooks below on every
# forward pass; read inside the training loop for the auxiliary losses.
cur_features = []
ref_features = []
old_scores = []
new_scores = []

def get_ref_features(self, inputs, outputs):
    # Forward hook on ref_model.fc: capture the fc-layer *input* (features).
    global ref_features
    ref_features = inputs[0]

def get_cur_features(self, inputs, outputs):
    # Forward hook on tg_model.fc: capture the fc-layer *input* (features).
    global cur_features
    cur_features = inputs[0]

def get_old_scores_before_scale(self, inputs, outputs):
    # Forward hook on tg_model.fc.fc1: old-class scores before scaling.
    global old_scores
    old_scores = outputs

def get_new_scores_before_scale(self, inputs, outputs):
    # Forward hook on tg_model.fc.fc2: new-class scores before scaling.
    global new_scores
    new_scores = outputs

def incremental_train_and_eval_AMR_LF(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
            trainloader, testloader, \
            iteration, start_iteration, \
            lamda, \
            dist, K, lw_mr, \
            weight_per_class=None, device=None):
    """Train and evaluate one incremental step with Adaptive Margin Ranking +
    Less-Forget losses.

    For iteration > start_iteration the total loss is:
      loss1 (less-forget): cosine embedding loss tying current features to the
            frozen reference model's features, weighted by lamda;
      loss2: cross-entropy over all classes;
      loss3 (margin ranking): each old-class sample's ground-truth score must
            exceed its top-K hardest non-ground-truth scores by margin `dist`,
            weighted by lw_mr.

    Returns the trained tg_model. Note: tg_lr_scheduler.step() is called
    before the epoch's optimizer steps (pre-1.1.0 PyTorch ordering).
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #trainset.train_data = X_train.astype('uint8')
    #trainset.train_labels = Y_train
    #trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
    #    shuffle=True, num_workers=2)
    #testset.test_data = X_valid.astype('uint8')
    #testset.test_labels = Y_valid
    #testloader = torch.utils.data.DataLoader(testset, batch_size=100,
    #    shuffle=False, num_workers=2)
    #print('Max and Min of train labels: {}, {}'.format(min(Y_train), max(Y_train)))
    #print('Max and Min of valid labels: {}, {}'.format(min(Y_valid), max(Y_valid)))
    if iteration > start_iteration:
        # Freeze the reference model and install the feature/score hooks.
        ref_model.eval()
        num_old_classes = ref_model.fc.out_features
        handle_ref_features = ref_model.fc.register_forward_hook(get_ref_features)
        handle_cur_features = tg_model.fc.register_forward_hook(get_cur_features)
        handle_old_scores_bs = tg_model.fc.fc1.register_forward_hook(get_old_scores_before_scale)
        handle_new_scores_bs = tg_model.fc.fc2.register_forward_hook(get_new_scores_before_scale)
    for epoch in range(epochs):
        #train
        tg_model.train()
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        train_loss3 = 0
        correct = 0
        total = 0
        tg_lr_scheduler.step()
        print('\nEpoch: %d, LR: ' % epoch, end='')
        print(tg_lr_scheduler.get_lr())
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            tg_optimizer.zero_grad()
            outputs = tg_model(inputs)
            if iteration == start_iteration:
                # First task: plain cross-entropy only.
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
            else:
                # Forward through ref_model only to trigger its feature hook.
                ref_outputs = ref_model(inputs)
                loss1 = nn.CosineEmbeddingLoss()(cur_features, ref_features.detach(), \
                    torch.ones(inputs.shape[0]).to(device)) * lamda
                loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                #################################################
                #scores before scale, [-1, 1]
                outputs_bs = torch.cat((old_scores, new_scores), dim=1)
                assert(outputs_bs.size()==outputs.size())
                #print("outputs_bs:", outputs_bs.size(), outputs_bs)
                #print("targets:", targets.size(), targets)
                #get groud truth scores
                gt_index = torch.zeros(outputs_bs.size()).to(device)
                gt_index = gt_index.scatter(1, targets.view(-1,1), 1).ge(0.5)
                gt_scores = outputs_bs.masked_select(gt_index)
                #print("gt_index:", gt_index.size(), gt_index)
                #print("gt_scores:", gt_scores.size(), gt_scores)
                #get top-K scores on none gt classes
                # "Adaptive" variant: hard negatives come from ALL non-ground-truth
                # classes, not just the novel ones.
                none_gt_index = torch.zeros(outputs_bs.size()).to(device)
                none_gt_index = none_gt_index.scatter(1, targets.view(-1,1), 1).le(0.5)
                none_gt_scores = outputs_bs.masked_select(none_gt_index).reshape((outputs_bs.size(0), outputs.size(1)-1))
                #print("none_gt_index:", none_gt_index.size(), none_gt_index)
                #print("none_gt_scores:", none_gt_scores.size(), none_gt_scores)
                hard_scores = none_gt_scores.topk(K, dim=1)[0]
                #print("hard_scores:", hard_scores.size(), hard_scores)
                #the index of hard samples, i.e., samples of old classes
                hard_index = targets.lt(num_old_classes)
                hard_num = torch.nonzero(hard_index).size(0)
                #print("hard examples size: ", hard_num)
                if hard_num > 0:
                    gt_scores = gt_scores[hard_index].view(-1, 1).repeat(1, K)
                    hard_scores = hard_scores[hard_index]
                    assert(gt_scores.size() == hard_scores.size())
                    assert(gt_scores.size(0) == hard_num)
                    #print("hard example gt scores: ", gt_scores.size(), gt_scores)
                    #print("hard example max novel scores: ", hard_scores.size(), hard_scores)
                    loss3 = nn.MarginRankingLoss(margin=dist)(gt_scores.view(-1, 1), \
                        hard_scores.view(-1, 1), torch.ones(hard_num*K).to(device)) * lw_mr
                else:
                    # No old-class samples in this batch: ranking loss is zero.
                    loss3 = torch.zeros(1).to(device)
                #################################################
                loss = loss1 + loss2 + loss3
            loss.backward()
            tg_optimizer.step()
            train_loss += loss.item()
            if iteration > start_iteration:
                train_loss1 += loss1.item()
                train_loss2 += loss2.item()
                train_loss3 += loss3.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            #if iteration == 0:
            #    msg = 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % \
            #        (train_loss/(batch_idx+1), 100.*correct/total, correct, total)
            #else:
            #    msg = 'Loss1: %.3f Loss2: %.3f Loss: %.3f | Acc: %.3f%% (%d/%d)' % \
            #        (loss1.item(), loss2.item(), train_loss/(batch_idx+1), 100.*correct/total, correct, total)
            #progress_bar(batch_idx, len(trainloader), msg)
        if iteration == start_iteration:
            print('Train set: {}, Train Loss: {:.4f} Acc: {:.4f}'.format(\
                len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))
        else:
            print('Train set: {}, Train Loss1: {:.4f}, Train Loss2: {:.4f}, Train Loss3: {:.4f},\
 Train Loss: {:.4f} Acc: {:.4f}'.format(len(trainloader), \
                train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), train_loss3/(batch_idx+1), train_loss/(batch_idx+1), 100.*correct/total))

        #eval
        tg_model.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = tg_model(inputs)
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                #progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                #    % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
        print('Test set: {} Test Loss: {:.4f} Acc: {:.4f}'.format(\
            len(testloader), test_loss/(batch_idx+1), 100.*correct/total))

    if iteration > start_iteration:
        # Detach the hooks so later forward passes stop writing the globals.
        print("Removing register_forward_hook")
        handle_ref_features.remove()
        handle_cur_features.remove()
        handle_old_scores_bs.remove()
        handle_new_scores_bs.remove()
    return tg_model
8,384
45.071429
121
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/utils_incremental/compute_confusion_matrix.py
#!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *

def compute_confusion_matrix(tg_model, tg_feature_model, class_means, evalloader, print_info=False, device=None):
    """Build confusion matrices for three classifiers over evalloader.

    cm[0]: CNN softmax predictions from tg_model;
    cm[1]: iCaRL nearest-mean-of-exemplars using class_means[:, :, 0];
    cm[2]: NCM nearest-class-mean using class_means[:, :, 1].

    class_means is indexed [feature_dim, class, {0: icarl, 1: ncm}] --
    presumably; verify against the caller that builds it. Returns a
    (3, num_classes, num_classes) numpy array.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    tg_model.eval()
    tg_feature_model.eval()
    #evalset = torchvision.datasets.CIFAR100(root='./data', train=False,
    #    download=False, transform=transform_test)
    #evalset.test_data = input_data.astype('uint8')
    #evalset.test_labels = input_labels
    #evalloader = torch.utils.data.DataLoader(evalset, batch_size=128,
    #    shuffle=False, num_workers=2)
    correct = 0
    correct_icarl = 0
    correct_ncm = 0
    total = 0
    num_classes = tg_model.fc.out_features
    cm = np.zeros((3, num_classes, num_classes))
    all_targets = []
    all_predicted = []
    all_predicted_icarl = []
    all_predicted_ncm = []
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(evalloader):
            inputs, targets = inputs.to(device), targets.to(device)
            total += targets.size(0)
            all_targets.append(targets)
            outputs = tg_model(inputs)
            _, predicted = outputs.max(1)
            correct += predicted.eq(targets).sum().item()
            all_predicted.append(predicted)
            outputs_feature = np.squeeze(tg_feature_model(inputs))
            # Compute score for iCaRL
            # Negative squared Euclidean distance to each class mean = score.
            sqd_icarl = cdist(class_means[:,:,0].T, outputs_feature, 'sqeuclidean')
            score_icarl = torch.from_numpy((-sqd_icarl).T).to(device)
            _, predicted_icarl = score_icarl.max(1)
            correct_icarl += predicted_icarl.eq(targets).sum().item()
            all_predicted_icarl.append(predicted_icarl)
            # Compute score for NCM
            sqd_ncm = cdist(class_means[:,:,1].T, outputs_feature, 'sqeuclidean')
            score_ncm = torch.from_numpy((-sqd_ncm).T).to(device)
            _, predicted_ncm = score_ncm.max(1)
            correct_ncm += predicted_ncm.eq(targets).sum().item()
            all_predicted_ncm.append(predicted_ncm)
            # print(sqd_icarl.shape, score_icarl.shape, predicted_icarl.shape, \
            #     sqd_ncm.shape, score_ncm.shape, predicted_ncm.shape)
    cm[0, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted))
    cm[1, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted_icarl))
    cm[2, :, :] = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_predicted_ncm))
    if print_info:
        # Overall (sample-weighted) accuracies...
        print(" top 1 accuracy CNN :\t\t{:.2f} %".format( 100.*correct/total ))
        print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format( 100.*correct_icarl/total ))
        print(" top 1 accuracy NCM :\t\t{:.2f} %".format( 100.*correct_ncm/total ))
        # ...and per-class (macro-averaged) accuracies from the diagonals.
        print(" top 1 accuracy CNN :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[0])/np.sum(cm[0],axis=1)) ))
        print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[1])/np.sum(cm[1],axis=1)) ))
        print(" top 1 accuracy NCM :\t\t{:.2f} %".format( 100.*np.mean(np.diag(cm[2])/np.sum(cm[2],axis=1)) ))
    return cm
3,725
43.357143
122
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/utils_incremental/incremental_train_and_eval_MS.py
#!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *

def get_old_scores_before_scale(self, inputs, outputs):
    # Forward hook on tg_model.fc.fc1: old-class scores before sigma scaling.
    global old_scores
    old_scores = outputs

def get_new_scores_before_scale(self, inputs, outputs):
    # Forward hook on tg_model.fc.fc2: new-class scores before sigma scaling.
    global new_scores
    new_scores = outputs

def incremental_train_and_eval_MS(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
            trainloader, testloader, \
            iteration, start_iteration, \
            lw_ms, \
            fix_bn=False, weight_per_class=None, device=None):
    """Train and evaluate one incremental step with a Matching-Scores loss.

    For iteration > start_iteration the total loss is:
      loss1: MSE between the current old-class pre-scale scores (via hook on
             fc.fc1) and the reference model's scores divided by its learned
             sigma, weighted by lw_ms * num_old_classes;
      loss2: cross-entropy over all classes.

    fix_bn keeps BatchNorm layers in eval mode during training. Returns the
    trained tg_model.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #trainset.train_data = X_train.astype('uint8')
    #trainset.train_labels = Y_train
    #trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
    #    shuffle=True, num_workers=2)
    #testset.test_data = X_valid.astype('uint8')
    #testset.test_labels = Y_valid
    #testloader = torch.utils.data.DataLoader(testset, batch_size=100,
    #    shuffle=False, num_workers=2)
    #print('Max and Min of train labels: {}, {}'.format(min(Y_train), max(Y_train)))
    #print('Max and Min of valid labels: {}, {}'.format(min(Y_valid), max(Y_valid)))
    if iteration > start_iteration:
        ref_model.eval()
        num_old_classes = ref_model.fc.out_features
        handle_old_scores_bs = tg_model.fc.fc1.register_forward_hook(get_old_scores_before_scale)
        handle_new_scores_bs = tg_model.fc.fc2.register_forward_hook(get_new_scores_before_scale)
    for epoch in range(epochs):
        #train
        tg_model.train()
        if fix_bn:
            # Keep running BN statistics frozen while still training weights.
            for m in tg_model.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    #m.weight.requires_grad = False
                    #m.bias.requires_grad = False
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        correct = 0
        total = 0
        tg_lr_scheduler.step()
        print('\nEpoch: %d, LR: ' % epoch, end='')
        print(tg_lr_scheduler.get_lr())
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            tg_optimizer.zero_grad()
            outputs = tg_model(inputs)
            if iteration == start_iteration:
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
            else:
                ref_outputs = ref_model(inputs)
                #loss1 = nn.KLDivLoss()(F.log_softmax(outputs[:,:num_old_classes]/T, dim=1), \
                #    F.softmax(ref_outputs.detach()/T, dim=1)) * T * T * beta * num_old_classes
                # Undo the reference model's cosine-classifier scaling so both
                # score sets are on the pre-scale footing.
                ref_scores = ref_outputs.detach() / ref_model.fc.sigma.detach()
                loss1 = nn.MSELoss()(old_scores, ref_scores.detach()) * lw_ms * num_old_classes
                loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                loss = loss1 + loss2
            loss.backward()
            tg_optimizer.step()
            train_loss += loss.item()
            if iteration > start_iteration:
                train_loss1 += loss1.item()
                train_loss2 += loss2.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            #if iteration == 0:
            #    msg = 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % \
            #        (train_loss/(batch_idx+1), 100.*correct/total, correct, total)
            #else:
            #    msg = 'Loss1: %.3f Loss2: %.3f Loss: %.3f | Acc: %.3f%% (%d/%d)' % \
            #        (loss1.item(), loss2.item(), train_loss/(batch_idx+1), 100.*correct/total, correct, total)
            #progress_bar(batch_idx, len(trainloader), msg)
        if iteration == start_iteration:
            print('Train set: {}, Train Loss: {:.4f} Acc: {:.4f}'.format(\
                len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))
        else:
            print('Train set: {}, Train Loss1: {:.4f}, Train Loss2: {:.4f},\
 Train Loss: {:.4f} Acc: {:.4f}'.format(len(trainloader), \
                train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), train_loss/(batch_idx+1), 100.*correct/total))

        #eval
        tg_model.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = tg_model(inputs)
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                #progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                #    % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
        print('Test set: {} Test Loss: {:.4f} Acc: {:.4f}'.format(\
            len(testloader), test_loss/(batch_idx+1), 100.*correct/total))

    if iteration > start_iteration:
        # Detach the hooks so later forward passes stop writing the globals.
        print("Removing register_forward_hook")
        handle_old_scores_bs.remove()
        handle_new_scores_bs.remove()
    return tg_model
5,759
41.666667
107
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/utils_incremental/incremental_train_and_eval.py
#!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *

def incremental_train_and_eval(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
            trainloader, testloader, \
            iteration, start_iteration, \
            T, beta, \
            fix_bn=False, weight_per_class=None, device=None):
    """Train and evaluate one incremental step with knowledge distillation.

    For iteration > start_iteration the total loss is:
      loss1: KL divergence between temperature-T softened logits of the
             current model (old classes only) and of the frozen reference
             model, weighted by T^2 * beta * num_old_classes;
      loss2: cross-entropy over all classes.

    fix_bn keeps BatchNorm layers in eval mode during training. Returns the
    trained tg_model.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #trainset.train_data = X_train.astype('uint8')
    #trainset.train_labels = Y_train
    #trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
    #    shuffle=True, num_workers=2)
    #testset.test_data = X_valid.astype('uint8')
    #testset.test_labels = Y_valid
    #testloader = torch.utils.data.DataLoader(testset, batch_size=100,
    #    shuffle=False, num_workers=2)
    #print('Max and Min of train labels: {}, {}'.format(min(Y_train), max(Y_train)))
    #print('Max and Min of valid labels: {}, {}'.format(min(Y_valid), max(Y_valid)))
    if iteration > start_iteration:
        ref_model.eval()
        num_old_classes = ref_model.fc.out_features
    for epoch in range(epochs):
        #train
        tg_model.train()
        if fix_bn:
            # Keep running BN statistics frozen while still training weights.
            for m in tg_model.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    #m.weight.requires_grad = False
                    #m.bias.requires_grad = False
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        correct = 0
        total = 0
        tg_lr_scheduler.step()
        print('\nEpoch: %d, LR: ' % epoch, end='')
        print(tg_lr_scheduler.get_lr())
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            tg_optimizer.zero_grad()
            outputs = tg_model(inputs)
            if iteration == start_iteration:
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
            else:
                ref_outputs = ref_model(inputs)
                # Hinton-style distillation on the old-class logits only.
                loss1 = nn.KLDivLoss()(F.log_softmax(outputs[:,:num_old_classes]/T, dim=1), \
                    F.softmax(ref_outputs.detach()/T, dim=1)) * T * T * beta * num_old_classes
                loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                loss = loss1 + loss2
            loss.backward()
            tg_optimizer.step()
            train_loss += loss.item()
            if iteration > start_iteration:
                train_loss1 += loss1.item()
                train_loss2 += loss2.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            #if iteration == 0:
            #    msg = 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % \
            #        (train_loss/(batch_idx+1), 100.*correct/total, correct, total)
            #else:
            #    msg = 'Loss1: %.3f Loss2: %.3f Loss: %.3f | Acc: %.3f%% (%d/%d)' % \
            #        (loss1.item(), loss2.item(), train_loss/(batch_idx+1), 100.*correct/total, correct, total)
            #progress_bar(batch_idx, len(trainloader), msg)
        if iteration == start_iteration:
            print('Train set: {}, Train Loss: {:.4f} Acc: {:.4f}'.format(\
                len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))
        else:
            print('Train set: {}, Train Loss1: {:.4f}, Train Loss2: {:.4f},\
 Train Loss: {:.4f} Acc: {:.4f}'.format(len(trainloader), \
                train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), train_loss/(batch_idx+1), 100.*correct/total))

        #eval
        tg_model.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = tg_model(inputs)
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                #progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                #    % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
        print('Test set: {} Test Loss: {:.4f} Acc: {:.4f}'.format(\
            len(testloader), test_loss/(batch_idx+1), 100.*correct/total))
    return tg_model
5,014
41.5
107
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/utils_incremental/compute_accuracy.py
#!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *

def compute_accuracy(tg_model, tg_feature_model, class_means, evalloader, scale=None, print_info=True, device=None):
    """Evaluate top-1 accuracy of three classifiers over evalloader.

    CNN: softmax over tg_model outputs (optionally rescaled per-class by
    `scale`, a (1, num_classes) tensor); iCaRL: nearest mean of exemplars
    using class_means[:, :, 0]; NCM: nearest class mean using
    class_means[:, :, 1]. Returns [cnn_acc, icarl_acc, ncm_acc] in percent.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    tg_model.eval()
    tg_feature_model.eval()
    #evalset = torchvision.datasets.CIFAR100(root='./data', train=False,
    #    download=False, transform=transform_test)
    #evalset.test_data = input_data.astype('uint8')
    #evalset.test_labels = input_labels
    #evalloader = torch.utils.data.DataLoader(evalset, batch_size=128,
    #    shuffle=False, num_workers=2)
    correct = 0
    correct_icarl = 0
    correct_ncm = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(evalloader):
            inputs, targets = inputs.to(device), targets.to(device)
            total += targets.size(0)
            outputs = tg_model(inputs)
            outputs = F.softmax(outputs, dim=1)
            if scale is not None:
                # Per-class rescaling (e.g. to correct prediction bias).
                assert(scale.shape[0] == 1)
                assert(outputs.shape[1] == scale.shape[1])
                outputs = outputs / scale.repeat(outputs.shape[0], 1).type(torch.FloatTensor).to(device)
            _, predicted = outputs.max(1)
            correct += predicted.eq(targets).sum().item()
            outputs_feature = np.squeeze(tg_feature_model(inputs))
            # Compute score for iCaRL
            # Negative squared Euclidean distance to each class mean = score.
            sqd_icarl = cdist(class_means[:,:,0].T, outputs_feature, 'sqeuclidean')
            score_icarl = torch.from_numpy((-sqd_icarl).T).to(device)
            _, predicted_icarl = score_icarl.max(1)
            correct_icarl += predicted_icarl.eq(targets).sum().item()
            # Compute score for NCM
            sqd_ncm = cdist(class_means[:,:,1].T, outputs_feature, 'sqeuclidean')
            score_ncm = torch.from_numpy((-sqd_ncm).T).to(device)
            _, predicted_ncm = score_ncm.max(1)
            correct_ncm += predicted_ncm.eq(targets).sum().item()
            # print(sqd_icarl.shape, score_icarl.shape, predicted_icarl.shape, \
            #     sqd_ncm.shape, score_ncm.shape, predicted_ncm.shape)
    if print_info:
        print(" top 1 accuracy CNN :\t\t{:.2f} %".format(100.*correct/total))
        print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format(100.*correct_icarl/total))
        print(" top 1 accuracy NCM :\t\t{:.2f} %".format(100.*correct_ncm/total))
    cnn_acc = 100.*correct/total
    icarl_acc = 100.*correct_icarl/total
    ncm_acc = 100.*correct_ncm/total
    return [cnn_acc, icarl_acc, ncm_acc]
3,097
40.306667
116
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/utils_incremental/__init__.py
#!/usr/bin/env python # coding=utf-8 # for incremental train and eval
70
16.75
32
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/utils_incremental/compute_features.py
#!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *

def compute_features(tg_feature_model, evalloader, num_samples, num_features, device=None):
    """Extract features for every sample yielded by evalloader.

    Runs tg_feature_model in eval mode with gradients disabled and writes
    each batch's (squeezed) output into consecutive rows of a preallocated
    (num_samples, num_features) numpy array. Asserts that the loader
    produced exactly num_samples rows, then returns the array.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    tg_feature_model.eval()
    features = np.zeros([num_samples, num_features])
    cursor = 0
    with torch.no_grad():
        for inputs, _targets in evalloader:
            inputs = inputs.to(device)
            batch_size = inputs.shape[0]
            features[cursor:cursor + batch_size, :] = np.squeeze(tg_feature_model(inputs))
            cursor += batch_size
    assert(cursor==num_samples)
    return features
1,503
33.181818
99
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/utils_incremental/incremental_train_and_eval_LF.py
#!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import copy
import argparse
from PIL import Image
from scipy.spatial.distance import cdist
from sklearn.metrics import confusion_matrix
from utils_pytorch import *

# Module-level buffers overwritten by the forward hooks below on every
# forward pass; read inside the training loop for the less-forget loss.
cur_features = []
ref_features = []

def get_ref_features(self, inputs, outputs):
    # Forward hook on ref_model.fc: capture the fc-layer *input* (features).
    global ref_features
    ref_features = inputs[0]

def get_cur_features(self, inputs, outputs):
    # Forward hook on tg_model.fc: capture the fc-layer *input* (features).
    global cur_features
    cur_features = inputs[0]

def incremental_train_and_eval_LF(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
            trainloader, testloader, \
            iteration, start_iteration, \
            lamda, \
            fix_bn=False, weight_per_class=None, device=None):
    """Train and evaluate one incremental step with the Less-Forget loss.

    For iteration > start_iteration the total loss is:
      loss1 (less-forget): cosine embedding loss tying current features to
            the frozen reference model's features, weighted by lamda;
      loss2: cross-entropy over all classes.

    fix_bn keeps BatchNorm layers in eval mode during training. Returns the
    trained tg_model.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #trainset.train_data = X_train.astype('uint8')
    #trainset.train_labels = Y_train
    #trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
    #    shuffle=True, num_workers=2)
    #testset.test_data = X_valid.astype('uint8')
    #testset.test_labels = Y_valid
    #testloader = torch.utils.data.DataLoader(testset, batch_size=100,
    #    shuffle=False, num_workers=2)
    #print('Max and Min of train labels: {}, {}'.format(min(Y_train), max(Y_train)))
    #print('Max and Min of valid labels: {}, {}'.format(min(Y_valid), max(Y_valid)))
    if iteration > start_iteration:
        ref_model.eval()
        handle_ref_features = ref_model.fc.register_forward_hook(get_ref_features)
        handle_cur_features = tg_model.fc.register_forward_hook(get_cur_features)
    for epoch in range(epochs):
        #train
        tg_model.train()
        if fix_bn:
            # Keep running BN statistics frozen while still training weights.
            for m in tg_model.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    #m.weight.requires_grad = False
                    #m.bias.requires_grad = False
        train_loss = 0
        train_loss1 = 0
        train_loss2 = 0
        correct = 0
        total = 0
        tg_lr_scheduler.step()
        print('\nEpoch: %d, LR: ' % epoch, end='')
        print(tg_lr_scheduler.get_lr())
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            tg_optimizer.zero_grad()
            outputs = tg_model(inputs)
            if iteration == start_iteration:
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
            else:
                # Forward through ref_model only to trigger its feature hook.
                ref_outputs = ref_model(inputs)
                loss1 = nn.CosineEmbeddingLoss()(cur_features, ref_features.detach(), \
                    torch.ones(inputs.shape[0]).to(device)) * lamda
                loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                loss = loss1 + loss2
            loss.backward()
            tg_optimizer.step()
            train_loss += loss.item()
            if iteration > start_iteration:
                train_loss1 += loss1.item()
                train_loss2 += loss2.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            #if iteration == 0:
            #    msg = 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % \
            #        (train_loss/(batch_idx+1), 100.*correct/total, correct, total)
            #else:
            #    msg = 'Loss1: %.3f Loss2: %.3f Loss: %.3f | Acc: %.3f%% (%d/%d)' % \
            #        (loss1.item(), loss2.item(), train_loss/(batch_idx+1), 100.*correct/total, correct, total)
            #progress_bar(batch_idx, len(trainloader), msg)
        if iteration == start_iteration:
            print('Train set: {}, Train Loss: {:.4f} Acc: {:.4f}'.format(\
                len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))
        else:
            print('Train set: {}, Train Loss1: {:.4f}, Train Loss2: {:.4f},\
 Train Loss: {:.4f} Acc: {:.4f}'.format(len(trainloader), \
                train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), train_loss/(batch_idx+1), 100.*correct/total))

        #eval
        tg_model.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(testloader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = tg_model(inputs)
                loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                #progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                #    % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
        print('Test set: {} Test Loss: {:.4f} Acc: {:.4f}'.format(\
            len(testloader), test_loss/(batch_idx+1), 100.*correct/total))

    if iteration > start_iteration:
        # Detach the hooks so later forward passes stop writing the globals.
        print("Removing register_forward_hook")
        handle_ref_features.remove()
        handle_cur_features.remove()
    return tg_model
5,489
39.970149
107
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/utils_incremental/incremental_train_and_eval_MR_LF.py
#!/usr/bin/env python # coding=utf-8 import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, models, transforms from torch.autograd import Variable import numpy as np import time import os import copy import argparse from PIL import Image from scipy.spatial.distance import cdist from sklearn.metrics import confusion_matrix from utils_pytorch import * cur_features = [] ref_features = [] old_scores = [] new_scores = [] def get_ref_features(self, inputs, outputs): global ref_features ref_features = inputs[0] def get_cur_features(self, inputs, outputs): global cur_features cur_features = inputs[0] def get_old_scores_before_scale(self, inputs, outputs): global old_scores old_scores = outputs def get_new_scores_before_scale(self, inputs, outputs): global new_scores new_scores = outputs def incremental_train_and_eval_MR_LF(epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iteration, \ lamda, \ dist, K, lw_mr, \ fix_bn=False, weight_per_class=None, device=None): if device is None: device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") #trainset.train_data = X_train.astype('uint8') #trainset.train_labels = Y_train #trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, # shuffle=True, num_workers=2) #testset.test_data = X_valid.astype('uint8') #testset.test_labels = Y_valid #testloader = torch.utils.data.DataLoader(testset, batch_size=100, # shuffle=False, num_workers=2) #print('Max and Min of train labels: {}, {}'.format(min(Y_train), max(Y_train))) #print('Max and Min of valid labels: {}, {}'.format(min(Y_valid), max(Y_valid))) if iteration > start_iteration: ref_model.eval() num_old_classes = ref_model.fc.out_features handle_ref_features = ref_model.fc.register_forward_hook(get_ref_features) handle_cur_features = 
tg_model.fc.register_forward_hook(get_cur_features) handle_old_scores_bs = tg_model.fc.fc1.register_forward_hook(get_old_scores_before_scale) handle_new_scores_bs = tg_model.fc.fc2.register_forward_hook(get_new_scores_before_scale) for epoch in range(epochs): #train tg_model.train() if fix_bn: for m in tg_model.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() #m.weight.requires_grad = False #m.bias.requires_grad = False train_loss = 0 train_loss1 = 0 train_loss2 = 0 train_loss3 = 0 correct = 0 total = 0 tg_lr_scheduler.step() print('\nEpoch: %d, LR: ' % epoch, end='') print(tg_lr_scheduler.get_lr()) for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device) tg_optimizer.zero_grad() outputs = tg_model(inputs) if iteration == start_iteration: loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets) else: ref_outputs = ref_model(inputs) loss1 = nn.CosineEmbeddingLoss()(cur_features, ref_features.detach(), \ torch.ones(inputs.shape[0]).to(device)) * lamda loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets) ################################################# #scores before scale, [-1, 1] outputs_bs = torch.cat((old_scores, new_scores), dim=1) #print(tg_model.fc.fc1.in_features, tg_model.fc.fc1.out_features) #print(tg_model.fc.fc2.in_features, tg_model.fc.fc2.out_features) #print(old_scores.size(), new_scores.size(), outputs_bs.size(), outputs.size()) assert(outputs_bs.size()==outputs.size()) #get groud truth scores gt_index = torch.zeros(outputs_bs.size()).to(device) gt_index = gt_index.scatter(1, targets.view(-1,1), 1).ge(0.5) gt_scores = outputs_bs.masked_select(gt_index) #get top-K scores on novel classes max_novel_scores = outputs_bs[:, num_old_classes:].topk(K, dim=1)[0] #the index of hard samples, i.e., samples of old classes hard_index = targets.lt(num_old_classes) hard_num = torch.nonzero(hard_index).size(0) #print("hard examples size: ", hard_num) if hard_num > 0: gt_scores = 
gt_scores[hard_index].view(-1, 1).repeat(1, K) max_novel_scores = max_novel_scores[hard_index] assert(gt_scores.size() == max_novel_scores.size()) assert(gt_scores.size(0) == hard_num) #print("hard example gt scores: ", gt_scores.size(), gt_scores) #print("hard example max novel scores: ", max_novel_scores.size(), max_novel_scores) loss3 = nn.MarginRankingLoss(margin=dist)(gt_scores.view(-1, 1), \ max_novel_scores.view(-1, 1), torch.ones(hard_num*K).to(device)) * lw_mr else: loss3 = torch.zeros(1).to(device) ################################################# loss = loss1 + loss2 + loss3 loss.backward() tg_optimizer.step() train_loss += loss.item() if iteration > start_iteration: train_loss1 += loss1.item() train_loss2 += loss2.item() train_loss3 += loss3.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() #if iteration == 0: # msg = 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % \ # (train_loss/(batch_idx+1), 100.*correct/total, correct, total) #else: # msg = 'Loss1: %.3f Loss2: %.3f Loss: %.3f | Acc: %.3f%% (%d/%d)' % \ # (loss1.item(), loss2.item(), train_loss/(batch_idx+1), 100.*correct/total, correct, total) #progress_bar(batch_idx, len(trainloader), msg) if iteration == start_iteration: print('Train set: {}, Train Loss: {:.4f} Acc: {:.4f}'.format(\ len(trainloader), train_loss/(batch_idx+1), 100.*correct/total)) else: print('Train set: {}, Train Loss1: {:.4f}, Train Loss2: {:.4f}, Train Loss3: {:.4f},\ Train Loss: {:.4f} Acc: {:.4f}'.format(len(trainloader), \ train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), train_loss3/(batch_idx+1), train_loss/(batch_idx+1), 100.*correct/total)) #eval tg_model.eval() test_loss = 0 correct = 0 total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) outputs = tg_model(inputs) loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets) test_loss += loss.item() _, predicted = 
outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() #progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' # % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) print('Test set: {} Test Loss: {:.4f} Acc: {:.4f}'.format(\ len(testloader), test_loss/(batch_idx+1), 100.*correct/total)) if iteration > start_iteration: print("Removing register_forward_hook") handle_ref_features.remove() handle_cur_features.remove() handle_old_scores_bs.remove() handle_new_scores_bs.remove() return tg_model
8,171
44.149171
107
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/gen_imagenet_subset.py
#!/usr/bin/env python # coding=utf-8 import argparse import os import random import shutil import time import warnings import numpy as np import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvision.datasets as datasets import torchvision.models as models data_dir = 'data/imagenet/data/' # Data loading code traindir = os.path.join(data_dir, 'train') train_dataset = datasets.ImageFolder(traindir, None) classes = train_dataset.classes print("the number of total classes: {}".format(len(classes))) seed = 1993 np.random.seed(seed) subset_num = 100 subset_classes = np.random.choice(classes, subset_num, replace=False) print("the number of subset classes: {}".format(len(subset_classes))) print(subset_classes) des_root_dir = 'data/seed_{}_subset_{}_imagenet/data/'.format(seed, subset_num) if not os.path.exists(des_root_dir): os.makedirs(des_root_dir) phase_list = ['train', 'val'] for phase in phase_list: if not os.path.exists(os.path.join(des_root_dir, phase)): os.mkdir(os.path.join(des_root_dir, phase)) for sc in subset_classes: src_dir = os.path.join(data_dir, phase, sc) des_dir = os.path.join(des_root_dir, phase, sc) cmd = "cp -r {} {}".format(src_dir, des_dir) print(cmd) os.system(cmd) print("Hello World")
1,499
26.777778
79
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/resnet.py
import torch.nn as nn import math import torch.utils.model_zoo as model_zoo __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = 
self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, num_classes=1000): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.avgpool = nn.AvgPool2d(7, stride=1) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x def resnet18(pretrained=False, **kwargs): """Constructs a ResNet-18 model. 
Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(pretrained=False, **kwargs): """Constructs a ResNet-34 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(pretrained=False, **kwargs): """Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(pretrained=False, **kwargs): """Constructs a ResNet-101 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def resnet152(pretrained=False, **kwargs): """Constructs a ResNet-152 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model
6,582
29.906103
90
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/utils_pytorch.py
#!/usr/bin/env python # coding=utf-8 from __future__ import print_function, division import torch import torch.nn as nn import torch.nn.init as init from collections import OrderedDict import numpy as np import os import os.path as osp import sys import time import math import subprocess try: import cPickle as pickle except: import pickle def savepickle(data, file_path): mkdir_p(osp.dirname(file_path), delete=False) print('pickle into', file_path) with open(file_path, 'wb') as f: pickle.dump(data, f, pickle.HIGHEST_PROTOCOL) def unpickle(file_path): with open(file_path, 'rb') as f: data = pickle.load(f) return data def mkdir_p(path, delete=False, print_info=True): if path == '': return if delete: subprocess.call(('rm -r ' + path).split()) if not osp.exists(path): if print_info: print('mkdir -p ' + path) subprocess.call(('mkdir -p ' + path).split()) def get_mean_and_std(dataset): '''Compute the mean and std value of dataset.''' dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2) mean = torch.zeros(3) std = torch.zeros(3) print('==> Computing mean and std..') for inputs, targets in dataloader: for i in range(3): mean[i] += inputs[:,i,:,:].mean() std[i] += inputs[:,i,:,:].std() mean.div_(len(dataset)) std.div_(len(dataset)) return mean, std def init_params(net): '''Init layer parameters.''' for m in net.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=1e-3) if m.bias is not None: init.constant_(m.bias, 0) _, term_width = os.popen('stty size', 'r').read().split() term_width = int(term_width) TOTAL_BAR_LENGTH = 65. last_time = time.time() begin_time = last_time def progress_bar(current, total, msg=None): global last_time, begin_time if current == 0: begin_time = time.time() # Reset for new bar. 
cur_len = int(TOTAL_BAR_LENGTH*current/total) rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1 sys.stdout.write(' [') for i in range(cur_len): sys.stdout.write('=') sys.stdout.write('>') for i in range(rest_len): sys.stdout.write('.') sys.stdout.write(']') cur_time = time.time() step_time = cur_time - last_time last_time = cur_time tot_time = cur_time - begin_time L = [] L.append(' Step: %s' % format_time(step_time)) L.append(' | Tot: %s' % format_time(tot_time)) if msg: L.append(' | ' + msg) msg = ''.join(L) sys.stdout.write(msg) for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3): sys.stdout.write(' ') # Go back to the center of the bar. for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2): sys.stdout.write('\b') sys.stdout.write(' %d/%d ' % (current+1, total)) if current < total-1: sys.stdout.write('\r') else: sys.stdout.write('\n') sys.stdout.flush() def format_time(seconds): days = int(seconds / 3600/24) seconds = seconds - days*3600*24 hours = int(seconds / 3600) seconds = seconds - hours*3600 minutes = int(seconds / 60) seconds = seconds - minutes*60 secondsf = int(seconds) seconds = seconds - secondsf millis = int(seconds*1000) f = '' i = 1 if days > 0: f += str(days) + 'D' i += 1 if hours > 0 and i <= 2: f += str(hours) + 'h' i += 1 if minutes > 0 and i <= 2: f += str(minutes) + 'm' i += 1 if secondsf > 0 and i <= 2: f += str(secondsf) + 's' i += 1 if millis > 0 and i <= 2: f += str(millis) + 'ms' i += 1 if f == '': f = '0ms' return f
4,102
26.172185
96
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/eval_cumul_acc.py
#!/usr/bin/env python # coding=utf-8 import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, models, transforms from torch.autograd import Variable import numpy as np import time import os import sys import copy import argparse from PIL import Image try: import cPickle as pickle except: import pickle from scipy.spatial.distance import cdist import utils_pytorch from utils_imagenet.utils_dataset import split_images_labels from utils_imagenet.utils_dataset import merge_images_labels from utils_incremental.compute_features import compute_features from utils_incremental.compute_accuracy import compute_accuracy from utils_incremental.compute_confusion_matrix import compute_confusion_matrix device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") ######### Modifiable Settings ########## parser = argparse.ArgumentParser() parser.add_argument('--datadir', default='data/seed_1993_subset_100_imagenet/data', type=str) parser.add_argument('--num_classes', default=100, type=int) parser.add_argument('--nb_cl', default=10, type=int, \ help='Classes per group') parser.add_argument('--ckp_prefix', \ default='checkpoint/class_incremental_imagenet_nb_cl_fg_50_nb_cl_10_nb_protos_200_run_0_', \ type=str) parser.add_argument('--order', \ default='./checkpoint/seed_1993_subset_100_imagenet_order_run_0.pkl', \ type=str) parser.add_argument('--nb_cl_fg', default=50, type=int, \ help='the number of classes in first group') args = parser.parse_args() print(args) order = utils_pytorch.unpickle(args.order) order_list = list(order) # transform_test = transforms.Compose([ # transforms.ToTensor(), # transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)), # ]) # evalset = torchvision.datasets.CIFAR100(root='./data', train=False, # download=False, transform=transform_test) # input_data = evalset.test_data # input_labels = 
evalset.test_labels # map_input_labels = np.array([order_list.index(i) for i in input_labels]) valdir = os.path.join(args.datadir, 'val') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) evalset = datasets.ImageFolder(valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ])) input_data, input_labels = split_images_labels(evalset.imgs) map_input_labels = np.array([order_list.index(i) for i in input_labels]) # evalset.test_labels = map_input_labels # evalloader = torch.utils.data.DataLoader(evalset, batch_size=128, # shuffle=False, num_workers=2) cnn_cumul_acc = [] icarl_cumul_acc = [] ncm_cumul_acc = [] num_classes = [] nb_cl = args.nb_cl start_iter = int(args.nb_cl_fg/nb_cl)-1 for iteration in range(start_iter, int(args.num_classes/nb_cl)): # print("###########################################################") # print("For iteration {}".format(iteration)) # print("###########################################################") ckp_name = '{}iteration_{}_model.pth'.format(args.ckp_prefix, iteration) class_means_name = '{}iteration_{}_class_means.pth'.format(args.ckp_prefix, iteration) if not os.path.exists(ckp_name): break tg_model = torch.load(ckp_name) tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) class_means = torch.load(class_means_name) current_means = class_means[:, order[:(iteration+1)*nb_cl]] indices = np.array([i in range(0, (iteration+1)*nb_cl) for i in map_input_labels]) # evalset.test_data = input_data[indices] # evalset.test_labels = map_input_labels[indices] # print('Max and Min of valid labels: {}, {}'.format(min(evalset.test_labels), max(evalset.test_labels))) current_eval_set = merge_images_labels(input_data[indices], map_input_labels[indices]) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=128, shuffle=False, num_workers=8, pin_memory=True) 
print("###########################################################") acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader, print_info=True) print("###########################################################") cnn_cumul_acc.append(acc[0]) icarl_cumul_acc.append(acc[1]) ncm_cumul_acc.append(acc[2]) num_classes.append((iteration+1)*nb_cl) print("###########################################################") print(' CNN acc: \t iCaRL acc \t NCM acc') print("###########################################################") for i in range(len(cnn_cumul_acc)): print("{:.2f} ".format(cnn_cumul_acc[i]), end='') print("[{:.2f}] ".format(np.mean(cnn_cumul_acc[-1])), end='') print("[{:.2f}] ".format(np.mean(cnn_cumul_acc)), end='') print("[{:.2f}] ".format(np.sum(np.array(cnn_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='') print("") for i in range(len(icarl_cumul_acc)): print("{:.2f} ".format(icarl_cumul_acc[i]), end='') print("[{:.2f}] ".format(np.mean(icarl_cumul_acc[-1])), end='') print("[{:.2f}] ".format(np.mean(icarl_cumul_acc)), end='') print("[{:.2f}] ".format(np.sum(np.array(icarl_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='') print("") for i in range(len(cnn_cumul_acc)): print("{:.2f} ".format(ncm_cumul_acc[i]), end='') print("[{:.2f}] ".format(np.mean(ncm_cumul_acc[-1])), end='') print("[{:.2f}] ".format(np.mean(ncm_cumul_acc)), end='') print("[{:.2f}] ".format(np.sum(np.array(ncm_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='') print("") print("###########################################################") print("") print('Number of classes', num_classes) print("###########################################################") print("Final acc on all classes") print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format(cnn_cumul_acc[-1], icarl_cumul_acc[-1], ncm_cumul_acc[-1])) print("###########################################################") print("Average acc in each phase") print("CNN:{:.2f}\t 
iCaRL:{:.2f}\t NCM:{:.2f}".format(np.mean(cnn_cumul_acc), np.mean(icarl_cumul_acc), np.mean(ncm_cumul_acc))) print("###########################################################") print("Weighted average acc in each phase") print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format( np.sum(np.array(cnn_cumul_acc)*np.array(num_classes)) / np.sum(num_classes), np.sum(np.array(icarl_cumul_acc)*np.array(num_classes)) / np.sum(num_classes), np.sum(np.array(ncm_cumul_acc)*np.array(num_classes)) / np.sum(num_classes) )) print("###########################################################")
6,830
45.469388
128
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/class_incremental_imagenet.py
#!/usr/bin/env python # coding=utf-8 import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, models, transforms from torch.autograd import Variable import numpy as np import time import os import sys import copy import argparse from PIL import Image try: import cPickle as pickle except: import pickle import utils_pytorch from utils_imagenet.utils_dataset import split_images_labels from utils_imagenet.utils_dataset import merge_images_labels from utils_incremental.compute_features import compute_features from utils_incremental.compute_accuracy import compute_accuracy from utils_incremental.compute_confusion_matrix import compute_confusion_matrix from utils_incremental.incremental_train_and_eval import incremental_train_and_eval ######### Modifiable Settings ########## parser = argparse.ArgumentParser() parser.add_argument('--dataset', default='seed_1993_subset_100_imagenet', type=str) parser.add_argument('--datadir', default='data/seed_1993_subset_100_imagenet/data', type=str) parser.add_argument('--num_classes', default=100, type=int) parser.add_argument('--num_workers', default=16, type=int, \ help='the number of workers for loading data') parser.add_argument('--nb_cl_fg', default=50, type=int, \ help='the number of classes in first group') parser.add_argument('--nb_cl', default=10, type=int, \ help='Classes per group') parser.add_argument('--nb_protos', default=20, type=int, \ help='Number of prototypes per class at the end') parser.add_argument('--nb_runs', default=1, type=int, \ help='Number of runs (random ordering of classes at each run)') parser.add_argument('--ckp_prefix', default=os.path.basename(sys.argv[0])[:-3], type=str, \ help='Checkpoint prefix') parser.add_argument('--epochs', default=90, type=int, \ help='Epochs') parser.add_argument('--T', default=2, type=float, \ help='Temporature for distialltion') 
parser.add_argument('--beta', default=0.25, type=float, \ help='Beta for distialltion') parser.add_argument('--resume', action='store_true', \ help='resume from checkpoint') parser.add_argument('--fix_budget', action='store_true', \ help='fix budget') parser.add_argument('--rs_ratio', default=0, type=float, \ help='The ratio for resample') parser.add_argument('--random_seed', default=1993, type=int, \ help='random seed') args = parser.parse_args() ######################################## assert(args.nb_cl_fg % args.nb_cl == 0) assert(args.nb_cl_fg >= args.nb_cl) train_batch_size = 128 # Batch size for train test_batch_size = 50 # Batch size for test eval_batch_size = 128 # Batch size for eval base_lr = 0.1 # Initial learning rate lr_strat = [30, 60] # Epochs where learning rate gets decreased lr_factor = 0.1 # Learning rate decrease factor custom_weight_decay = 1e-4 # Weight Decay custom_momentum = 0.9 # Momentum args.ckp_prefix = '{}_nb_cl_fg_{}_nb_cl_{}_nb_protos_{}'.format(args.ckp_prefix, args.nb_cl_fg, args.nb_cl, args.nb_protos) np.random.seed(args.random_seed) # Fix the random seed print(args) ######################################## device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") #transform_train = transforms.Compose([ # transforms.RandomCrop(32, padding=4), # transforms.RandomHorizontalFlip(), # transforms.ToTensor(), # transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)), #]) #transform_test = transforms.Compose([ # transforms.ToTensor(), # transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)), #]) #trainset = torchvision.datasets.CIFAR100(root='./data', train=True, # download=True, transform=transform_train) #testset = torchvision.datasets.CIFAR100(root='./data', train=False, # download=True, transform=transform_test) #evalset = torchvision.datasets.CIFAR100(root='./data', train=False, # download=False, transform=transform_test) # Data loading code traindir = os.path.join(args.datadir, 
'train') valdir = os.path.join(args.datadir, 'val') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) trainset = datasets.ImageFolder( traindir, transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ])) testset = datasets.ImageFolder(valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ])) evalset = datasets.ImageFolder(valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ])) # Initialization dictionary_size = 1500 top1_acc_list_cumul = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs)) top1_acc_list_ori = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs)) #X_train_total = np.array(trainset.train_data) #Y_train_total = np.array(trainset.train_labels) #X_valid_total = np.array(testset.test_data) #Y_valid_total = np.array(testset.test_labels) X_train_total, Y_train_total = split_images_labels(trainset.imgs) X_valid_total, Y_valid_total = split_images_labels(testset.imgs) # Launch the different runs for iteration_total in range(args.nb_runs): # Select the order for the class learning order_name = "./checkpoint/seed_{}_{}_order_run_{}.pkl".format(args.random_seed, args.dataset, iteration_total) print("Order name:{}".format(order_name)) if os.path.exists(order_name): print("Loading orders") order = utils_pytorch.unpickle(order_name) else: print("Generating orders") order = np.arange(args.num_classes) np.random.shuffle(order) utils_pytorch.savepickle(order, order_name) order_list = list(order) print(order_list) # Initialization of the variables for this run X_valid_cumuls = [] X_protoset_cumuls = [] X_train_cumuls = [] Y_valid_cumuls = [] Y_protoset_cumuls = [] Y_train_cumuls = [] alpha_dr_herding = np.zeros((int(args.num_classes/args.nb_cl),dictionary_size,args.nb_cl),np.float32) # The following contains all the training 
samples of the different classes # because we want to compare our method with the theoretical case where all the training samples are stored # prototypes = np.zeros((args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3])) prototypes = [[] for i in range(args.num_classes)] for orde in range(args.num_classes): prototypes[orde] = X_train_total[np.where(Y_train_total==order[orde])] prototypes = np.array(prototypes) start_iter = int(args.nb_cl_fg/args.nb_cl)-1 for iteration in range(start_iter, int(args.num_classes/args.nb_cl)): #init model if iteration == start_iter: ############################################################ last_iter = 0 ############################################################ tg_model = models.resnet18(num_classes=args.nb_cl_fg) ref_model = None else: ############################################################ last_iter = iteration ############################################################ #increment classes ref_model = copy.deepcopy(tg_model) in_features = tg_model.fc.in_features out_features = tg_model.fc.out_features new_fc = nn.Linear(in_features, out_features+args.nb_cl) new_fc.weight.data[:out_features] = tg_model.fc.weight.data new_fc.bias.data[:out_features] = tg_model.fc.bias.data tg_model.fc = new_fc # Prepare the training data for the current batch of classes actual_cl = order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] indices_train_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_train_total]) indices_test_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_valid_total]) X_train = X_train_total[indices_train_10] X_valid = X_valid_total[indices_test_10] X_valid_cumuls.append(X_valid) X_train_cumuls.append(X_train) X_valid_cumul = np.concatenate(X_valid_cumuls) X_train_cumul = np.concatenate(X_train_cumuls) Y_train = Y_train_total[indices_train_10] Y_valid = Y_valid_total[indices_test_10] 
Y_valid_cumuls.append(Y_valid) Y_train_cumuls.append(Y_train) Y_valid_cumul = np.concatenate(Y_valid_cumuls) Y_train_cumul = np.concatenate(Y_train_cumuls) # Add the stored exemplars to the training data if iteration == start_iter: X_valid_ori = X_valid Y_valid_ori = Y_valid else: X_protoset = np.concatenate(X_protoset_cumuls) Y_protoset = np.concatenate(Y_protoset_cumuls) if args.rs_ratio > 0: #1/rs_ratio = (len(X_train)+len(X_protoset)*scale_factor)/(len(X_protoset)*scale_factor) scale_factor = (len(X_train) * args.rs_ratio) / (len(X_protoset) * (1 - args.rs_ratio)) rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor)) #number of samples per epoch #rs_num_samples = len(X_train) + len(X_protoset) rs_num_samples = int(len(X_train) / (1 - args.rs_ratio)) print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples)) X_train = np.concatenate((X_train,X_protoset),axis=0) Y_train = np.concatenate((Y_train,Y_protoset)) # Launch the training loop print('Batch of classes number {0} arrives ...'.format(iteration+1)) map_Y_train = np.array([order_list.index(i) for i in Y_train]) map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul]) ############################################################ #trainset.train_data = X_train.astype('uint8') #trainset.train_labels = map_Y_train current_train_imgs = merge_images_labels(X_train, map_Y_train) trainset.imgs = trainset.samples = current_train_imgs if iteration > start_iter and args.rs_ratio > 0 and scale_factor > 1: print("Weights from sampling:", rs_sample_weights) index1 = np.where(rs_sample_weights>1)[0] index2 = np.where(map_Y_train<iteration*args.nb_cl)[0] assert((index1==index2).all()) train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples) #trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \ # shuffle=False, sampler=train_sampler, num_workers=2) 
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \ shuffle=False, sampler=train_sampler, num_workers=args.num_workers, pin_memory=True) else: #trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, # shuffle=True, num_workers=2) trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True) #testset.test_data = X_valid_cumul.astype('uint8') #testset.test_labels = map_Y_valid_cumul current_test_images = merge_images_labels(X_valid_cumul, map_Y_valid_cumul) testset.imgs = testset.samples = current_test_images testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, num_workers=2) print('Max and Min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train))) print('Max and Min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul))) ############################################################## ckp_name = './checkpoint/{}_run_{}_iteration_{}_model.pth'.format(args.ckp_prefix, iteration_total, iteration) print('ckp_name', ckp_name) if args.resume and os.path.exists(ckp_name): print("###############################") print("Loading models from checkpoint") tg_model = torch.load(ckp_name) print("###############################") else: tg_params = tg_model.parameters() tg_model = tg_model.to(device) if iteration > start_iter: ref_model = ref_model.to(device) tg_optimizer = optim.SGD(tg_params, lr=base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay) tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=lr_strat, gamma=lr_factor) tg_model = incremental_train_and_eval(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ args.T, args.beta) if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') torch.save(tg_model, ckp_name) ### Exemplars if args.fix_budget: nb_protos_cl = 
int(np.ceil(args.nb_protos*args.num_classes*1.0/args.nb_cl/(iteration+1))) else: nb_protos_cl = args.nb_protos tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) num_features = tg_model.fc.in_features # Herding print('Updating exemplar set...') for iter_dico in range(last_iter*args.nb_cl, (iteration+1)*args.nb_cl): # Possible exemplars in the feature space and projected on the L2 sphere # evalset.test_data = prototypes[iter_dico].astype('uint8') # evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels current_eval_set = merge_images_labels(prototypes[iter_dico], np.zeros(len(prototypes[iter_dico]))) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) num_samples = len(prototypes[iter_dico]) mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features) D = mapped_prototypes.T D = D/np.linalg.norm(D,axis=0) # Herding procedure : ranking of the potential exemplars mu = np.mean(D,axis=1) index1 = int(iter_dico/args.nb_cl) index2 = iter_dico % args.nb_cl alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0 w_t = mu iter_herding = 0 iter_herding_eff = 0 while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000: tmp_t = np.dot(w_t,D) ind_max = np.argmax(tmp_t) iter_herding_eff += 1 if alpha_dr_herding[index1,ind_max,index2] == 0: alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding iter_herding += 1 w_t = w_t+mu-D[:,ind_max] # Prepare the protoset X_protoset_cumuls = [] Y_protoset_cumuls = [] # Class means for iCaRL and NCM + Storing the selected exemplars in the protoset print('Computing mean-of_exemplars and theoretical mean...') # class_means = np.zeros((64,100,2)) class_means = np.zeros((num_features, args.num_classes, 2)) for iteration2 in range(iteration+1): for iter_dico in range(args.nb_cl): 
current_cl = order[range(iteration2*args.nb_cl,(iteration2+1)*args.nb_cl)] # Collect data in the feature space for each class # evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico].astype('uint8') # evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels current_eval_set = merge_images_labels(prototypes[iteration2*args.nb_cl+iter_dico], \ np.zeros(len(prototypes[iteration2*args.nb_cl+iter_dico]))) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) num_samples = len(prototypes[iteration2*args.nb_cl+iter_dico]) mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features) D = mapped_prototypes.T D = D/np.linalg.norm(D,axis=0) # Flipped version also # evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8') # evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, # shuffle=False, num_workers=2) # mapped_prototypes2 = compute_features(tg_feature_model, evalloader, num_samples, num_features) # D2 = mapped_prototypes2.T # D2 = D2/np.linalg.norm(D2,axis=0) D2 = D # iCaRL alph = alpha_dr_herding[iteration2,:,iter_dico] assert((alph[num_samples:]==0).all()) alph = alph[:num_samples] alph = (alph>0)*(alph<nb_protos_cl+1)*1. 
# X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico,np.where(alph==1)[0]]) X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico][np.where(alph==1)[0]]) Y_protoset_cumuls.append(order[iteration2*args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0]))) alph = alph/np.sum(alph) class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2 class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0]) # Normal NCM # alph = np.ones(dictionary_size)/dictionary_size alph = np.ones(num_samples)/num_samples class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2 class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1]) torch.save(class_means, \ './checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration)) current_means = class_means[:, order[range(0,(iteration+1)*args.nb_cl)]] ############################################################## # Calculate validation error of model on the first nb_cl classes: map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori]) print('Computing accuracy on the original batch of classes...') # evalset.test_data = X_valid_ori.astype('uint8') # evalset.test_labels = map_Y_valid_ori current_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T ############################################################## # Calculate validation error of model on the cumul of classes: map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul]) print('Computing cumulative accuracy...') # evalset.test_data = 
X_valid_cumul.astype('uint8') # evalset.test_labels = map_Y_valid_cumul current_eval_set = merge_images_labels(X_valid_cumul, map_Y_valid_cumul) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T ############################################################## # Calculate confusion matrix print('Computing confusion matrix...') cm = compute_confusion_matrix(tg_model, tg_feature_model, current_means, evalloader) cm_name = './checkpoint/{}_run_{}_iteration_{}_confusion_matrix.pth'.format(args.ckp_prefix,iteration_total, iteration) with open(cm_name, 'wb') as f: pickle.dump(cm, f, 2) #for reading with Python 2 ############################################################## # Final save of the data torch.save(top1_acc_list_ori, \ './checkpoint/{}_run_{}_top1_acc_list_ori.pth'.format(args.ckp_prefix, iteration_total)) torch.save(top1_acc_list_cumul, \ './checkpoint/{}_run_{}_top1_acc_list_cumul.pth'.format(args.ckp_prefix, iteration_total))
22,015
52.307506
132
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/modified_resnet.py
import torch.nn as nn import math import torch.utils.model_zoo as model_zoo import modified_linear def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, last=False): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.last = last def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual if not self.last: #remove ReLU in the last layer out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, num_classes=1000): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2, last_phase=True) self.avgpool = nn.AvgPool2d(7, stride=1) self.fc = modified_linear.CosineLinear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1, last_phase=False): downsample = 
None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion if last_phase: for i in range(1, blocks-1): layers.append(block(self.inplanes, planes)) layers.append(block(self.inplanes, planes, last=True)) else: for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x def resnet18(pretrained=False, **kwargs): """Constructs a ResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) return model
3,850
32.198276
88
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/cbf_class_incremental_cosine_imagenet.py
#!/usr/bin/env python # coding=utf-8 import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, models, transforms from torch.autograd import Variable import numpy as np import time import os import sys import copy import argparse from PIL import Image try: import cPickle as pickle except: import pickle import math import modified_resnet import modified_linear import utils_pytorch from utils_imagenet.utils_dataset import split_images_labels from utils_imagenet.utils_dataset import merge_images_labels from utils_incremental.compute_features import compute_features from utils_incremental.compute_accuracy import compute_accuracy from utils_incremental.compute_confusion_matrix import compute_confusion_matrix from utils_incremental.incremental_train_and_eval import incremental_train_and_eval from utils_incremental.incremental_train_and_eval_MS import incremental_train_and_eval_MS from utils_incremental.incremental_train_and_eval_LF import incremental_train_and_eval_LF from utils_incremental.incremental_train_and_eval_MR_LF import incremental_train_and_eval_MR_LF from utils_incremental.incremental_train_and_eval_AMR_LF import incremental_train_and_eval_AMR_LF ######### Modifiable Settings ########## parser = argparse.ArgumentParser() parser.add_argument('--dataset', default='seed_1993_subset_100_imagenet', type=str) parser.add_argument('--datadir', default='data/seed_1993_subset_100_imagenet/data', type=str) parser.add_argument('--num_classes', default=100, type=int) parser.add_argument('--num_workers', default=16, type=int, \ help='the number of workers for loading data') parser.add_argument('--nb_cl_fg', default=50, type=int, \ help='the number of classes in first group') parser.add_argument('--nb_cl', default=10, type=int, \ help='Classes per group') parser.add_argument('--nb_protos', default=20, type=int, \ help='Number of prototypes per class 
at the end') parser.add_argument('--nb_runs', default=1, type=int, \ help='Number of runs (random ordering of classes at each run)') parser.add_argument('--ckp_prefix', default=os.path.basename(sys.argv[0])[:-3], type=str, \ help='Checkpoint prefix') parser.add_argument('--epochs', default=90, type=int, \ help='Epochs') parser.add_argument('--T', default=2, type=float, \ help='Temporature for distialltion') parser.add_argument('--beta', default=0.25, type=float, \ help='Beta for distialltion') parser.add_argument('--resume', action='store_true', \ help='resume from checkpoint') parser.add_argument('--fix_budget', action='store_true', \ help='fix budget') ######################################## parser.add_argument('--mimic_score', action='store_true', \ help='To mimic scores for cosine embedding') parser.add_argument('--lw_ms', default=1, type=float, \ help='loss weight for mimicking score') ######################################## #improved class incremental learning parser.add_argument('--rs_ratio', default=0, type=float, \ help='The ratio for resample') parser.add_argument('--imprint_weights', action='store_true', \ help='Imprint the weights for novel classes') parser.add_argument('--less_forget', action='store_true', \ help='Less forgetful') parser.add_argument('--lamda', default=5, type=float, \ help='Lamda for LF') parser.add_argument('--adapt_lamda', action='store_true', \ help='Adaptively change lamda') parser.add_argument('--mr_loss', action='store_true', \ help='Margin ranking loss v1') parser.add_argument('--amr_loss', action='store_true', \ help='Margin ranking loss v2') parser.add_argument('--dist', default=0.5, type=float, \ help='Dist for MarginRankingLoss') parser.add_argument('--K', default=2, type=int, \ help='K for MarginRankingLoss') parser.add_argument('--lw_mr', default=1, type=float, \ help='loss weight for margin ranking loss') ######################################## parser.add_argument('--random_seed', default=1993, type=int, \ 
help='random seed') ######################################## parser.add_argument('--cb_finetune', action='store_true', \ help='class balance finetune') parser.add_argument('--ft_epochs', default=20, type=int, \ help='Epochs for class balance finetune') parser.add_argument('--ft_base_lr', default=0.01, type=float, \ help='Base learning rate for class balance finetune') parser.add_argument('--ft_lr_strat', default=[10], type=int, nargs='+', \ help='Lr_strat for class balance finetune') parser.add_argument('--ft_flag', default=2, type=int, \ help='Flag for class balance finetune') args = parser.parse_args() ######################################## assert(args.nb_cl_fg % args.nb_cl == 0) assert(args.nb_cl_fg >= args.nb_cl) train_batch_size = 128 # Batch size for train test_batch_size = 50 # Batch size for test eval_batch_size = 128 # Batch size for eval base_lr = 0.1 # Initial learning rate lr_strat = [30, 60] # Epochs where learning rate gets decreased lr_factor = 0.1 # Learning rate decrease factor custom_weight_decay = 1e-4 # Weight Decay custom_momentum = 0.9 # Momentum args.ckp_prefix = '{}_nb_cl_fg_{}_nb_cl_{}_nb_protos_{}'.format(args.ckp_prefix, args.nb_cl_fg, args.nb_cl, args.nb_protos) np.random.seed(args.random_seed) # Fix the random seed print(args) ######################################## device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") #transform_train = transforms.Compose([ # transforms.RandomCrop(32, padding=4), # transforms.RandomHorizontalFlip(), # transforms.ToTensor(), # transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)), #]) #transform_test = transforms.Compose([ # transforms.ToTensor(), # transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)), #]) #trainset = torchvision.datasets.CIFAR100(root='./data', train=True, # download=True, transform=transform_train) #testset = torchvision.datasets.CIFAR100(root='./data', train=False, # download=True, transform=transform_test) #evalset = 
torchvision.datasets.CIFAR100(root='./data', train=False, # download=False, transform=transform_test) # Data loading code traindir = os.path.join(args.datadir, 'train') valdir = os.path.join(args.datadir, 'val') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) trainset = datasets.ImageFolder( traindir, transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ])) testset = datasets.ImageFolder(valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ])) evalset = datasets.ImageFolder(valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ])) # Initialization dictionary_size = 1500 top1_acc_list_cumul = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs)) top1_acc_list_ori = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs)) #X_train_total = np.array(trainset.train_data) #Y_train_total = np.array(trainset.train_labels) #X_valid_total = np.array(testset.test_data) #Y_valid_total = np.array(testset.test_labels) X_train_total, Y_train_total = split_images_labels(trainset.imgs) X_valid_total, Y_valid_total = split_images_labels(testset.imgs) # Launch the different runs for iteration_total in range(args.nb_runs): # Select the order for the class learning order_name = "./checkpoint/seed_{}_{}_order_run_{}.pkl".format(args.random_seed, args.dataset, iteration_total) print("Order name:{}".format(order_name)) if os.path.exists(order_name): print("Loading orders") order = utils_pytorch.unpickle(order_name) else: print("Generating orders") order = np.arange(args.num_classes) np.random.shuffle(order) utils_pytorch.savepickle(order, order_name) order_list = list(order) print(order_list) # Initialization of the variables for this run X_valid_cumuls = [] X_protoset_cumuls = [] X_train_cumuls = [] Y_valid_cumuls = [] Y_protoset_cumuls = [] 
Y_train_cumuls = [] alpha_dr_herding = np.zeros((int(args.num_classes/args.nb_cl),dictionary_size,args.nb_cl),np.float32) # The following contains all the training samples of the different classes # because we want to compare our method with the theoretical case where all the training samples are stored # prototypes = np.zeros((args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3])) prototypes = [[] for i in range(args.num_classes)] for orde in range(args.num_classes): prototypes[orde] = X_train_total[np.where(Y_train_total==order[orde])] prototypes = np.array(prototypes) start_iter = int(args.nb_cl_fg/args.nb_cl)-1 for iteration in range(start_iter, int(args.num_classes/args.nb_cl)): #init model if iteration == start_iter: ############################################################ last_iter = 0 ############################################################ tg_model = modified_resnet.resnet18(num_classes=args.nb_cl_fg) in_features = tg_model.fc.in_features out_features = tg_model.fc.out_features print("in_features:", in_features, "out_features:", out_features) ref_model = None elif iteration == start_iter+1: ############################################################ last_iter = iteration ############################################################ #increment classes ref_model = copy.deepcopy(tg_model) in_features = tg_model.fc.in_features out_features = tg_model.fc.out_features print("in_features:", in_features, "out_features:", out_features) new_fc = modified_linear.SplitCosineLinear(in_features, out_features, args.nb_cl) new_fc.fc1.weight.data = tg_model.fc.weight.data new_fc.sigma.data = tg_model.fc.sigma.data tg_model.fc = new_fc lamda_mult = out_features*1.0 / args.nb_cl else: ############################################################ last_iter = iteration ############################################################ ref_model = copy.deepcopy(tg_model) in_features = tg_model.fc.in_features out_features1 = 
tg_model.fc.fc1.out_features out_features2 = tg_model.fc.fc2.out_features print("in_features:", in_features, "out_features1:", \ out_features1, "out_features2:", out_features2) new_fc = modified_linear.SplitCosineLinear(in_features, out_features1+out_features2, args.nb_cl) new_fc.fc1.weight.data[:out_features1] = tg_model.fc.fc1.weight.data new_fc.fc1.weight.data[out_features1:] = tg_model.fc.fc2.weight.data new_fc.sigma.data = tg_model.fc.sigma.data tg_model.fc = new_fc lamda_mult = (out_features1+out_features2)*1.0 / (args.nb_cl) if iteration > start_iter and args.less_forget and args.adapt_lamda: #cur_lamda = lamda_base * sqrt(num_old_classes/num_new_classes) cur_lamda = args.lamda * math.sqrt(lamda_mult) else: cur_lamda = args.lamda if iteration > start_iter and args.less_forget: print("###############################") print("Lamda for less forget is set to ", cur_lamda) print("###############################") # Prepare the training data for the current batch of classes actual_cl = order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] indices_train_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_train_total]) indices_test_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_valid_total]) X_train = X_train_total[indices_train_10] X_valid = X_valid_total[indices_test_10] X_valid_cumuls.append(X_valid) X_train_cumuls.append(X_train) X_valid_cumul = np.concatenate(X_valid_cumuls) X_train_cumul = np.concatenate(X_train_cumuls) Y_train = Y_train_total[indices_train_10] Y_valid = Y_valid_total[indices_test_10] Y_valid_cumuls.append(Y_valid) Y_train_cumuls.append(Y_train) Y_valid_cumul = np.concatenate(Y_valid_cumuls) Y_train_cumul = np.concatenate(Y_train_cumuls) # Add the stored exemplars to the training data if iteration == start_iter: X_valid_ori = X_valid Y_valid_ori = Y_valid else: X_protoset = np.concatenate(X_protoset_cumuls) Y_protoset = np.concatenate(Y_protoset_cumuls) 
if args.rs_ratio > 0: #1/rs_ratio = (len(X_train)+len(X_protoset)*scale_factor)/(len(X_protoset)*scale_factor) scale_factor = (len(X_train) * args.rs_ratio) / (len(X_protoset) * (1 - args.rs_ratio)) rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor)) #number of samples per epoch #rs_num_samples = len(X_train) + len(X_protoset) rs_num_samples = int(len(X_train) / (1 - args.rs_ratio)) print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples)) X_train = np.concatenate((X_train,X_protoset),axis=0) Y_train = np.concatenate((Y_train,Y_protoset)) # Launch the training loop print('Batch of classes number {0} arrives ...'.format(iteration+1)) map_Y_train = np.array([order_list.index(i) for i in Y_train]) map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul]) #imprint weights if iteration > start_iter and args.imprint_weights: #input: tg_model, X_train, map_Y_train #class_start = iteration*nb_cl class_end = (iteration+1)*nb_cl print("Imprint weights") ######################################### #compute the average norm of old embdding old_embedding_norm = tg_model.fc.fc1.weight.data.norm(dim=1, keepdim=True) average_old_embedding_norm = torch.mean(old_embedding_norm, dim=0).to('cpu').type(torch.DoubleTensor) ######################################### tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) num_features = tg_model.fc.in_features novel_embedding = torch.zeros((args.nb_cl, num_features)) for cls_idx in range(iteration*args.nb_cl, (iteration+1)*args.nb_cl): cls_indices = np.array([i == cls_idx for i in map_Y_train]) assert(len(np.where(cls_indices==1)[0])<=dictionary_size) #evalset.test_data = X_train[cls_indices].astype('uint8') #evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels current_eval_set = merge_images_labels(X_train[cls_indices], np.zeros(len(X_train[cls_indices]))) evalset.imgs = evalset.samples = 
current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2) num_samples = len(X_train[cls_indices]) cls_features = compute_features(tg_feature_model, evalloader, num_samples, num_features) #cls_features = cls_features.T #cls_features = cls_features / np.linalg.norm(cls_features,axis=0) #cls_embedding = np.mean(cls_features, axis=1) norm_features = F.normalize(torch.from_numpy(cls_features), p=2, dim=1) cls_embedding = torch.mean(norm_features, dim=0) #novel_embedding[cls_idx-iteration*args.nb_cl] = cls_embedding novel_embedding[cls_idx-iteration*args.nb_cl] = F.normalize(cls_embedding, p=2, dim=0) * average_old_embedding_norm tg_model.to(device) #torch.save(tg_model, "tg_model_before_imprint_weights.pth") tg_model.fc.fc2.weight.data = novel_embedding.to(device) #torch.save(tg_model, "tg_model_after_imprint_weights.pth") ############################################################ #trainset.train_data = X_train.astype('uint8') #trainset.train_labels = map_Y_train current_train_imgs = merge_images_labels(X_train, map_Y_train) trainset.imgs = trainset.samples = current_train_imgs if iteration > start_iter and args.rs_ratio > 0 and scale_factor > 1: print("Weights from sampling:", rs_sample_weights) index1 = np.where(rs_sample_weights>1)[0] index2 = np.where(map_Y_train<iteration*args.nb_cl)[0] assert((index1==index2).all()) train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples) #trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \ # shuffle=False, sampler=train_sampler, num_workers=2) trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \ shuffle=False, sampler=train_sampler, num_workers=args.num_workers, pin_memory=True) else: #trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, # shuffle=True, num_workers=2) trainloader = torch.utils.data.DataLoader(trainset, 
batch_size=train_batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True) #testset.test_data = X_valid_cumul.astype('uint8') #testset.test_labels = map_Y_valid_cumul current_test_imgs = merge_images_labels(X_valid_cumul, map_Y_valid_cumul) testset.imgs = testset.samples = current_test_imgs testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, num_workers=2) print('Max and Min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train))) print('Max and Min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul))) ############################################################## ckp_name = './checkpoint/{}_run_{}_iteration_{}_model.pth'.format(args.ckp_prefix, iteration_total, iteration) print('ckp_name', ckp_name) if args.resume and os.path.exists(ckp_name): print("###############################") print("Loading models from checkpoint") tg_model = torch.load(ckp_name) print("###############################") else: ############################### if iteration > start_iter and args.less_forget: #fix the embedding of old classes ignored_params = list(map(id, tg_model.fc.fc1.parameters())) base_params = filter(lambda p: id(p) not in ignored_params, \ tg_model.parameters()) tg_params =[{'params': base_params, 'lr': base_lr, 'weight_decay': custom_weight_decay}, \ {'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}] else: tg_params = tg_model.parameters() ############################### tg_model = tg_model.to(device) if iteration > start_iter: ref_model = ref_model.to(device) tg_optimizer = optim.SGD(tg_params, lr=base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay) tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=lr_strat, gamma=lr_factor) ############################### if args.less_forget and args.mr_loss: print("incremental_train_and_eval_MR_LF") tg_model = incremental_train_and_eval_MR_LF(args.epochs, tg_model, ref_model, 
tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ args.dist, args.K, args.lw_mr) elif args.less_forget and args.amr_loss: print("incremental_train_and_eval_AMR_LF") tg_model = incremental_train_and_eval_AMR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ args.dist, args.K, args.lw_mr) else: if args.less_forget: print("incremental_train_and_eval_LF") tg_model = incremental_train_and_eval_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda) else: if args.mimic_score: print("incremental_train_and_eval_MS") tg_model = incremental_train_and_eval_MS(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, args.lw_ms) else: print("incremental_train_and_eval") tg_model = incremental_train_and_eval(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, args.T, args.beta) if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') torch.save(tg_model, ckp_name) ### Exemplars if args.fix_budget: nb_protos_cl = int(np.ceil(args.nb_protos*args.num_classes*1.0/args.nb_cl/(iteration+1))) else: nb_protos_cl = args.nb_protos tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) num_features = tg_model.fc.in_features # Herding print('Updating exemplar set...') for iter_dico in range(last_iter*args.nb_cl, (iteration+1)*args.nb_cl): # Possible exemplars in the feature space and projected on the L2 sphere # evalset.test_data = prototypes[iter_dico].astype('uint8') # evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels current_eval_set = merge_images_labels(prototypes[iter_dico], np.zeros(len(prototypes[iter_dico]))) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, 
batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) num_samples = len(prototypes[iter_dico]) mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features) D = mapped_prototypes.T D = D/np.linalg.norm(D,axis=0) # Herding procedure : ranking of the potential exemplars mu = np.mean(D,axis=1) index1 = int(iter_dico/args.nb_cl) index2 = iter_dico % args.nb_cl alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0 w_t = mu iter_herding = 0 iter_herding_eff = 0 while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000: tmp_t = np.dot(w_t,D) ind_max = np.argmax(tmp_t) iter_herding_eff += 1 if alpha_dr_herding[index1,ind_max,index2] == 0: alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding iter_herding += 1 w_t = w_t+mu-D[:,ind_max] # Prepare the protoset X_protoset_cumuls = [] Y_protoset_cumuls = [] # Class means for iCaRL and NCM + Storing the selected exemplars in the protoset print('Computing mean-of_exemplars and theoretical mean...') # class_means = np.zeros((64,100,2)) class_means = np.zeros((num_features, args.num_classes, 2)) for iteration2 in range(iteration+1): for iter_dico in range(args.nb_cl): current_cl = order[range(iteration2*args.nb_cl,(iteration2+1)*args.nb_cl)] # Collect data in the feature space for each class # evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico].astype('uint8') # evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels current_eval_set = merge_images_labels(prototypes[iteration2*args.nb_cl+iter_dico], \ np.zeros(len(prototypes[iteration2*args.nb_cl+iter_dico]))) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) num_samples = len(prototypes[iteration2*args.nb_cl+iter_dico]) mapped_prototypes = compute_features(tg_feature_model, 
evalloader, num_samples, num_features) D = mapped_prototypes.T D = D/np.linalg.norm(D,axis=0) # Flipped version also # evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8') # evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, # shuffle=False, num_workers=2) # mapped_prototypes2 = compute_features(tg_feature_model, evalloader, num_samples, num_features) # D2 = mapped_prototypes2.T # D2 = D2/np.linalg.norm(D2,axis=0) D2 = D # iCaRL alph = alpha_dr_herding[iteration2,:,iter_dico] assert((alph[num_samples:]==0).all()) alph = alph[:num_samples] alph = (alph>0)*(alph<nb_protos_cl+1)*1. # X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico,np.where(alph==1)[0]]) X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico][np.where(alph==1)[0]]) Y_protoset_cumuls.append(order[iteration2*args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0]))) alph = alph/np.sum(alph) class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2 class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0]) # Normal NCM # alph = np.ones(dictionary_size)/dictionary_size alph = np.ones(num_samples)/num_samples class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2 class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1]) # torch.save(class_means, \ # './checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration)) class_means_name = './checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration) torch.save(class_means, class_means_name) current_means = class_means[:, order[range(0,(iteration+1)*args.nb_cl)]] ############################################################## # Calculate validation error of model on the first nb_cl classes: map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori]) 
print('Computing accuracy on the original batch of classes...') # evalset.test_data = X_valid_ori.astype('uint8') # evalset.test_labels = map_Y_valid_ori current_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T ############################################################## # Calculate validation error of model on the cumul of classes: map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul]) print('Computing cumulative accuracy...') # evalset.test_data = X_valid_cumul.astype('uint8') # evalset.test_labels = map_Y_valid_cumul current_eval_set = merge_images_labels(X_valid_cumul, map_Y_valid_cumul) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T ############################################################## # Calculate confusion matrix # print('Computing confusion matrix...') # cm = compute_confusion_matrix(tg_model, tg_feature_model, current_means, evalloader) # cm_name = './checkpoint/{}_run_{}_iteration_{}_confusion_matrix.pth'.format(args.ckp_prefix,iteration_total, iteration) # with open(cm_name, 'wb') as f: # pickle.dump(cm, f, 2) #for reading with Python 2 ############################################################## if iteration == start_iter and args.cb_finetune: #for the convenience of evaluation torch.save(tg_model, ckp_name.replace("/checkpoint/", "/checkpoint/AFTER_CBF_")) 
torch.save(class_means, class_means_name.replace("/checkpoint/", "/checkpoint/AFTER_CBF_")) if iteration > start_iter and args.cb_finetune: # Class balance finetuning on the protoset print("###############################") print("Class balance finetuning on the protoset") print("###############################") map_Y_protoset_cumuls = np.array([order_list.index(i) for i in np.concatenate(Y_protoset_cumuls)]) current_train_imgs = merge_images_labels(np.concatenate(X_protoset_cumuls), map_Y_protoset_cumuls) trainset.imgs = trainset.samples = current_train_imgs trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True) print('Min and Max of train labels: {}, {}'.format(min(map_Y_protoset_cumuls), max(map_Y_protoset_cumuls))) ############################### print('Computing accuracy on the protoset...') current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls), map_Y_protoset_cumuls) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) ############################### print('Computing accuracy on the old protoset...') indices = np.array([i in range(0, iteration*args.nb_cl) for i in map_Y_protoset_cumuls]) current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls)[indices], map_Y_protoset_cumuls[indices]) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) print('Min and Max of eval labels: {}, {}'.format(min(map_Y_protoset_cumuls[indices]), max(map_Y_protoset_cumuls[indices]))) tg_feature_model = 
nn.Sequential(*list(tg_model.children())[:-1]) cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) ############################### print('Computing accuracy on the new protoset...') indices = np.array([i in range(iteration*args.nb_cl, (iteration+1)*args.nb_cl) for i in map_Y_protoset_cumuls]) current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls)[indices], map_Y_protoset_cumuls[indices]) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) print('Min and Max of eval labels: {}, {}'.format(min(map_Y_protoset_cumuls[indices]), max(map_Y_protoset_cumuls[indices]))) tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) ############################################################## #tg_params = tg_model.parameters() if args.ft_flag == 0: #everything is not updated ignored_params = list(map(id, tg_model.fc.parameters())) base_params = filter(lambda p: id(p) not in ignored_params, tg_model.parameters()) tg_params =[{'params': base_params, 'lr': 0, 'weight_decay': 0}, \ {'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}, \ {'params': tg_model.fc.fc2.parameters(), 'lr': 0, 'weight_decay': 0}] fix_bn_flag = True tg_model = tg_model.to(device) ref_model = ref_model.to(device) tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay) tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor) tg_model = incremental_train_and_eval_MR_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ args.dist, args.K, args.lw_mr, \ fix_bn=fix_bn_flag) elif args.ft_flag == 1: #only the novel 
embeddings are updated with the feature extractor fixed ignored_params = list(map(id, tg_model.fc.parameters())) base_params = filter(lambda p: id(p) not in ignored_params, tg_model.parameters()) tg_params =[{'params': base_params, 'lr': 0, 'weight_decay': 0}, \ {'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}, \ {'params': tg_model.fc.fc2.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}] fix_bn_flag = True tg_model = tg_model.to(device) ref_model = ref_model.to(device) tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay) tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor) tg_model = incremental_train_and_eval_MR_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ args.dist, args.K, args.lw_mr, \ fix_bn=fix_bn_flag) elif args.ft_flag == 2: #both the old and novel embeddings are updated with the feature extractor fixed ignored_params = list(map(id, tg_model.fc.parameters())) base_params = filter(lambda p: id(p) not in ignored_params, tg_model.parameters()) tg_params =[{'params': base_params, 'lr': 0, 'weight_decay': 0}, \ {'params': tg_model.fc.fc1.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}, \ {'params': tg_model.fc.fc2.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}] fix_bn_flag = True tg_model = tg_model.to(device) ref_model = ref_model.to(device) tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay) tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor) tg_model = incremental_train_and_eval_MR_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ 
args.dist, args.K, args.lw_mr, \ fix_bn=fix_bn_flag) elif args.ft_flag == 3: #everything is updated ignored_params = list(map(id, tg_model.fc.parameters())) base_params = filter(lambda p: id(p) not in ignored_params, tg_model.parameters()) tg_params =[{'params': base_params, 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}, \ {'params': tg_model.fc.fc1.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}, \ {'params': tg_model.fc.fc2.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}] fix_bn_flag = False tg_model = tg_model.to(device) ref_model = ref_model.to(device) tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay) tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor) tg_model = incremental_train_and_eval_MR_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ args.dist, args.K, args.lw_mr, \ fix_bn=fix_bn_flag) #both the old and novel embeddings are updated with the feature extractor fixed #the MR loss is removed in the CBF (ft_flag=4) and removed in all the training (ft_flag=5) #the differences lie in the models for CBF elif args.ft_flag == 4 or args.ft_flag == 5: ignored_params = list(map(id, tg_model.fc.parameters())) base_params = filter(lambda p: id(p) not in ignored_params, tg_model.parameters()) tg_params =[{'params': base_params, 'lr': 0, 'weight_decay': 0}, \ {'params': tg_model.fc.fc1.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}, \ {'params': tg_model.fc.fc2.parameters(), 'lr': args.ft_base_lr, 'weight_decay': custom_weight_decay}] fix_bn_flag = True tg_model = tg_model.to(device) ref_model = ref_model.to(device) tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay) tg_ft_lr_scheduler = 
lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor) tg_model = incremental_train_and_eval_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ fix_bn=fix_bn_flag) else: print("Unknown ft_flag") sys.exit() ############################### # tg_model = tg_model.to(device) # ref_model = ref_model.to(device) # tg_ft_optimizer = optim.SGD(tg_params, lr=args.ft_base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay) # tg_ft_lr_scheduler = lr_scheduler.MultiStepLR(tg_ft_optimizer, milestones=args.ft_lr_strat, gamma=lr_factor) # tg_model = incremental_train_and_eval_MR_LF(args.ft_epochs, tg_model, ref_model, tg_ft_optimizer, tg_ft_lr_scheduler, \ # trainloader, testloader, \ # iteration, start_iter, \ # cur_lamda, \ # args.dist, args.K, args.lw_mr, \ # fix_bn=fix_bn_flag) torch.save(tg_model, ckp_name.replace("/checkpoint/", "/checkpoint/AFTER_CBF_")) torch.save(class_means, class_means_name.replace("/checkpoint/", "/checkpoint/AFTER_CBF_")) ############################################################### ############################### print('Computing accuracy on the protoset...') current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls), map_Y_protoset_cumuls) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) ############################### print('Computing accuracy on the old protoset...') indices = np.array([i in range(0, iteration*args.nb_cl) for i in map_Y_protoset_cumuls]) current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls)[indices], map_Y_protoset_cumuls[indices]) evalset.imgs = evalset.samples = 
current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) print('Min and Max of eval labels: {}, {}'.format(min(map_Y_protoset_cumuls[indices]), max(map_Y_protoset_cumuls[indices]))) tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) ############################### print('Computing accuracy on the new protoset...') indices = np.array([i in range(iteration*args.nb_cl, (iteration+1)*args.nb_cl) for i in map_Y_protoset_cumuls]) current_eval_set = merge_images_labels(np.concatenate(X_protoset_cumuls)[indices], map_Y_protoset_cumuls[indices]) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) print('Min and Max of eval labels: {}, {}'.format(min(map_Y_protoset_cumuls[indices]), max(map_Y_protoset_cumuls[indices]))) tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) cbf_proto_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) ############################### ############################################################## # Calculate validation error of model on the first nb_cl classes: map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori]) print('Computing accuracy on the original batch of classes...') # evalset.test_data = X_valid_ori.astype('uint8') # evalset.test_labels = map_Y_valid_ori current_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) top1_acc_list_ori[iteration, :, iteration_total] = 
np.array(ori_acc).T ############################################################## # Calculate validation error of model on the cumul of classes: map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul]) print('Computing cumulative accuracy...') # evalset.test_data = X_valid_cumul.astype('uint8') # evalset.test_labels = map_Y_valid_cumul current_eval_set = merge_images_labels(X_valid_cumul, map_Y_valid_cumul) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T ############################################################## # Final save of the data torch.save(top1_acc_list_ori, \ './checkpoint/{}_run_{}_top1_acc_list_ori.pth'.format(args.ckp_prefix, iteration_total)) torch.save(top1_acc_list_cumul, \ './checkpoint/{}_run_{}_top1_acc_list_cumul.pth'.format(args.ckp_prefix, iteration_total))
47,336
60.08
136
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/class_incremental_cosine_imagenet.py
#!/usr/bin/env python # coding=utf-8 import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, models, transforms from torch.autograd import Variable import numpy as np import time import os import sys import copy import argparse from PIL import Image try: import cPickle as pickle except: import pickle import math import modified_resnet import modified_linear import utils_pytorch from utils_imagenet.utils_dataset import split_images_labels from utils_imagenet.utils_dataset import merge_images_labels from utils_incremental.compute_features import compute_features from utils_incremental.compute_accuracy import compute_accuracy from utils_incremental.compute_confusion_matrix import compute_confusion_matrix from utils_incremental.incremental_train_and_eval import incremental_train_and_eval from utils_incremental.incremental_train_and_eval_MS import incremental_train_and_eval_MS from utils_incremental.incremental_train_and_eval_LF import incremental_train_and_eval_LF from utils_incremental.incremental_train_and_eval_MR_LF import incremental_train_and_eval_MR_LF from utils_incremental.incremental_train_and_eval_AMR_LF import incremental_train_and_eval_AMR_LF ######### Modifiable Settings ########## parser = argparse.ArgumentParser() parser.add_argument('--dataset', default='seed_1993_subset_100_imagenet', type=str) parser.add_argument('--datadir', default='data/seed_1993_subset_100_imagenet/data', type=str) parser.add_argument('--num_classes', default=100, type=int) parser.add_argument('--num_workers', default=16, type=int, \ help='the number of workers for loading data') parser.add_argument('--nb_cl_fg', default=50, type=int, \ help='the number of classes in first group') parser.add_argument('--nb_cl', default=10, type=int, \ help='Classes per group') parser.add_argument('--nb_protos', default=20, type=int, \ help='Number of prototypes per class 
at the end') parser.add_argument('--nb_runs', default=1, type=int, \ help='Number of runs (random ordering of classes at each run)') parser.add_argument('--ckp_prefix', default=os.path.basename(sys.argv[0])[:-3], type=str, \ help='Checkpoint prefix') parser.add_argument('--epochs', default=90, type=int, \ help='Epochs') parser.add_argument('--T', default=2, type=float, \ help='Temporature for distialltion') parser.add_argument('--beta', default=0.25, type=float, \ help='Beta for distialltion') parser.add_argument('--resume', action='store_true', \ help='resume from checkpoint') parser.add_argument('--fix_budget', action='store_true', \ help='fix budget') ######################################## parser.add_argument('--mimic_score', action='store_true', \ help='To mimic scores for cosine embedding') parser.add_argument('--lw_ms', default=1, type=float, \ help='loss weight for mimicking score') ######################################## #improved class incremental learning parser.add_argument('--rs_ratio', default=0, type=float, \ help='The ratio for resample') parser.add_argument('--imprint_weights', action='store_true', \ help='Imprint the weights for novel classes') parser.add_argument('--less_forget', action='store_true', \ help='Less forgetful') parser.add_argument('--lamda', default=5, type=float, \ help='Lamda for LF') parser.add_argument('--adapt_lamda', action='store_true', \ help='Adaptively change lamda') parser.add_argument('--mr_loss', action='store_true', \ help='Margin ranking loss v1') parser.add_argument('--amr_loss', action='store_true', \ help='Margin ranking loss v2') parser.add_argument('--dist', default=0.5, type=float, \ help='Dist for MarginRankingLoss') parser.add_argument('--K', default=2, type=int, \ help='K for MarginRankingLoss') parser.add_argument('--lw_mr', default=1, type=float, \ help='loss weight for margin ranking loss') ######################################## parser.add_argument('--random_seed', default=1993, type=int, \ 
help='random seed') args = parser.parse_args() ######################################## assert(args.nb_cl_fg % args.nb_cl == 0) assert(args.nb_cl_fg >= args.nb_cl) train_batch_size = 128 # Batch size for train test_batch_size = 50 # Batch size for test eval_batch_size = 128 # Batch size for eval base_lr = 0.1 # Initial learning rate lr_strat = [30, 60] # Epochs where learning rate gets decreased lr_factor = 0.1 # Learning rate decrease factor custom_weight_decay = 1e-4 # Weight Decay custom_momentum = 0.9 # Momentum args.ckp_prefix = '{}_nb_cl_fg_{}_nb_cl_{}_nb_protos_{}'.format(args.ckp_prefix, args.nb_cl_fg, args.nb_cl, args.nb_protos) np.random.seed(args.random_seed) # Fix the random seed print(args) ######################################## device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") #transform_train = transforms.Compose([ # transforms.RandomCrop(32, padding=4), # transforms.RandomHorizontalFlip(), # transforms.ToTensor(), # transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)), #]) #transform_test = transforms.Compose([ # transforms.ToTensor(), # transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)), #]) #trainset = torchvision.datasets.CIFAR100(root='./data', train=True, # download=True, transform=transform_train) #testset = torchvision.datasets.CIFAR100(root='./data', train=False, # download=True, transform=transform_test) #evalset = torchvision.datasets.CIFAR100(root='./data', train=False, # download=False, transform=transform_test) # Data loading code traindir = os.path.join(args.datadir, 'train') valdir = os.path.join(args.datadir, 'val') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) trainset = datasets.ImageFolder( traindir, transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ])) testset = datasets.ImageFolder(valdir, transforms.Compose([ transforms.Resize(256), 
transforms.CenterCrop(224), transforms.ToTensor(), normalize, ])) evalset = datasets.ImageFolder(valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ])) # Initialization dictionary_size = 1500 top1_acc_list_cumul = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs)) top1_acc_list_ori = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs)) #X_train_total = np.array(trainset.train_data) #Y_train_total = np.array(trainset.train_labels) #X_valid_total = np.array(testset.test_data) #Y_valid_total = np.array(testset.test_labels) X_train_total, Y_train_total = split_images_labels(trainset.imgs) X_valid_total, Y_valid_total = split_images_labels(testset.imgs) # Launch the different runs for iteration_total in range(args.nb_runs): # Select the order for the class learning order_name = "./checkpoint/seed_{}_{}_order_run_{}.pkl".format(args.random_seed, args.dataset, iteration_total) print("Order name:{}".format(order_name)) if os.path.exists(order_name): print("Loading orders") order = utils_pytorch.unpickle(order_name) else: print("Generating orders") order = np.arange(args.num_classes) np.random.shuffle(order) utils_pytorch.savepickle(order, order_name) order_list = list(order) print(order_list) # Initialization of the variables for this run X_valid_cumuls = [] X_protoset_cumuls = [] X_train_cumuls = [] Y_valid_cumuls = [] Y_protoset_cumuls = [] Y_train_cumuls = [] alpha_dr_herding = np.zeros((int(args.num_classes/args.nb_cl),dictionary_size,args.nb_cl),np.float32) # The following contains all the training samples of the different classes # because we want to compare our method with the theoretical case where all the training samples are stored # prototypes = np.zeros((args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3])) prototypes = [[] for i in range(args.num_classes)] for orde in range(args.num_classes): prototypes[orde] = 
X_train_total[np.where(Y_train_total==order[orde])] prototypes = np.array(prototypes) start_iter = int(args.nb_cl_fg/args.nb_cl)-1 for iteration in range(start_iter, int(args.num_classes/args.nb_cl)): #init model if iteration == start_iter: ############################################################ last_iter = 0 ############################################################ tg_model = modified_resnet.resnet18(num_classes=args.nb_cl_fg) in_features = tg_model.fc.in_features out_features = tg_model.fc.out_features print("in_features:", in_features, "out_features:", out_features) ref_model = None elif iteration == start_iter+1: ############################################################ last_iter = iteration ############################################################ #increment classes ref_model = copy.deepcopy(tg_model) in_features = tg_model.fc.in_features out_features = tg_model.fc.out_features print("in_features:", in_features, "out_features:", out_features) new_fc = modified_linear.SplitCosineLinear(in_features, out_features, args.nb_cl) new_fc.fc1.weight.data = tg_model.fc.weight.data new_fc.sigma.data = tg_model.fc.sigma.data tg_model.fc = new_fc lamda_mult = out_features*1.0 / args.nb_cl else: ############################################################ last_iter = iteration ############################################################ ref_model = copy.deepcopy(tg_model) in_features = tg_model.fc.in_features out_features1 = tg_model.fc.fc1.out_features out_features2 = tg_model.fc.fc2.out_features print("in_features:", in_features, "out_features1:", \ out_features1, "out_features2:", out_features2) new_fc = modified_linear.SplitCosineLinear(in_features, out_features1+out_features2, args.nb_cl) new_fc.fc1.weight.data[:out_features1] = tg_model.fc.fc1.weight.data new_fc.fc1.weight.data[out_features1:] = tg_model.fc.fc2.weight.data new_fc.sigma.data = tg_model.fc.sigma.data tg_model.fc = new_fc lamda_mult = (out_features1+out_features2)*1.0 / (args.nb_cl) if 
iteration > start_iter and args.less_forget and args.adapt_lamda: #cur_lamda = lamda_base * sqrt(num_old_classes/num_new_classes) cur_lamda = args.lamda * math.sqrt(lamda_mult) else: cur_lamda = args.lamda if iteration > start_iter and args.less_forget: print("###############################") print("Lamda for less forget is set to ", cur_lamda) print("###############################") # Prepare the training data for the current batch of classes actual_cl = order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] indices_train_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_train_total]) indices_test_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_valid_total]) X_train = X_train_total[indices_train_10] X_valid = X_valid_total[indices_test_10] X_valid_cumuls.append(X_valid) X_train_cumuls.append(X_train) X_valid_cumul = np.concatenate(X_valid_cumuls) X_train_cumul = np.concatenate(X_train_cumuls) Y_train = Y_train_total[indices_train_10] Y_valid = Y_valid_total[indices_test_10] Y_valid_cumuls.append(Y_valid) Y_train_cumuls.append(Y_train) Y_valid_cumul = np.concatenate(Y_valid_cumuls) Y_train_cumul = np.concatenate(Y_train_cumuls) # Add the stored exemplars to the training data if iteration == start_iter: X_valid_ori = X_valid Y_valid_ori = Y_valid else: X_protoset = np.concatenate(X_protoset_cumuls) Y_protoset = np.concatenate(Y_protoset_cumuls) if args.rs_ratio > 0: #1/rs_ratio = (len(X_train)+len(X_protoset)*scale_factor)/(len(X_protoset)*scale_factor) scale_factor = (len(X_train) * args.rs_ratio) / (len(X_protoset) * (1 - args.rs_ratio)) rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor)) #number of samples per epoch #rs_num_samples = len(X_train) + len(X_protoset) rs_num_samples = int(len(X_train) / (1 - args.rs_ratio)) print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples)) 
X_train = np.concatenate((X_train,X_protoset),axis=0) Y_train = np.concatenate((Y_train,Y_protoset)) # Launch the training loop print('Batch of classes number {0} arrives ...'.format(iteration+1)) map_Y_train = np.array([order_list.index(i) for i in Y_train]) map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul]) #imprint weights if iteration > start_iter and args.imprint_weights: #input: tg_model, X_train, map_Y_train #class_start = iteration*nb_cl class_end = (iteration+1)*nb_cl print("Imprint weights") ######################################### #compute the average norm of old embdding old_embedding_norm = tg_model.fc.fc1.weight.data.norm(dim=1, keepdim=True) average_old_embedding_norm = torch.mean(old_embedding_norm, dim=0).to('cpu').type(torch.DoubleTensor) ######################################### tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) num_features = tg_model.fc.in_features novel_embedding = torch.zeros((args.nb_cl, num_features)) for cls_idx in range(iteration*args.nb_cl, (iteration+1)*args.nb_cl): cls_indices = np.array([i == cls_idx for i in map_Y_train]) assert(len(np.where(cls_indices==1)[0])<=dictionary_size) #evalset.test_data = X_train[cls_indices].astype('uint8') #evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels current_eval_set = merge_images_labels(X_train[cls_indices], np.zeros(len(X_train[cls_indices]))) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2) num_samples = len(X_train[cls_indices]) cls_features = compute_features(tg_feature_model, evalloader, num_samples, num_features) #cls_features = cls_features.T #cls_features = cls_features / np.linalg.norm(cls_features,axis=0) #cls_embedding = np.mean(cls_features, axis=1) norm_features = F.normalize(torch.from_numpy(cls_features), p=2, dim=1) cls_embedding = torch.mean(norm_features, dim=0) 
#novel_embedding[cls_idx-iteration*args.nb_cl] = cls_embedding novel_embedding[cls_idx-iteration*args.nb_cl] = F.normalize(cls_embedding, p=2, dim=0) * average_old_embedding_norm tg_model.to(device) #torch.save(tg_model, "tg_model_before_imprint_weights.pth") tg_model.fc.fc2.weight.data = novel_embedding.to(device) #torch.save(tg_model, "tg_model_after_imprint_weights.pth") ############################################################ #trainset.train_data = X_train.astype('uint8') #trainset.train_labels = map_Y_train current_train_imgs = merge_images_labels(X_train, map_Y_train) trainset.imgs = trainset.samples = current_train_imgs if iteration > start_iter and args.rs_ratio > 0 and scale_factor > 1: print("Weights from sampling:", rs_sample_weights) index1 = np.where(rs_sample_weights>1)[0] index2 = np.where(map_Y_train<iteration*args.nb_cl)[0] assert((index1==index2).all()) train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples) #trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \ # shuffle=False, sampler=train_sampler, num_workers=2) trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \ shuffle=False, sampler=train_sampler, num_workers=args.num_workers, pin_memory=True) else: #trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, # shuffle=True, num_workers=2) trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True) #testset.test_data = X_valid_cumul.astype('uint8') #testset.test_labels = map_Y_valid_cumul current_test_imgs = merge_images_labels(X_valid_cumul, map_Y_valid_cumul) testset.imgs = testset.samples = current_test_imgs testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, num_workers=2) print('Max and Min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train))) print('Max and Min of valid 
labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul))) ############################################################## ckp_name = './checkpoint/{}_run_{}_iteration_{}_model.pth'.format(args.ckp_prefix, iteration_total, iteration) print('ckp_name', ckp_name) if args.resume and os.path.exists(ckp_name): print("###############################") print("Loading models from checkpoint") tg_model = torch.load(ckp_name) print("###############################") else: ############################### if iteration > start_iter and args.less_forget: #fix the embedding of old classes ignored_params = list(map(id, tg_model.fc.fc1.parameters())) base_params = filter(lambda p: id(p) not in ignored_params, \ tg_model.parameters()) tg_params =[{'params': base_params, 'lr': base_lr, 'weight_decay': custom_weight_decay}, \ {'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}] else: tg_params = tg_model.parameters() ############################### tg_model = tg_model.to(device) if iteration > start_iter: ref_model = ref_model.to(device) tg_optimizer = optim.SGD(tg_params, lr=base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay) tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=lr_strat, gamma=lr_factor) ############################### if args.less_forget and args.mr_loss: print("incremental_train_and_eval_MR_LF") tg_model = incremental_train_and_eval_MR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ args.dist, args.K, args.lw_mr) elif args.less_forget and args.amr_loss: print("incremental_train_and_eval_AMR_LF") tg_model = incremental_train_and_eval_AMR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ args.dist, args.K, args.lw_mr) else: if args.less_forget: print("incremental_train_and_eval_LF") tg_model = incremental_train_and_eval_LF(args.epochs, 
tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda) else: if args.mimic_score: print("incremental_train_and_eval_MS") tg_model = incremental_train_and_eval_MS(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, args.lw_ms) else: print("incremental_train_and_eval") tg_model = incremental_train_and_eval(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, args.T, args.beta) if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') torch.save(tg_model, ckp_name) ### Exemplars if args.fix_budget: nb_protos_cl = int(np.ceil(args.nb_protos*args.num_classes*1.0/args.nb_cl/(iteration+1))) else: nb_protos_cl = args.nb_protos tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) num_features = tg_model.fc.in_features # Herding print('Updating exemplar set...') for iter_dico in range(last_iter*args.nb_cl, (iteration+1)*args.nb_cl): # Possible exemplars in the feature space and projected on the L2 sphere # evalset.test_data = prototypes[iter_dico].astype('uint8') # evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels current_eval_set = merge_images_labels(prototypes[iter_dico], np.zeros(len(prototypes[iter_dico]))) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) num_samples = len(prototypes[iter_dico]) mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features) D = mapped_prototypes.T D = D/np.linalg.norm(D,axis=0) # Herding procedure : ranking of the potential exemplars mu = np.mean(D,axis=1) index1 = int(iter_dico/args.nb_cl) index2 = iter_dico % args.nb_cl alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0 w_t = mu iter_herding = 0 iter_herding_eff = 0 while 
not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000: tmp_t = np.dot(w_t,D) ind_max = np.argmax(tmp_t) iter_herding_eff += 1 if alpha_dr_herding[index1,ind_max,index2] == 0: alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding iter_herding += 1 w_t = w_t+mu-D[:,ind_max] # Prepare the protoset X_protoset_cumuls = [] Y_protoset_cumuls = [] # Class means for iCaRL and NCM + Storing the selected exemplars in the protoset print('Computing mean-of_exemplars and theoretical mean...') # class_means = np.zeros((64,100,2)) class_means = np.zeros((num_features, args.num_classes, 2)) for iteration2 in range(iteration+1): for iter_dico in range(args.nb_cl): current_cl = order[range(iteration2*args.nb_cl,(iteration2+1)*args.nb_cl)] # Collect data in the feature space for each class # evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico].astype('uint8') # evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels current_eval_set = merge_images_labels(prototypes[iteration2*args.nb_cl+iter_dico], \ np.zeros(len(prototypes[iteration2*args.nb_cl+iter_dico]))) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) num_samples = len(prototypes[iteration2*args.nb_cl+iter_dico]) mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features) D = mapped_prototypes.T D = D/np.linalg.norm(D,axis=0) # Flipped version also # evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8') # evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, # shuffle=False, num_workers=2) # mapped_prototypes2 = compute_features(tg_feature_model, evalloader, num_samples, num_features) # D2 = mapped_prototypes2.T # D2 = D2/np.linalg.norm(D2,axis=0) D2 = D # iCaRL alph = alpha_dr_herding[iteration2,:,iter_dico] 
assert((alph[num_samples:]==0).all()) alph = alph[:num_samples] alph = (alph>0)*(alph<nb_protos_cl+1)*1. # X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico,np.where(alph==1)[0]]) X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico][np.where(alph==1)[0]]) Y_protoset_cumuls.append(order[iteration2*args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0]))) alph = alph/np.sum(alph) class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2 class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0]) # Normal NCM # alph = np.ones(dictionary_size)/dictionary_size alph = np.ones(num_samples)/num_samples class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2 class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1]) torch.save(class_means, \ './checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration)) current_means = class_means[:, order[range(0,(iteration+1)*args.nb_cl)]] ############################################################## # Calculate validation error of model on the first nb_cl classes: map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori]) print('Computing accuracy on the original batch of classes...') # evalset.test_data = X_valid_ori.astype('uint8') # evalset.test_labels = map_Y_valid_ori current_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T ############################################################## # Calculate validation error of model on the cumul of classes: map_Y_valid_cumul = 
np.array([order_list.index(i) for i in Y_valid_cumul]) print('Computing cumulative accuracy...') # evalset.test_data = X_valid_cumul.astype('uint8') # evalset.test_labels = map_Y_valid_cumul current_eval_set = merge_images_labels(X_valid_cumul, map_Y_valid_cumul) evalset.imgs = evalset.samples = current_eval_set evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True) cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T ############################################################## # Calculate confusion matrix # print('Computing confusion matrix...') # cm = compute_confusion_matrix(tg_model, tg_feature_model, current_means, evalloader) # cm_name = './checkpoint/{}_run_{}_iteration_{}_confusion_matrix.pth'.format(args.ckp_prefix,iteration_total, iteration) # with open(cm_name, 'wb') as f: # pickle.dump(cm, f, 2) #for reading with Python 2 ############################################################## # Final save of the data torch.save(top1_acc_list_ori, \ './checkpoint/{}_run_{}_top1_acc_list_ori.pth'.format(args.ckp_prefix, iteration_total)) torch.save(top1_acc_list_cumul, \ './checkpoint/{}_run_{}_top1_acc_list_cumul.pth'.format(args.ckp_prefix, iteration_total))
30,418
53.809009
132
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/gen_resized_imagenet.py
#!/usr/bin/env python # coding=utf-8 import argparse import os import random import shutil import time import warnings import numpy as np import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvision.datasets as datasets import torchvision.models as models from PIL import Image src_root_dir = 'data/imagenet/data/' des_root_dir = 'data/imagenet_resized_256/data/' if not os.path.exists(des_root_dir): os.makedirs(des_root_dir) phase_list = ['train', 'val'] for phase in phase_list: if not os.path.exists(os.path.join(des_root_dir, phase)): os.mkdir(os.path.join(des_root_dir, phase)) data_dir = os.path.join(src_root_dir, phase) tg_dataset = datasets.ImageFolder(data_dir) for cls_name in tg_dataset.classes: if not os.path.exists(os.path.join(des_root_dir, phase, cls_name)): os.mkdir(os.path.join(des_root_dir, phase, cls_name)) cnt = 0 for item in tg_dataset.imgs: img_path = item[0] img = Image.open(img_path) img = img.convert('RGB') save_path = img_path.replace('imagenet', 'imagenet_resized_256') resized_img = img.resize((256,256), Image.BILINEAR) resized_img.save(save_path) cnt = cnt+1 if cnt % 1000 == 0: print(cnt, save_path) print("Hello World")
1,507
28
75
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/modified_linear.py
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from torch.nn import Module


class CosineLinear(Module):
    """Fully-connected layer that outputs cosine similarities.

    Input rows and weight rows are both L2-normalized before the matrix
    product, so each output entry is the cosine between an input vector and
    a class-embedding vector, optionally scaled by a learnable scalar
    ``sigma``.
    """

    def __init__(self, in_features, out_features, sigma=True):
        super(CosineLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if sigma:
            self.sigma = Parameter(torch.Tensor(1))
        else:
            # Register the name so ``self.sigma`` resolves to None.
            self.register_parameter('sigma', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(fan_in), 1/sqrt(fan_in)]; sigma starts at 1."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.sigma is not None:
            self.sigma.data.fill_(1)

    def forward(self, input):
        normalized_input = F.normalize(input, p=2, dim=1)
        normalized_weight = F.normalize(self.weight, p=2, dim=1)
        cosine = F.linear(normalized_input, normalized_weight)
        return cosine if self.sigma is None else self.sigma * cosine


class SplitCosineLinear(Module):
    """Two cosine-similarity heads with a single shared scaling factor.

    ``fc1`` scores the first ``out_features1`` classes and ``fc2`` the next
    ``out_features2``; their outputs are concatenated along dim 1.  The
    sub-layers are constructed without their own sigma so the shared one
    (when enabled) scales the concatenated result.
    """

    def __init__(self, in_features, out_features1, out_features2, sigma=True):
        super(SplitCosineLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features1 + out_features2
        self.fc1 = CosineLinear(in_features, out_features1, False)
        self.fc2 = CosineLinear(in_features, out_features2, False)
        if sigma:
            self.sigma = Parameter(torch.Tensor(1))
            self.sigma.data.fill_(1)
        else:
            self.register_parameter('sigma', None)

    def forward(self, x):
        scores = torch.cat((self.fc1(x), self.fc2(x)), dim=1)
        return scores if self.sigma is None else self.sigma * scores
2,235
36.898305
78
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/utils_imagenet/train_and_eval.py
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
# AverageMeter, accuracy and validate come from the sibling utils_train module.
from .utils_train import *


def train_and_eval(epochs, start_epoch, model, optimizer, lr_scheduler, \
        train_loader, val_loader, gpu=None):
    """Standard epoch loop: step the LR schedule, train one epoch, validate.

    Runs epochs [start_epoch, epochs) and returns ``model`` (trained in
    place).  ``gpu`` is an optional CUDA device index forwarded to ``train``
    and ``validate``.
    """
    for epoch in range(start_epoch, epochs):
        #adjust_learning_rate(optimizer, epoch)
        # NOTE(review): the scheduler is stepped before this epoch's optimizer
        # steps -- that matches the pre-1.1 PyTorch calling convention; confirm
        # against the torch version this project pins.
        lr_scheduler.step()
        print('\nEpoch: %d, LR: ' % epoch, end='')
        print(lr_scheduler.get_lr())
        # train for one epoch
        train(train_loader, model, optimizer, epoch, gpu)
        # evaluate on validation set
        validate(val_loader, model, gpu)
    return model


def train(train_loader, model, optimizer, epoch, gpu=None):
    """Run one training epoch over ``train_loader``.

    Tracks batch/data-loading time plus running loss, top-1 and top-5 meters,
    and prints a progress line every 10 batches.  ``gpu`` selects the CUDA
    device for the loss criterion and, when not None, for moving each batch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()
    criterion = nn.CrossEntropyLoss().cuda(gpu)

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if gpu is not None:
            input = input.cuda(gpu, non_blocking=True)
            target = target.cuda(gpu, non_blocking=True)

        # compute output
        output = model(input)
        loss = criterion(output, target)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
2,629
30.309524
75
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/utils_imagenet/__init__.py
#!/usr/bin/env python # coding=utf-8 # for incremental-class train and eval
76
18.25
38
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/utils_imagenet/utils_dataset.py
import argparse
import os
import shutil
import time
import numpy as np


def split_images_labels(imgs):
    """Split an ImageFolder-style list of (image, label) pairs.

    Returns two numpy arrays: the images/paths and the labels, in the same
    order as ``imgs``.
    """
    paths = [entry[0] for entry in imgs]
    targets = [entry[1] for entry in imgs]
    return np.array(paths), np.array(targets)


def merge_images_labels(images, labels):
    """Zip parallel image/label sequences back into a list of (image, label) tuples.

    The two sequences must have equal length (asserted).
    """
    images = list(images)
    labels = list(labels)
    assert len(images) == len(labels)
    return [(image, label) for image, label in zip(images, labels)]
576
19.607143
45
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/imagenet-class-incremental/utils_imagenet/utils_train.py
import argparse import os import shutil import time import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvision.datasets as datasets import torchvision.models as models def validate(val_loader, model, gpu=None): batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() # switch to evaluate mode model.eval() criterion = nn.CrossEntropyLoss().cuda(gpu) with torch.no_grad(): end = time.time() for i, (input, target) in enumerate(val_loader): if gpu is not None: input = input.cuda(gpu, non_blocking=True) target = target.cuda(gpu, non_blocking=True) # compute output output = model(input) loss = criterion(output, target) # measure accuracy and record loss prec1, prec5 = accuracy(output, target, topk=(1, 5)) losses.update(loss.item(), input.size(0)) top1.update(prec1[0], input.size(0)) top5.update(prec5[0], input.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % 10 == 0: print('Test: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5)) print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}' .format(top1=top1, top5=top5)) return top1.avg class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" with torch.no_grad(): maxk = 
max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res
2,897
29.505263
78
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/cifar100-class-incremental/utils_pytorch.py
#!/usr/bin/env python # coding=utf-8 from __future__ import print_function, division import torch import torch.nn as nn import torch.nn.init as init from collections import OrderedDict import numpy as np import os import os.path as osp import sys import time import math import subprocess try: import cPickle as pickle except: import pickle def savepickle(data, file_path): mkdir_p(osp.dirname(file_path), delete=False) print('pickle into', file_path) with open(file_path, 'wb') as f: pickle.dump(data, f, pickle.HIGHEST_PROTOCOL) def unpickle(file_path): with open(file_path, 'rb') as f: data = pickle.load(f) return data def mkdir_p(path, delete=False, print_info=True): if path == '': return if delete: subprocess.call(('rm -r ' + path).split()) if not osp.exists(path): if print_info: print('mkdir -p ' + path) subprocess.call(('mkdir -p ' + path).split()) def get_mean_and_std(dataset): '''Compute the mean and std value of dataset.''' dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2) mean = torch.zeros(3) std = torch.zeros(3) print('==> Computing mean and std..') for inputs, targets in dataloader: for i in range(3): mean[i] += inputs[:,i,:,:].mean() std[i] += inputs[:,i,:,:].std() mean.div_(len(dataset)) std.div_(len(dataset)) return mean, std def init_params(net): '''Init layer parameters.''' for m in net.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=1e-3) if m.bias is not None: init.constant_(m.bias, 0) _, term_width = os.popen('stty size', 'r').read().split() term_width = int(term_width) TOTAL_BAR_LENGTH = 65. last_time = time.time() begin_time = last_time def progress_bar(current, total, msg=None): global last_time, begin_time if current == 0: begin_time = time.time() # Reset for new bar. 
cur_len = int(TOTAL_BAR_LENGTH*current/total) rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1 sys.stdout.write(' [') for i in range(cur_len): sys.stdout.write('=') sys.stdout.write('>') for i in range(rest_len): sys.stdout.write('.') sys.stdout.write(']') cur_time = time.time() step_time = cur_time - last_time last_time = cur_time tot_time = cur_time - begin_time L = [] L.append(' Step: %s' % format_time(step_time)) L.append(' | Tot: %s' % format_time(tot_time)) if msg: L.append(' | ' + msg) msg = ''.join(L) sys.stdout.write(msg) for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3): sys.stdout.write(' ') # Go back to the center of the bar. for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2): sys.stdout.write('\b') sys.stdout.write(' %d/%d ' % (current+1, total)) if current < total-1: sys.stdout.write('\r') else: sys.stdout.write('\n') sys.stdout.flush() def format_time(seconds): days = int(seconds / 3600/24) seconds = seconds - days*3600*24 hours = int(seconds / 3600) seconds = seconds - hours*3600 minutes = int(seconds / 60) seconds = seconds - minutes*60 secondsf = int(seconds) seconds = seconds - secondsf millis = int(seconds*1000) f = '' i = 1 if days > 0: f += str(days) + 'D' i += 1 if hours > 0 and i <= 2: f += str(hours) + 'h' i += 1 if minutes > 0 and i <= 2: f += str(minutes) + 'm' i += 1 if secondsf > 0 and i <= 2: f += str(secondsf) + 's' i += 1 if millis > 0 and i <= 2: f += str(millis) + 'ms' i += 1 if f == '': f = '0ms' return f
4,102
26.172185
96
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/cifar100-class-incremental/eval_cumul_acc.py
#!/usr/bin/env python
# coding=utf-8
# Offline evaluation: for each incremental phase, load the saved model and
# class-means checkpoints and report cumulative accuracy over all classes
# seen so far, for three classifiers (CNN softmax head, iCaRL
# mean-of-exemplars, and NCM with the theoretical class mean).
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import sys
import copy
import argparse
from PIL import Image
try:
    import cPickle as pickle
except:
    import pickle
from scipy.spatial.distance import cdist
import modified_resnet_cifar
import modified_linear
import utils_pytorch
from utils_incremental.compute_features import compute_features
from utils_incremental.compute_accuracy import compute_accuracy
from utils_incremental.compute_confusion_matrix import compute_confusion_matrix

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######### Modifiable Settings ##########
parser = argparse.ArgumentParser()
parser.add_argument('--nb_cl', default=10, type=int, \
    help='Classes per group')
parser.add_argument('--ckp_prefix', \
    default='checkpoint/class_incremental_cifar100_nb_cl_10_nb_protos_20_run_0_', \
    type=str)
parser.add_argument('--order', \
    default='./checkpoint/cifar100_order_run_0.pkl', \
    type=str)
parser.add_argument('--nb_cl_fg', default=50, type=int, \
    help='the number of classes in first group')
args = parser.parse_args()
print(args)

# Class order used during training (maps original labels -> learned order).
order = utils_pytorch.unpickle(args.order)
order_list = list(order)
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
])
evalset = torchvision.datasets.CIFAR100(root='./data', train=False,
    download=False, transform=transform_test)
# NOTE(review): .test_data/.test_labels are the legacy torchvision CIFAR
# attributes -- confirm against the torchvision version this project pins.
input_data = evalset.test_data
input_labels = evalset.test_labels
# Remap raw CIFAR labels to their position in the training class order.
map_input_labels = np.array([order_list.index(i) for i in input_labels])
#evalset.test_labels = map_input_labels
#evalloader = torch.utils.data.DataLoader(evalset, batch_size=128,
#    shuffle=False, num_workers=2)

# Per-phase accuracy histories for the three classifier variants.
cnn_cumul_acc = []
icarl_cumul_acc = []
ncm_cumul_acc = []
num_classes = []
nb_cl = args.nb_cl
# The first phase already holds nb_cl_fg classes, i.e. nb_cl_fg/nb_cl groups.
start_iter = int(args.nb_cl_fg/nb_cl)-1
for iteration in range(start_iter, int(100/nb_cl)):
    #print("###########################################################")
    #print("For iteration {}".format(iteration))
    #print("###########################################################")
    ckp_name = '{}iteration_{}_model.pth'.format(args.ckp_prefix, iteration)
    class_means_name = '{}iteration_{}_class_means.pth'.format(args.ckp_prefix, iteration)
    tg_model = torch.load(ckp_name)
    # Feature extractor = the saved model minus its final (classifier) layer.
    tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
    class_means = torch.load(class_means_name)
    # Keep only the means of the classes seen up to this phase.
    current_means = class_means[:, order[:(iteration+1)*nb_cl]]
    # Select the test samples whose remapped label belongs to the seen classes.
    indices = np.array([i in range(0, (iteration+1)*nb_cl) for i in map_input_labels])
    evalset.test_data = input_data[indices]
    evalset.test_labels = map_input_labels[indices]
    #print('Max and Min of valid labels: {}, {}'.format(min(evalset.test_labels), max(evalset.test_labels)))
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=128,
        shuffle=False, num_workers=2)
    # compute_accuracy returns the (CNN, iCaRL, NCM) accuracy triple.
    acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader, print_info=False)
    cnn_cumul_acc.append(acc[0])
    icarl_cumul_acc.append(acc[1])
    ncm_cumul_acc.append(acc[2])
    num_classes.append((iteration+1)*nb_cl)

# Report: per-phase accuracies followed by [final] [mean] [class-weighted mean].
print("###########################################################")
print(' CNN acc: \t iCaRL acc \t NCM acc')
print("###########################################################")
for i in range(len(cnn_cumul_acc)):
    print("{:.2f} ".format(cnn_cumul_acc[i]), end='')
print("[{:.2f}] ".format(np.mean(cnn_cumul_acc[-1])), end='')
print("[{:.2f}] ".format(np.mean(cnn_cumul_acc)), end='')
print("[{:.2f}] ".format(np.sum(np.array(cnn_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='')
print("")
for i in range(len(icarl_cumul_acc)):
    print("{:.2f} ".format(icarl_cumul_acc[i]), end='')
print("[{:.2f}] ".format(np.mean(icarl_cumul_acc[-1])), end='')
print("[{:.2f}] ".format(np.mean(icarl_cumul_acc)), end='')
print("[{:.2f}] ".format(np.sum(np.array(icarl_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='')
print("")
# NOTE(review): loop bound uses len(cnn_cumul_acc); the three lists always
# have equal length here, so this is equivalent to len(ncm_cumul_acc).
for i in range(len(cnn_cumul_acc)):
    print("{:.2f} ".format(ncm_cumul_acc[i]), end='')
print("[{:.2f}] ".format(np.mean(ncm_cumul_acc[-1])), end='')
print("[{:.2f}] ".format(np.mean(ncm_cumul_acc)), end='')
print("[{:.2f}] ".format(np.sum(np.array(ncm_cumul_acc)*np.array(num_classes)) / np.sum(num_classes)), end='')
print("")
print("###########################################################")
print("")
print('Number of classes', num_classes)
print("###########################################################")
print("Final acc on all classes")
print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format(cnn_cumul_acc[-1], icarl_cumul_acc[-1], ncm_cumul_acc[-1]))
print("###########################################################")
print("Average acc in each phase")
print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format(np.mean(cnn_cumul_acc), np.mean(icarl_cumul_acc), np.mean(ncm_cumul_acc)))
print("###########################################################")
print("Weighted average acc in each phase")
print("CNN:{:.2f}\t iCaRL:{:.2f}\t NCM:{:.2f}".format(
    np.sum(np.array(cnn_cumul_acc)*np.array(num_classes)) / np.sum(num_classes),
    np.sum(np.array(icarl_cumul_acc)*np.array(num_classes)) / np.sum(num_classes),
    np.sum(np.array(ncm_cumul_acc)*np.array(num_classes)) / np.sum(num_classes) ))
print("###########################################################")
5,687
43.4375
128
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/cifar100-class-incremental/class_incremental_cifar100.py
#!/usr/bin/env python
# coding=utf-8
# Class-incremental learning on CIFAR-100 (iCaRL-style baseline).
# A ResNet-32 is trained on successive groups of classes; at each step the
# previous model serves as a distillation teacher, and a herding-based
# exemplar set is maintained per old class for rehearsal and for the
# mean-of-exemplars (iCaRL) / nearest-class-mean (NCM) classifiers.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import numpy as np
import time
import os
import sys
import copy
import argparse
from PIL import Image
try:
    import cPickle as pickle
except:
    import pickle
import resnet_cifar
import utils_pytorch
from utils_incremental.compute_features import compute_features
from utils_incremental.compute_accuracy import compute_accuracy
from utils_incremental.compute_confusion_matrix import compute_confusion_matrix
from utils_incremental.incremental_train_and_eval import incremental_train_and_eval

######### Modifiable Settings ##########
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cifar100', type=str)
parser.add_argument('--num_classes', default=100, type=int)
parser.add_argument('--nb_cl_fg', default=50, type=int, \
    help='the number of classes in first group')
parser.add_argument('--nb_cl', default=10, type=int, \
    help='Classes per group')
parser.add_argument('--nb_protos', default=20, type=int, \
    help='Number of prototypes per class at the end')
parser.add_argument('--nb_runs', default=1, type=int, \
    help='Number of runs (random ordering of classes at each run)')
parser.add_argument('--ckp_prefix', default=os.path.basename(sys.argv[0])[:-3], type=str, \
    help='Checkpoint prefix')
parser.add_argument('--epochs', default=160, type=int, \
    help='Epochs')
parser.add_argument('--T', default=2, type=float, \
    help='Temperature for distialltion')
parser.add_argument('--beta', default=0.25, type=float, \
    help='Beta for distialltion')
parser.add_argument('--resume', action='store_true', \
    help='resume from checkpoint')
parser.add_argument('--fix_budget', action='store_true', \
    help='fix budget')
parser.add_argument('--rs_ratio', default=0, type=float, \
    help='The ratio for resample')
parser.add_argument('--random_seed', default=1993, type=int, \
    help='random seed')
args = parser.parse_args()

########################################
# The first group must consist of a whole number of incremental groups.
assert(args.nb_cl_fg % args.nb_cl == 0)
assert(args.nb_cl_fg >= args.nb_cl)
train_batch_size = 128            # Batch size for train
test_batch_size = 100            # Batch size for test
eval_batch_size = 128            # Batch size for eval
base_lr = 0.1            # Initial learning rate
lr_strat = [80, 120]            # Epochs where learning rate gets decreased
lr_factor = 0.1            # Learning rate decrease factor
custom_weight_decay = 5e-4            # Weight Decay
custom_momentum = 0.9            # Momentum
args.ckp_prefix = '{}_nb_cl_fg_{}_nb_cl_{}_nb_protos_{}'.format(args.ckp_prefix, args.nb_cl_fg, args.nb_cl, args.nb_protos)
np.random.seed(args.random_seed)        # Fix the random seed
print(args)
########################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
])
trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
    download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR100(root='./data', train=False,
    download=True, transform=transform_test)
evalset = torchvision.datasets.CIFAR100(root='./data', train=False,
    download=False, transform=transform_test)

# Initialization
# NOTE(review): dictionary_size is the assumed number of training images per
# CIFAR-100 class (500) — prototypes below is sized with it.
dictionary_size = 500
# Accuracy tables, shape (num_groups, 3, nb_runs); presumably the 3 slots are
# the CNN / iCaRL / NCM accuracies returned by compute_accuracy — TODO confirm.
top1_acc_list_cumul = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs))
top1_acc_list_ori = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs))

X_train_total = np.array(trainset.train_data)
Y_train_total = np.array(trainset.train_labels)
X_valid_total = np.array(testset.test_data)
Y_valid_total = np.array(testset.test_labels)

# Launch the different runs
for iteration_total in range(args.nb_runs):
    # Select the order for the class learning
    order_name = "./checkpoint/seed_{}_{}_order_run_{}.pkl".format(args.random_seed, args.dataset, iteration_total)
    print("Order name:{}".format(order_name))
    if os.path.exists(order_name):
        print("Loading orders")
        order = utils_pytorch.unpickle(order_name)
    else:
        print("Generating orders")
        order = np.arange(args.num_classes)
        np.random.shuffle(order)
        utils_pytorch.savepickle(order, order_name)
    order_list = list(order)
    print(order_list)

    # Initialization of the variables for this run
    X_valid_cumuls = []
    X_protoset_cumuls = []
    X_train_cumuls = []
    Y_valid_cumuls = []
    Y_protoset_cumuls = []
    Y_train_cumuls = []
    # Herding ranks: alpha_dr_herding[group, sample, class-within-group] holds
    # 1-based selection order for each candidate exemplar (0 = never picked).
    alpha_dr_herding = np.zeros((int(args.num_classes/args.nb_cl),dictionary_size,args.nb_cl),np.float32)

    # The following contains all the training samples of the different classes
    # because we want to compare our method with the theoretical case where all the training samples are stored
    prototypes = np.zeros((args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3]))
    for orde in range(args.num_classes):
        prototypes[orde,:,:,:,:] = X_train_total[np.where(Y_train_total==order[orde])]

    # start_iter is the index of the (larger) first group of classes.
    start_iter = int(args.nb_cl_fg/args.nb_cl)-1
    for iteration in range(start_iter, int(args.num_classes/args.nb_cl)):
        #init model
        if iteration == start_iter:
            ############################################################
            last_iter = 0
            ############################################################
            tg_model = resnet_cifar.resnet32(num_classes=args.nb_cl_fg)
            ref_model = None
        else:
            ############################################################
            last_iter = iteration
            ############################################################
            #increment classes
            ref_model = copy.deepcopy(tg_model)
            # Widen the classifier by nb_cl outputs, keeping old weights.
            in_features = tg_model.fc.in_features
            out_features = tg_model.fc.out_features
            new_fc = nn.Linear(in_features, out_features+args.nb_cl)
            new_fc.weight.data[:out_features] = tg_model.fc.weight.data
            new_fc.bias.data[:out_features] = tg_model.fc.bias.data
            tg_model.fc = new_fc

        # Prepare the training data for the current batch of classes
        actual_cl = order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)]
        indices_train_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_train_total])
        indices_test_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_valid_total])
        X_train = X_train_total[indices_train_10]
        X_valid = X_valid_total[indices_test_10]
        X_valid_cumuls.append(X_valid)
        X_train_cumuls.append(X_train)
        X_valid_cumul = np.concatenate(X_valid_cumuls)
        X_train_cumul = np.concatenate(X_train_cumuls)
        Y_train = Y_train_total[indices_train_10]
        Y_valid = Y_valid_total[indices_test_10]
        Y_valid_cumuls.append(Y_valid)
        Y_train_cumuls.append(Y_train)
        Y_valid_cumul = np.concatenate(Y_valid_cumuls)
        Y_train_cumul = np.concatenate(Y_train_cumuls)

        # Add the stored exemplars to the training data
        if iteration == start_iter:
            # First group: remember its validation split for later "original
            # classes" accuracy reporting.
            X_valid_ori = X_valid
            Y_valid_ori = Y_valid
        else:
            X_protoset = np.concatenate(X_protoset_cumuls)
            Y_protoset = np.concatenate(Y_protoset_cumuls)
            if args.rs_ratio > 0:
                #1/rs_ratio = (len(X_train)+len(X_protoset)*scale_factor)/(len(X_protoset)*scale_factor)
                scale_factor = (len(X_train) * args.rs_ratio) / (len(X_protoset) * (1 - args.rs_ratio))
                rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor))
                #number of samples per epoch, undersample on the new classes
                #rs_num_samples = len(X_train) + len(X_protoset)
                rs_num_samples = int(len(X_train) / (1 - args.rs_ratio))
                print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples))
            X_train = np.concatenate((X_train,X_protoset),axis=0)
            Y_train = np.concatenate((Y_train,Y_protoset))

        # Launch the training loop
        print('Batch of classes number {0} arrives ...'.format(iteration+1))
        # Remap raw CIFAR labels into the shuffled class order (0..N-1).
        map_Y_train = np.array([order_list.index(i) for i in Y_train])
        map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
        ############################################################
        trainset.train_data = X_train.astype('uint8')
        trainset.train_labels = map_Y_train
        if iteration > start_iter and args.rs_ratio > 0 and scale_factor > 1:
            print("Weights from sampling:", rs_sample_weights)
            # Sanity check: only old-class (exemplar) samples carry weight > 1.
            index1 = np.where(rs_sample_weights>1)[0]
            index2 = np.where(map_Y_train<iteration*args.nb_cl)[0]
            assert((index1==index2).all())
            train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples)
            trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \
                shuffle=False, sampler=train_sampler, num_workers=2)
        else:
            trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=2)
        testset.test_data = X_valid_cumul.astype('uint8')
        testset.test_labels = map_Y_valid_cumul
        testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, num_workers=2)
        print('Max and Min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train)))
        print('Max and Min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul)))
        ##############################################################
        ckp_name = './checkpoint/{}_run_{}_iteration_{}_model.pth'.format(args.ckp_prefix, iteration_total, iteration)
        print('ckp_name', ckp_name)
        if args.resume and os.path.exists(ckp_name):
            print("###############################")
            print("Loading models from checkpoint")
            tg_model = torch.load(ckp_name)
            print("###############################")
        else:
            tg_params = tg_model.parameters()
            tg_model = tg_model.to(device)
            if iteration > start_iter:
                ref_model = ref_model.to(device)
            tg_optimizer = optim.SGD(tg_params, lr=base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay)
            tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=lr_strat, gamma=lr_factor)
            # Train with cross-entropy + distillation from ref_model (T, beta).
            tg_model = incremental_train_and_eval(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \
                trainloader, testloader, \
                iteration, start_iter, \
                args.T, args.beta)
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(tg_model, ckp_name)

        ### Exemplars
        if args.fix_budget:
            # Fixed total memory: spread nb_protos*100 exemplars over all
            # classes seen so far.
            nb_protos_cl = int(np.ceil(args.nb_protos*100./args.nb_cl/(iteration+1)))
        else:
            nb_protos_cl = args.nb_protos
        # Strip the final fc layer to obtain the feature extractor.
        tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1])
        num_features = tg_model.fc.in_features
        # Herding
        print('Updating exemplar set...')
        for iter_dico in range(last_iter*args.nb_cl, (iteration+1)*args.nb_cl):
            # Possible exemplars in the feature space and projected on the L2 sphere
            evalset.test_data = prototypes[iter_dico].astype('uint8')
            evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
            evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2)
            num_samples = evalset.test_data.shape[0]
            mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
            D = mapped_prototypes.T
            D = D/np.linalg.norm(D,axis=0)
            # Herding procedure : ranking of the potential exemplars
            # Greedily pick samples whose running sum best approximates the
            # class mean mu; rank of each pick is stored in alpha_dr_herding.
            mu = np.mean(D,axis=1)
            index1 = int(iter_dico/args.nb_cl)
            index2 = iter_dico % args.nb_cl
            alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0
            w_t = mu
            iter_herding = 0
            iter_herding_eff = 0
            # Stop when enough distinct exemplars are ranked, or after 1000
            # effective iterations as a safety bound.
            while not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000:
                tmp_t = np.dot(w_t,D)
                ind_max = np.argmax(tmp_t)
                iter_herding_eff += 1
                if alpha_dr_herding[index1,ind_max,index2] == 0:
                    alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding
                    iter_herding += 1
                w_t = w_t+mu-D[:,ind_max]

        # Prepare the protoset
        X_protoset_cumuls = []
        Y_protoset_cumuls = []

        # Class means for iCaRL and NCM + Storing the selected exemplars in the protoset
        print('Computing mean-of_exemplars and theoretical mean...')
        # class_means[feature, class, mode]: mode 0 = iCaRL mean-of-exemplars,
        # mode 1 = NCM mean over all samples. NOTE(review): 64 and 100 are
        # hard-coded for resnet32's feature width and 100 classes — would need
        # changing for other backbones / --num_classes values.
        class_means = np.zeros((64,100,2))
        for iteration2 in range(iteration+1):
            for iter_dico in range(args.nb_cl):
                current_cl = order[range(iteration2*args.nb_cl,(iteration2+1)*args.nb_cl)]

                # Collect data in the feature space for each class
                evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico].astype('uint8')
                evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels
                evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2)
                num_samples = evalset.test_data.shape[0]
                mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features)
                D = mapped_prototypes.T
                D = D/np.linalg.norm(D,axis=0)

                # Flipped version also
                evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8')
                evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2)
                mapped_prototypes2 = compute_features(tg_feature_model, evalloader, num_samples, num_features)
                D2 = mapped_prototypes2.T
                D2 = D2/np.linalg.norm(D2,axis=0)

                # iCaRL
                # Select the nb_protos_cl best-ranked exemplars for this class.
                alph = alpha_dr_herding[iteration2,:,iter_dico]
                alph = (alph>0)*(alph<nb_protos_cl+1)*1.
                X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico,np.where(alph==1)[0]])
                Y_protoset_cumuls.append(order[iteration2*args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0])))
                alph = alph/np.sum(alph)
                # Average original + flipped features, then renormalize.
                class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2
                class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0])

                # Normal NCM
                alph = np.ones(dictionary_size)/dictionary_size
                class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2
                class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1])

        torch.save(class_means, \
            './checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration))
        current_means = class_means[:, order[range(0,(iteration+1)*args.nb_cl)]]
        ##############################################################
        # Calculate validation error of model on the first nb_cl classes:
        map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
        print('Computing accuracy on the original batch of classes...')
        evalset.test_data = X_valid_ori.astype('uint8')
        evalset.test_labels = map_Y_valid_ori
        evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2)
        ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
        top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T
        ##############################################################
        # Calculate validation error of model on the cumul of classes:
        map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul])
        print('Computing cumulative accuracy...')
        evalset.test_data = X_valid_cumul.astype('uint8')
        evalset.test_labels = map_Y_valid_cumul
        evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2)
        cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader)
        top1_acc_list_cumul[iteration, :, iteration_total] = np.array(cumul_acc).T
        ##############################################################
        # Calculate confusion matrix
        print('Computing confusion matrix...')
        cm = compute_confusion_matrix(tg_model, tg_feature_model, current_means, evalloader)
        cm_name = './checkpoint/{}_run_{}_iteration_{}_confusion_matrix.pth'.format(args.ckp_prefix,iteration_total, iteration)
        with open(cm_name, 'wb') as f:
            pickle.dump(cm, f, 2) #for reading with Python 2
        ##############################################################

    # Final save of the data
    torch.save(top1_acc_list_ori, \
        './checkpoint/{}_run_{}_top1_acc_list_ori.pth'.format(args.ckp_prefix, iteration_total))
    torch.save(top1_acc_list_cumul, \
        './checkpoint/{}_run_{}_top1_acc_list_cumul.pth'.format(args.ckp_prefix, iteration_total))
18,765
51.565826
130
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/cifar100-class-incremental/resnet_cifar.py
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding (bias-free; a BatchNorm always follows)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    """Standard two-conv residual block for CIFAR ResNets (He et al., 2016).

    Output width equals `planes` (expansion = 1). `downsample`, when given,
    projects the shortcut to match shape on stride/width changes.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            # Project the identity branch so shapes match for the sum.
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (output width = planes * 4)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, three stages at 16/32/64 base channels,
    8x8 average pool, then a linear classifier.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks per stage, e.g. [5, 5, 5] for ResNet-32.
        num_classes: size of the final linear layer.
    """

    def __init__(self, block, layers, num_classes=10):
        self.inplanes = 16
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, layers[0])
        self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = nn.Linear(64 * block.expansion, num_classes)

        # He initialization for convs; BN starts as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage; adds a 1x1 projection shortcut when shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x


def resnet20(pretrained=False, **kwargs):
    """Construct a CIFAR ResNet-20 (6n+2 with n=3 basic blocks).

    `pretrained` is accepted for API symmetry but unused (no weights shipped).
    """
    n = 3
    model = ResNet(BasicBlock, [n, n, n], **kwargs)
    return model


def resnet32(pretrained=False, **kwargs):
    """Construct a CIFAR ResNet-32 (6n+2 with n=5 basic blocks)."""
    n = 5
    model = ResNet(BasicBlock, [n, n, n], **kwargs)
    return model


def resnet56(pretrained=False, **kwargs):
    """Construct a CIFAR ResNet-56 (6n+2 with n=9 basic blocks).

    Bug fix: this previously built ResNet(Bottleneck, [9, 9, 9]), which is an
    83-layer network with a 256-d feature head — not ResNet-56. Per the CIFAR
    scheme used by resnet20/resnet32 (depth = 6n+2 with BasicBlock), n=9 with
    BasicBlock yields the actual 56-layer model with a 64-d head.
    """
    n = 9
    model = ResNet(BasicBlock, [n, n, n], **kwargs)
    return model
4,525
29.375839
90
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/cifar100-class-incremental/modified_resnet_cifar.py
# Cosine-classifier variant of the CIFAR ResNet: the very last residual block
# omits its trailing ReLU (so the embedding keeps negative components), and the
# final nn.Linear is replaced by modified_linear.CosineLinear.
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import modified_linear


def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with 1-pixel padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )


class BasicBlock(nn.Module):
    """Two-conv residual block; with last=True the ReLU after the sum is skipped."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, last=False):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.last = last

    def forward(self, x):
        # Main branch: conv-bn-relu, conv-bn.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        # Shortcut branch, projected when shape changes.
        shortcut = x if self.downsample is None else self.downsample(x)
        y += shortcut

        if not self.last:
            # The network's final block stays un-rectified for the cosine head.
            y = self.relu(y)
        return y


class ResNet(nn.Module):
    """CIFAR ResNet backbone ending in a cosine-similarity classifier."""

    def __init__(self, block, layers, num_classes=10):
        self.inplanes = 16
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, layers[0])
        self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
        # Only the final stage gets last_phase, so only its last block
        # drops the trailing ReLU.
        self.layer3 = self._make_layer(block, 64, layers[2], stride=2,
                                       last_phase=True)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = modified_linear.CosineLinear(64 * block.expansion, num_classes)

        # He init for convolutions; BatchNorm starts as the identity map.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                nn.init.kaiming_normal_(mod.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(mod, nn.BatchNorm2d):
                nn.init.constant_(mod.weight, 1)
                nn.init.constant_(mod.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, last_phase=False):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection on the shortcut when stride or width changes.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        if last_phase:
            for _ in range(1, blocks - 1):
                stage.append(block(self.inplanes, planes))
            stage.append(block(self.inplanes, planes, last=True))
        else:
            for _ in range(1, blocks):
                stage.append(block(self.inplanes, planes))
        return nn.Sequential(*stage)

    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.fc(x)


def resnet20(pretrained=False, **kwargs):
    """Cosine-head ResNet-20 (n=3). `pretrained` is accepted but unused."""
    return ResNet(BasicBlock, [3, 3, 3], **kwargs)


def resnet32(pretrained=False, **kwargs):
    """Cosine-head ResNet-32 (n=5). `pretrained` is accepted but unused."""
    return ResNet(BasicBlock, [5, 5, 5], **kwargs)
3,716
31.893805
87
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/cifar100-class-incremental/class_incremental_cosine_cifar100.py
#!/usr/bin/env python # coding=utf-8 import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, models, transforms from torch.autograd import Variable import numpy as np import time import os import sys import copy import argparse from PIL import Image try: import cPickle as pickle except: import pickle import math import modified_resnet_cifar import modified_linear import utils_pytorch from utils_incremental.compute_features import compute_features from utils_incremental.compute_accuracy import compute_accuracy from utils_incremental.compute_confusion_matrix import compute_confusion_matrix from utils_incremental.incremental_train_and_eval import incremental_train_and_eval from utils_incremental.incremental_train_and_eval_MS import incremental_train_and_eval_MS from utils_incremental.incremental_train_and_eval_LF import incremental_train_and_eval_LF from utils_incremental.incremental_train_and_eval_MR_LF import incremental_train_and_eval_MR_LF from utils_incremental.incremental_train_and_eval_AMR_LF import incremental_train_and_eval_AMR_LF ######### Modifiable Settings ########## parser = argparse.ArgumentParser() parser.add_argument('--dataset', default='cifar100', type=str) parser.add_argument('--num_classes', default=100, type=int) parser.add_argument('--nb_cl_fg', default=50, type=int, \ help='the number of classes in first group') parser.add_argument('--nb_cl', default=10, type=int, \ help='Classes per group') parser.add_argument('--nb_protos', default=20, type=int, \ help='Number of prototypes per class at the end') parser.add_argument('--nb_runs', default=1, type=int, \ help='Number of runs (random ordering of classes at each run)') parser.add_argument('--ckp_prefix', default=os.path.basename(sys.argv[0])[:-3], type=str, \ help='Checkpoint prefix') parser.add_argument('--epochs', default=160, type=int, \ help='Epochs') 
parser.add_argument('--T', default=2, type=float, \ help='Temporature for distialltion') parser.add_argument('--beta', default=0.25, type=float, \ help='Beta for distialltion') parser.add_argument('--resume', action='store_true', \ help='resume from checkpoint') parser.add_argument('--fix_budget', action='store_true', \ help='fix budget') ######################################## parser.add_argument('--mimic_score', action='store_true', \ help='To mimic scores for cosine embedding') parser.add_argument('--lw_ms', default=1, type=float, \ help='loss weight for mimicking score') ######################################## #improved class incremental learning parser.add_argument('--rs_ratio', default=0, type=float, \ help='The ratio for resample') parser.add_argument('--imprint_weights', action='store_true', \ help='Imprint the weights for novel classes') parser.add_argument('--less_forget', action='store_true', \ help='Less forgetful') parser.add_argument('--lamda', default=5, type=float, \ help='Lamda for LF') parser.add_argument('--adapt_lamda', action='store_true', \ help='Adaptively change lamda') parser.add_argument('--mr_loss', action='store_true', \ help='Margin ranking loss v1') parser.add_argument('--amr_loss', action='store_true', \ help='Margin ranking loss v2') parser.add_argument('--dist', default=0.5, type=float, \ help='Dist for MarginRankingLoss') parser.add_argument('--K', default=2, type=int, \ help='K for MarginRankingLoss') parser.add_argument('--lw_mr', default=1, type=float, \ help='loss weight for margin ranking loss') ######################################## parser.add_argument('--random_seed', default=1993, type=int, \ help='random seed') args = parser.parse_args() ######################################## assert(args.nb_cl_fg % args.nb_cl == 0) assert(args.nb_cl_fg >= args.nb_cl) train_batch_size = 128 # Batch size for train test_batch_size = 100 # Batch size for test eval_batch_size = 128 # Batch size for eval base_lr = 0.1 # Initial learning 
rate lr_strat = [80, 120] # Epochs where learning rate gets decreased lr_factor = 0.1 # Learning rate decrease factor custom_weight_decay = 5e-4 # Weight Decay custom_momentum = 0.9 # Momentum args.ckp_prefix = '{}_nb_cl_fg_{}_nb_cl_{}_nb_protos_{}'.format(args.ckp_prefix, args.nb_cl_fg, args.nb_cl, args.nb_protos) np.random.seed(args.random_seed) # Fix the random seed print(args) ######################################## device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)), ]) transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)), ]) trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train) testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test) evalset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test) # Initialization dictionary_size = 500 top1_acc_list_cumul = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs)) top1_acc_list_ori = np.zeros((int(args.num_classes/args.nb_cl),3,args.nb_runs)) X_train_total = np.array(trainset.train_data) Y_train_total = np.array(trainset.train_labels) X_valid_total = np.array(testset.test_data) Y_valid_total = np.array(testset.test_labels) # Launch the different runs for iteration_total in range(args.nb_runs): # Select the order for the class learning order_name = "./checkpoint/seed_{}_{}_order_run_{}.pkl".format(args.random_seed, args.dataset, iteration_total) print("Order name:{}".format(order_name)) if os.path.exists(order_name): print("Loading orders") order = utils_pytorch.unpickle(order_name) else: print("Generating orders") order = np.arange(args.num_classes) 
np.random.shuffle(order) utils_pytorch.savepickle(order, order_name) order_list = list(order) print(order_list) # Initialization of the variables for this run X_valid_cumuls = [] X_protoset_cumuls = [] X_train_cumuls = [] Y_valid_cumuls = [] Y_protoset_cumuls = [] Y_train_cumuls = [] alpha_dr_herding = np.zeros((int(args.num_classes/args.nb_cl),dictionary_size,args.nb_cl),np.float32) # The following contains all the training samples of the different classes # because we want to compare our method with the theoretical case where all the training samples are stored prototypes = np.zeros((args.num_classes,dictionary_size,X_train_total.shape[1],X_train_total.shape[2],X_train_total.shape[3])) for orde in range(args.num_classes): prototypes[orde,:,:,:,:] = X_train_total[np.where(Y_train_total==order[orde])] start_iter = int(args.nb_cl_fg/args.nb_cl)-1 for iteration in range(start_iter, int(args.num_classes/args.nb_cl)): #init model if iteration == start_iter: ############################################################ last_iter = 0 ############################################################ tg_model = modified_resnet_cifar.resnet32(num_classes=args.nb_cl_fg) in_features = tg_model.fc.in_features out_features = tg_model.fc.out_features print("in_features:", in_features, "out_features:", out_features) ref_model = None elif iteration == start_iter+1: ############################################################ last_iter = iteration ############################################################ #increment classes ref_model = copy.deepcopy(tg_model) in_features = tg_model.fc.in_features out_features = tg_model.fc.out_features print("in_features:", in_features, "out_features:", out_features) new_fc = modified_linear.SplitCosineLinear(in_features, out_features, args.nb_cl) new_fc.fc1.weight.data = tg_model.fc.weight.data new_fc.sigma.data = tg_model.fc.sigma.data tg_model.fc = new_fc lamda_mult = out_features*1.0 / args.nb_cl else: 
############################################################ last_iter = iteration ############################################################ ref_model = copy.deepcopy(tg_model) in_features = tg_model.fc.in_features out_features1 = tg_model.fc.fc1.out_features out_features2 = tg_model.fc.fc2.out_features print("in_features:", in_features, "out_features1:", \ out_features1, "out_features2:", out_features2) new_fc = modified_linear.SplitCosineLinear(in_features, out_features1+out_features2, args.nb_cl) new_fc.fc1.weight.data[:out_features1] = tg_model.fc.fc1.weight.data new_fc.fc1.weight.data[out_features1:] = tg_model.fc.fc2.weight.data new_fc.sigma.data = tg_model.fc.sigma.data tg_model.fc = new_fc lamda_mult = (out_features1+out_features2)*1.0 / (args.nb_cl) if iteration > start_iter and args.less_forget and args.adapt_lamda: #cur_lamda = lamda_base * sqrt(num_old_classes/num_new_classes) cur_lamda = args.lamda * math.sqrt(lamda_mult) else: cur_lamda = args.lamda if iteration > start_iter and args.less_forget: print("###############################") print("Lamda for less forget is set to ", cur_lamda) print("###############################") # Prepare the training data for the current batch of classes actual_cl = order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] indices_train_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_train_total]) indices_test_10 = np.array([i in order[range(last_iter*args.nb_cl,(iteration+1)*args.nb_cl)] for i in Y_valid_total]) X_train = X_train_total[indices_train_10] X_valid = X_valid_total[indices_test_10] X_valid_cumuls.append(X_valid) X_train_cumuls.append(X_train) X_valid_cumul = np.concatenate(X_valid_cumuls) X_train_cumul = np.concatenate(X_train_cumuls) Y_train = Y_train_total[indices_train_10] Y_valid = Y_valid_total[indices_test_10] Y_valid_cumuls.append(Y_valid) Y_train_cumuls.append(Y_train) Y_valid_cumul = np.concatenate(Y_valid_cumuls) Y_train_cumul = 
np.concatenate(Y_train_cumuls) # Add the stored exemplars to the training data if iteration == start_iter: X_valid_ori = X_valid Y_valid_ori = Y_valid else: X_protoset = np.concatenate(X_protoset_cumuls) Y_protoset = np.concatenate(Y_protoset_cumuls) if args.rs_ratio > 0: #1/rs_ratio = (len(X_train)+len(X_protoset)*scale_factor)/(len(X_protoset)*scale_factor) scale_factor = (len(X_train) * args.rs_ratio) / (len(X_protoset) * (1 - args.rs_ratio)) rs_sample_weights = np.concatenate((np.ones(len(X_train)), np.ones(len(X_protoset))*scale_factor)) #number of samples per epoch, undersample on the new classes #rs_num_samples = len(X_train) + len(X_protoset) rs_num_samples = int(len(X_train) / (1 - args.rs_ratio)) print("X_train:{}, X_protoset:{}, rs_num_samples:{}".format(len(X_train), len(X_protoset), rs_num_samples)) X_train = np.concatenate((X_train,X_protoset),axis=0) Y_train = np.concatenate((Y_train,Y_protoset)) # Launch the training loop print('Batch of classes number {0} arrives ...'.format(iteration+1)) map_Y_train = np.array([order_list.index(i) for i in Y_train]) map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul]) #imprint weights if iteration > start_iter and args.imprint_weights: #input: tg_model, X_train, map_Y_train #class_start = iteration*nb_cl class_end = (iteration+1)*nb_cl print("Imprint weights") ######################################### #compute the average norm of old embdding old_embedding_norm = tg_model.fc.fc1.weight.data.norm(dim=1, keepdim=True) average_old_embedding_norm = torch.mean(old_embedding_norm, dim=0).to('cpu').type(torch.DoubleTensor) ######################################### tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) num_features = tg_model.fc.in_features novel_embedding = torch.zeros((args.nb_cl, num_features)) for cls_idx in range(iteration*args.nb_cl, (iteration+1)*args.nb_cl): cls_indices = np.array([i == cls_idx for i in map_Y_train]) 
assert(len(np.where(cls_indices==1)[0])==dictionary_size) evalset.test_data = X_train[cls_indices].astype('uint8') evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2) num_samples = evalset.test_data.shape[0] cls_features = compute_features(tg_feature_model, evalloader, num_samples, num_features) #cls_features = cls_features.T #cls_features = cls_features / np.linalg.norm(cls_features,axis=0) #cls_embedding = np.mean(cls_features, axis=1) norm_features = F.normalize(torch.from_numpy(cls_features), p=2, dim=1) cls_embedding = torch.mean(norm_features, dim=0) #novel_embedding[cls_idx-iteration*args.nb_cl] = cls_embedding novel_embedding[cls_idx-iteration*args.nb_cl] = F.normalize(cls_embedding, p=2, dim=0) * average_old_embedding_norm tg_model.to(device) #torch.save(tg_model, "tg_model_before_imprint_weights.pth") tg_model.fc.fc2.weight.data = novel_embedding.to(device) #torch.save(tg_model, "tg_model_after_imprint_weights.pth") ############################################################ trainset.train_data = X_train.astype('uint8') trainset.train_labels = map_Y_train if iteration > start_iter and args.rs_ratio > 0 and scale_factor > 1: print("Weights from sampling:", rs_sample_weights) index1 = np.where(rs_sample_weights>1)[0] index2 = np.where(map_Y_train<iteration*args.nb_cl)[0] assert((index1==index2).all()) train_sampler = torch.utils.data.sampler.WeightedRandomSampler(rs_sample_weights, rs_num_samples) trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, \ shuffle=False, sampler=train_sampler, num_workers=2) else: trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=2) testset.test_data = X_valid_cumul.astype('uint8') testset.test_labels = map_Y_valid_cumul testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, 
num_workers=2) print('Max and Min of train labels: {}, {}'.format(min(map_Y_train), max(map_Y_train))) print('Max and Min of valid labels: {}, {}'.format(min(map_Y_valid_cumul), max(map_Y_valid_cumul))) ############################################################## ckp_name = './checkpoint/{}_run_{}_iteration_{}_model.pth'.format(args.ckp_prefix, iteration_total, iteration) print('ckp_name', ckp_name) if args.resume and os.path.exists(ckp_name): print("###############################") print("Loading models from checkpoint") tg_model = torch.load(ckp_name) print("###############################") else: ############################### if iteration > start_iter and args.less_forget: #fix the embedding of old classes ignored_params = list(map(id, tg_model.fc.fc1.parameters())) base_params = filter(lambda p: id(p) not in ignored_params, \ tg_model.parameters()) tg_params =[{'params': base_params, 'lr': base_lr, 'weight_decay': custom_weight_decay}, \ {'params': tg_model.fc.fc1.parameters(), 'lr': 0, 'weight_decay': 0}] else: tg_params = tg_model.parameters() ############################### tg_model = tg_model.to(device) if iteration > start_iter: ref_model = ref_model.to(device) tg_optimizer = optim.SGD(tg_params, lr=base_lr, momentum=custom_momentum, weight_decay=custom_weight_decay) tg_lr_scheduler = lr_scheduler.MultiStepLR(tg_optimizer, milestones=lr_strat, gamma=lr_factor) ############################### if args.less_forget and args.mr_loss: print("incremental_train_and_eval_MR_LF") tg_model = incremental_train_and_eval_MR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ args.dist, args.K, args.lw_mr) elif args.less_forget and args.amr_loss: print("incremental_train_and_eval_AMR_LF") tg_model = incremental_train_and_eval_AMR_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda, \ args.dist, args.K, 
args.lw_mr) else: if args.less_forget: print("incremental_train_and_eval_LF") tg_model = incremental_train_and_eval_LF(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, \ cur_lamda) else: if args.mimic_score: print("incremental_train_and_eval_MS") tg_model = incremental_train_and_eval_MS(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, args.lw_ms) else: print("incremental_train_and_eval") tg_model = incremental_train_and_eval(args.epochs, tg_model, ref_model, tg_optimizer, tg_lr_scheduler, \ trainloader, testloader, \ iteration, start_iter, args.T, args.beta) if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') torch.save(tg_model, ckp_name) ### Exemplars if args.fix_budget: nb_protos_cl = int(np.ceil(args.nb_protos*100./args.nb_cl/(iteration+1))) else: nb_protos_cl = args.nb_protos tg_feature_model = nn.Sequential(*list(tg_model.children())[:-1]) num_features = tg_model.fc.in_features # Herding print('Updating exemplar set...') for iter_dico in range(last_iter*args.nb_cl, (iteration+1)*args.nb_cl): # Possible exemplars in the feature space and projected on the L2 sphere evalset.test_data = prototypes[iter_dico].astype('uint8') evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2) num_samples = evalset.test_data.shape[0] mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features) D = mapped_prototypes.T D = D/np.linalg.norm(D,axis=0) # Herding procedure : ranking of the potential exemplars mu = np.mean(D,axis=1) index1 = int(iter_dico/args.nb_cl) index2 = iter_dico % args.nb_cl alpha_dr_herding[index1,:,index2] = alpha_dr_herding[index1,:,index2]*0 w_t = mu iter_herding = 0 iter_herding_eff = 0 while 
not(np.sum(alpha_dr_herding[index1,:,index2]!=0)==min(nb_protos_cl,500)) and iter_herding_eff<1000: tmp_t = np.dot(w_t,D) ind_max = np.argmax(tmp_t) iter_herding_eff += 1 if alpha_dr_herding[index1,ind_max,index2] == 0: alpha_dr_herding[index1,ind_max,index2] = 1+iter_herding iter_herding += 1 w_t = w_t+mu-D[:,ind_max] # Prepare the protoset X_protoset_cumuls = [] Y_protoset_cumuls = [] # Class means for iCaRL and NCM + Storing the selected exemplars in the protoset print('Computing mean-of_exemplars and theoretical mean...') class_means = np.zeros((64,100,2)) for iteration2 in range(iteration+1): for iter_dico in range(args.nb_cl): current_cl = order[range(iteration2*args.nb_cl,(iteration2+1)*args.nb_cl)] # Collect data in the feature space for each class evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico].astype('uint8') evalset.test_labels = np.zeros(evalset.test_data.shape[0]) #zero labels evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2) num_samples = evalset.test_data.shape[0] mapped_prototypes = compute_features(tg_feature_model, evalloader, num_samples, num_features) D = mapped_prototypes.T D = D/np.linalg.norm(D,axis=0) # Flipped version also evalset.test_data = prototypes[iteration2*args.nb_cl+iter_dico][:,:,:,::-1].astype('uint8') evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2) mapped_prototypes2 = compute_features(tg_feature_model, evalloader, num_samples, num_features) D2 = mapped_prototypes2.T D2 = D2/np.linalg.norm(D2,axis=0) # iCaRL alph = alpha_dr_herding[iteration2,:,iter_dico] alph = (alph>0)*(alph<nb_protos_cl+1)*1. 
X_protoset_cumuls.append(prototypes[iteration2*args.nb_cl+iter_dico,np.where(alph==1)[0]]) Y_protoset_cumuls.append(order[iteration2*args.nb_cl+iter_dico]*np.ones(len(np.where(alph==1)[0]))) alph = alph/np.sum(alph) class_means[:,current_cl[iter_dico],0] = (np.dot(D,alph)+np.dot(D2,alph))/2 class_means[:,current_cl[iter_dico],0] /= np.linalg.norm(class_means[:,current_cl[iter_dico],0]) # Normal NCM alph = np.ones(dictionary_size)/dictionary_size class_means[:,current_cl[iter_dico],1] = (np.dot(D,alph)+np.dot(D2,alph))/2 class_means[:,current_cl[iter_dico],1] /= np.linalg.norm(class_means[:,current_cl[iter_dico],1]) torch.save(class_means, \ './checkpoint/{}_run_{}_iteration_{}_class_means.pth'.format(args.ckp_prefix,iteration_total, iteration)) current_means = class_means[:, order[range(0,(iteration+1)*args.nb_cl)]] ############################################################## # Calculate validation error of model on the first nb_cl classes: map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori]) print('Computing accuracy on the original batch of classes...') evalset.test_data = X_valid_ori.astype('uint8') evalset.test_labels = map_Y_valid_ori evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2) ori_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) top1_acc_list_ori[iteration, :, iteration_total] = np.array(ori_acc).T ############################################################## # Calculate validation error of model on the cumul of classes: map_Y_valid_cumul = np.array([order_list.index(i) for i in Y_valid_cumul]) print('Computing cumulative accuracy...') evalset.test_data = X_valid_cumul.astype('uint8') evalset.test_labels = map_Y_valid_cumul evalloader = torch.utils.data.DataLoader(evalset, batch_size=eval_batch_size, shuffle=False, num_workers=2) cumul_acc = compute_accuracy(tg_model, tg_feature_model, current_means, evalloader) top1_acc_list_cumul[iteration, :, 
iteration_total] = np.array(cumul_acc).T ############################################################## # Calculate confusion matrix print('Computing confusion matrix...') cm = compute_confusion_matrix(tg_model, tg_feature_model, current_means, evalloader) cm_name = './checkpoint/{}_run_{}_iteration_{}_confusion_matrix.pth'.format(args.ckp_prefix,iteration_total, iteration) with open(cm_name, 'wb') as f: pickle.dump(cm, f, 2) #for reading with Python 2 ############################################################## # Final save of the data torch.save(top1_acc_list_ori, \ './checkpoint/{}_run_{}_top1_acc_list_ori.pth'.format(args.ckp_prefix, iteration_total)) torch.save(top1_acc_list_cumul, \ './checkpoint/{}_run_{}_top1_acc_list_cumul.pth'.format(args.ckp_prefix, iteration_total))
26,967
53.370968
131
py
CVPR19_Incremental_Learning
CVPR19_Incremental_Learning-master/cifar100-class-incremental/modified_linear.py
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from torch.nn import Module


class CosineLinear(Module):
    """Linear layer that scores inputs by cosine similarity.

    Both the input rows and the weight rows are L2-normalized before the
    matrix product, so each output logit is the cosine of the angle between
    an input vector and a class embedding.  An optional learnable scalar
    ``sigma`` rescales the cosine scores (acts like an inverse softmax
    temperature).

    Args:
        in_features: size of each input sample.
        out_features: number of classes / output logits.
        sigma: if True, learn a scalar scale applied to the cosine scores.
    """

    def __init__(self, in_features, out_features, sigma=True):
        super(CosineLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if sigma:
            self.sigma = Parameter(torch.Tensor(1))
        else:
            # keep the attribute present (as None) so callers can test it
            self.register_parameter('sigma', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Fan-in based uniform init, same scheme as classic torch.nn.Linear.
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.sigma is not None:
            self.sigma.data.fill_(1)  # start as an identity scale

    def forward(self, input):
        normalized_input = F.normalize(input, p=2, dim=1)
        normalized_weight = F.normalize(self.weight, p=2, dim=1)
        cosine = F.linear(normalized_input, normalized_weight)
        if self.sigma is not None:
            cosine = self.sigma * cosine
        return cosine


class SplitCosineLinear(Module):
    """Cosine classifier split into two heads whose outputs are concatenated.

    Two sigma-less :class:`CosineLinear` branches — ``fc1`` for the first
    ``out_features1`` classes (old) and ``fc2`` for the next
    ``out_features2`` classes (new) — share one learnable scale ``sigma``
    applied to the concatenated scores.
    """

    def __init__(self, in_features, out_features1, out_features2, sigma=True):
        super(SplitCosineLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features1 + out_features2
        self.fc1 = CosineLinear(in_features, out_features1, False)
        self.fc2 = CosineLinear(in_features, out_features2, False)
        if sigma:
            self.sigma = Parameter(torch.Tensor(1))
            self.sigma.data.fill_(1)
        else:
            self.register_parameter('sigma', None)

    def forward(self, x):
        # Concatenate old-class and new-class scores along the channel dim,
        # then apply the single shared scale.
        scores = torch.cat((self.fc1(x), self.fc2(x)), dim=1)
        if self.sigma is not None:
            scores = self.sigma * scores
        return scores
2,235
36.898305
78
py
BarchartReverseEngineering
BarchartReverseEngineering-master/generate_random_bar_chart.py
#!/usr/bin/env python
# coding: utf-8

# In[1]:

import os
import matplotlib
matplotlib.use("Agg")  # headless backend; must be set before importing pyplot
import random
import string
import itertools
import argparse
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import font_manager
from matplotlib import cm
from tqdm import tqdm

# In[2]:

### random bar chart configuration ###
# All randomized chart parameters are sampled from the ranges below.
bar_dirction_list = ["horizontal", "vertical"]
bar_per_loc_list = [1, 2]  # how many bars are there in one ordinal position
bar_num_min = 2
bar_num_max = 3
bar_value_min = 10
bar_value_max = 200
axis_label_length_min = 5
axis_label_length_max = 15
axis_label_size_min = 15
axis_label_size_max = 18
legend_position = ["top", "right", "bottom"]
legend_length_min = 3
legend_length_max = 6
legend_size_min = 15
legend_size_max = 18
ticks_label_length_min = 1
ticks_label_length_max = 5
ticks_label_size_min = 14
ticks_label_size_max = 16
dpi_min = 50
dpi_max = 80
figsize_min = 6
figsize_max = 8
title_length_min = 5
title_length_max = 15
title_size_min = 18
title_size_max = 20
title_location = ["left", "center", "right"]
# fonts_list = font_manager.findSystemFonts()
fonts_list = font_manager.findSystemFonts()[0:2]
styles = plt.style.available
if 'dark_background' in styles:
    # dark backgrounds make bar/label detection ambiguous, so drop that style
    styles.remove('dark_background')

# In[3]:


def get_random_plot(filename):
    """
    Random bar chart generation method.

    Inputs:
        filename(string): name of the chart image which will be saved
    Returns:
        (fig, ax, bars, data, title, legend, axis_ticks, axis_label,
        bar_direction) — the matplotlib objects needed later to recover
        pixel bounding boxes, plus the raw (x, y) data and bar direction.
    """
    # Outputs (None / empty when the corresponding element is switched off)
    ax = None
    fig = None
    bars = []
    data = None
    title = None
    legend = None
    axis_ticks = None
    axis_label = None

    # plot style
    style = random.choice(styles)
    plt.style.use(style)

    # resolution and figure size
    dpi = random.randint(dpi_min, dpi_max)
    figsize = [random.randint(figsize_min, figsize_max),
               random.randint(figsize_min, figsize_max)]
    fig, ax = plt.subplots(figsize=figsize, dpi=dpi)

    # bars setting
    bar_num = random.randint(bar_num_min, bar_num_max)
    bar_per_loc = random.choice(bar_per_loc_list)
    bar_direction = random.choice(bar_dirction_list)
    bar_width = random.choice([0.6, 0.7, 0.8, 0.9])

    # generate random data according to bar_per_loc
    bar_value_range = random.randint(int(bar_value_max * 0.2), bar_value_max)
    y = np.random.rand(bar_per_loc, bar_num)
    y = bar_value_min + y * bar_value_range
    y = y.astype(np.int32)
    bar_dist = random.choice([0.5, 1, 1.5])
    bar_start = random.choice([0, 0.4, 0.8])
    # x stores the start position of every group of bars
    # (bar_per_loc bars in one group)
    x = [bar_start]
    last = bar_start
    for i in range(bar_num - 1):
        last = last + bar_width * bar_per_loc + bar_dist
        x.append(last)
    x = np.array(x)
    data = (x, y)

    # dispatch to the horizontal/vertical variants of the axis helpers so the
    # rest of the code is direction-agnostic
    if bar_direction == "horizontal":
        bar_generator = ax.barh
        set_hticks = ax.set_yticks
        set_hticklabels = ax.set_yticklabels
        get_vticklabels = ax.get_xticklabels
        # if the bars are horizontal, invert the y axis
        ax.invert_yaxis()
    else:
        bar_generator = ax.bar
        set_hticks = ax.set_xticks
        set_hticklabels = ax.set_xticklabels
        get_vticklabels = ax.get_yticklabels

    colors = cm.jet(np.random.rand(bar_per_loc))
    linewidth = random.choice([0, 1])
    for i in range(bar_per_loc):
        temp = bar_generator(x + bar_width * i, y[i], bar_width, align="edge",
                             color=colors[i], linewidth=linewidth,
                             edgecolor="black")
        bars.append(temp)

    # fonts and fonts size
    font = random.choice(fonts_list)
    title_size = random.choice(range(title_size_min, title_size_max + 1))
    axis_label_size = random.choice(range(axis_label_size_min, axis_label_size_max + 1))
    ticks_label_size = random.choice(range(ticks_label_size_min, ticks_label_size_max + 1))
    legend_size = random.choice(range(legend_size_min, legend_size_max + 1))
    ticks_label_font = font_manager.FontProperties(fname=font, size=ticks_label_size)
    title_font = font_manager.FontProperties(fname=font, size=title_size)
    axis_label_font = font_manager.FontProperties(fname=font, size=axis_label_size)
    legend_font = font_manager.FontProperties(fname=font, size=legend_size)

    # Title and Label text: random letter strings with spaces mixed in
    letter_weights = np.ones((len(string.ascii_letters) + 1))
    # increase the weight of white space character
    letter_weights[-1] = int(len(letter_weights) * 0.2)
    letter_weights = list(itertools.accumulate(letter_weights))
    letters = string.ascii_letters + " "
    title_length = random.choice(range(title_length_min, title_length_max))
    title_text = "".join(random.choices(letters, cum_weights=letter_weights,
                                        k=title_length)).strip()
    xlabel_length = random.choice(range(axis_label_length_min, axis_label_length_max))
    xlabel = "".join(random.choices(letters, cum_weights=letter_weights,
                                    k=xlabel_length)).strip()
    ylabel_length = random.choice(range(axis_label_length_min, axis_label_length_max))
    ylabel = "".join(random.choices(letters, cum_weights=letter_weights,
                                    k=ylabel_length)).strip()
    ticks_label = []
    for i in range(bar_num):
        ticks_label_length = random.choice(range(ticks_label_length_min, ticks_label_length_max))
        ticks_label.append("".join(random.choices(string.ascii_letters,
                                                  k=ticks_label_length)).strip())
    legend_char = []
    for i in range(bar_per_loc):
        legend_length = random.choice(range(legend_length_min, legend_length_max))
        legend_char.append("".join(random.choices(letters, k=legend_length)).strip())

    # decide whether the switch of axis label, title and legend
    axis_label_switch = random.choice(["on", "off"])
    title_switch = random.choice(["on", "off"])
    legend_switch = random.choice(["on", "off"])
    legend_pos = random.choice(legend_position)

    if axis_label_switch == "on":
        xlabel = ax.set_xlabel(xlabel, fontproperties=axis_label_font)
        ylabel = ax.set_ylabel(ylabel, fontproperties=axis_label_font)
        axis_label = (xlabel, ylabel)

    # set the ticks and tick labels (ticks sit at the center of each group)
    set_hticks(x + (bar_width / 2) * bar_per_loc)
    hticklabels = set_hticklabels(ticks_label, fontproperties=ticks_label_font)
    vticklabels = get_vticklabels()
    for label in vticklabels:
        label.set_fontproperties(ticks_label_font)
    axis_ticks = (hticklabels, vticklabels)

    # set legend, possible positions include: top, bottom, upper right and
    # center right; the axes box is shrunk to leave room for the legend
    ax_bbox = ax.get_position()
    tight_rect = [0, 0, 1, 1]
    if legend_switch == "on":
        if legend_pos == "top":
            ax.set_position([ax_bbox.x0, ax_bbox.y0, ax_bbox.width,
                             ax_bbox.height * 0.85])
            legend = ax.legend(legend_char, prop=legend_font, ncol=bar_per_loc,
                               loc="lower center", bbox_to_anchor=(0.5, 1))
            tight_rect = [0, 0, 1, 0.85]
        if legend_pos == "bottom":
            ax.set_position([ax_bbox.x0, ax_bbox.y0 + ax_bbox.height * 0.15,
                             ax_bbox.width, ax_bbox.height * 0.85])
            tight_rect = [0, 0.15, 1, 1]
            # drop the legend a bit lower when an x label occupies that space
            if axis_label_switch == "on":
                legend = ax.legend(legend_char, prop=legend_font, ncol=bar_per_loc,
                                   loc="upper center", bbox_to_anchor=(0.5, -0.12))
            else:
                legend = ax.legend(legend_char, prop=legend_font, ncol=bar_per_loc,
                                   loc="upper center", bbox_to_anchor=(0.5, -0.05))
        if legend_pos == "right":
            ax.set_position([ax_bbox.x0, ax_bbox.y0, ax_bbox.width * 0.85,
                             ax_bbox.height])
            tight_rect = [0, 0, 0.85, 1]
            if random.choice(["top", "center"]) == "center":
                legend = ax.legend(legend_char, prop=legend_font, ncol=1,
                                   loc="center left", bbox_to_anchor=(1, 0.5))
            else:
                legend = ax.legend(legend_char, prop=legend_font, ncol=1,
                                   loc="upper left", bbox_to_anchor=(1, 1))

    if title_switch == "on":
        title_loc = random.choice(title_location)
        if legend_pos == "top":
            # push the title above a top legend so they don't overlap
            title = ax.set_title(title_text, fontproperties=title_font,
                                 loc="center", y=1.1)
        else:
            title = ax.set_title(title_text, fontproperties=title_font,
                                 loc=title_loc, y=1.01)

    plt.tight_layout(rect=tight_rect)
    fig.savefig(filename, dpi="figure")
    return fig, ax, bars, data, title, legend, axis_ticks, axis_label, bar_direction


# In[4]:


def get_bar_pixel(fig_height, bars, data):
    """
    method that return the bounding box of the bars that are arranged
    according to their x axis positions.
    Inputs:
        fig_height: figure height in pixels (used to flip the y origin)
        bars: objects of the bars of the plot, of size(bar_per_loc, bar_nums)
        data: list(x, y) containing the x coordinates of the bars and the heights of the bars
    Outputs:
        bar_coord: a list of dict containing bbox and height(data coordinate) of bars
        dict looks like {bbox:[top-left-x, top-left-y, bottom-right-x, bottom-right-y], height:h}
    """
    bar_heights = data[1]
    bar_per_loc = len(bars)
    bar_nums = len(bars[0])
    bar_coord = []
    # iterate group-by-group so boxes come out ordered by ordinal position
    for i in range(bar_nums):
        for j in range(bar_per_loc):
            h = bar_heights[j][i]
            bar = bars[j][i]
            b_cor = get_bbox_coord(fig_height, bar.get_window_extent())
            bar_coord.append({"bbox": b_cor, "height": h})
    return bar_coord


def get_tick_pixel(fig, fig_height, axis_ticks):
    """
    method that return tick coordinates and tick texts on the xy axes
    Inputs:
        fig: matplotlib object of the figure of the plot
        fig_height: figure height in pixels
        axis_ticks: tick object (pair of tick-label lists)
    Returns:
        list of two lists of {"bbox": ..., "text": ...} dicts.
    """
    ticklabel = []
    for i in range(2):
        ticktmp = []
        for t in axis_ticks[i]:
            text = t.get_text()
            # unclear error: raise "cannot get window extent" sometimes unless
            # passed renderer kwarg
            b_cor = get_bbox_coord(
                fig_height, t.get_window_extent(renderer=fig.canvas.get_renderer()))
            ticktmp.append({"bbox": b_cor, "text": text})
        ticklabel.append(ticktmp)
    return ticklabel


def get_label_pixel(fig_height, axis_label):
    """Return bbox/text dicts for the x and y axis labels (None if absent)."""
    if axis_label is None:
        return None
    al = []
    for label in axis_label:
        text = label.get_text()
        b_cor = get_bbox_coord(fig_height, label.get_window_extent())
        al.append({"bbox": b_cor, "text": text})
    return al


def get_title_pixel(fig_height, title):
    """Return the bbox/text dict for the title (None if the title is off)."""
    if title is None:
        return None
    text = title.get_text()
    b_cor = get_bbox_coord(fig_height, title.get_window_extent())
    return {"bbox": b_cor, "text": text}


def get_legend_pixel(fig_height, legend):
    """Return bbox/text dicts for each legend entry (None if legend is off)."""
    if legend is None:
        return None
    legend_list = []
    text_list = legend.get_texts()
    for t in text_list:
        text = t.get_text()
        b_cor = get_bbox_coord(fig_height, t.get_window_extent())
        legend_list.append({"bbox": b_cor, "text": text})
    return legend_list


def get_bbox_coord(fig_height, bbox):
    """
    take bounding boxs(bottom-left, top-right) and return processed
    boxs(top-left, bottom-right); the orgin is at top-left of the plot
    """
    cor = bbox.get_points()
    # flip the y axis: matplotlib's origin is bottom-left, images use top-left
    tmp = cor[0][1]
    cor[0][1] = fig_height - cor[1][1]
    cor[1][1] = fig_height - tmp
    b_cor = [round(cor[0][0]), round(cor[0][1]), round(cor[1][0]), round(cor[1][1])]
    return [int(i) for i in b_cor]


def write_coord(file_obj, plot_name, coord, sep):
    """Write one `<plot_name><sep><coord>` annotation line to an .idl file."""
    string_prep = "{plot_name}{seperator}".format(plot_name=plot_name, seperator=sep)
    string_prep += "{coord}".format(coord=coord)
    file_obj.write(string_prep)
    file_obj.write("\n")


def get_all_bbox(plot_objs, fig_height):
    """Collect the pixel bounding boxes of every chart element in one call."""
    fig, ax, bars, data, title, legend, axis_ticks, axis_label, bar_direction = plot_objs
    barbbox = get_bar_pixel(fig_height, bars, data)
    tickbbox = get_tick_pixel(fig, fig_height, axis_ticks)
    axislabelbbox = get_label_pixel(fig_height, axis_label)
    titlebbox = get_title_pixel(fig_height, title)
    legendbbox = get_legend_pixel(fig_height, legend)
    return barbbox, tickbbox, axislabelbbox, titlebbox, legendbbox


def generate_plots(n, train_or_test):
    """
    Generate `n` random bar chart images under data/<train_or_test>/plots
    and write one .idl annotation file per element type next to them.
    """
    # the python script directory
    # dir_path = os.path.dirname(os.path.realpath(__file__))
    dir_path = os.getcwd()
    outter_dir = os.path.join(dir_path, "data", train_or_test)
    plot_dir = os.path.join(outter_dir, "plots")
    # makedirs with exist_ok also covers the case where the split directory
    # exists but "plots" does not (the original only created "plots" when the
    # split directory was missing).
    os.makedirs(plot_dir, exist_ok=True)
    with open(os.path.join(outter_dir, train_or_test + "_barbbox.idl"), "w") as f_bar, open(
            os.path.join(outter_dir, train_or_test + "_tickbbox.idl"), "w") as f_tick, open(
            os.path.join(outter_dir, train_or_test + "_axislabelbbox.idl"), "w") as f_label, open(
            os.path.join(outter_dir, train_or_test + "_titlebbox.idl"), "w") as f_title, open(
            os.path.join(outter_dir, train_or_test + "_legendbbox.idl"), "w") as f_legend, open(
            os.path.join(outter_dir, train_or_test + "_imgsize.idl"), "w") as f_imgsize:
        file_objs = [f_bar, f_tick, f_label, f_title, f_legend, f_imgsize]
        # seperator in idl file
        sep = " -<>- "
        for i in tqdm(range(n)):
            try:
                img_type = random.choice(["jpg", "png"])
                img_name = "{}_{}.{}".format(train_or_test, i, img_type)
                plot_name = os.path.join(plot_dir, img_name)
                plot_objs = get_random_plot(plot_name)
                imgsize = list(map(int, plot_objs[0].get_size_inches() * plot_objs[0].dpi))
                fig_height = imgsize[1]
                bboxs_all = get_all_bbox(plot_objs, fig_height)
                for j, b in enumerate(bboxs_all):
                    write_coord(file_objs[j], img_name, b, sep)
                # write figure size (and the bar direction) to the last file
                write_coord(file_objs[-1], img_name, [imgsize, plot_objs[-1]], sep)
                # close the figure
                plt.close(plot_objs[0])
            except Exception:
                # Append to a per-split error log; mode "a" creates the file on
                # first use.  (Bug fix: the original called os.makedirs() on the
                # log *path*, creating a directory named error_log.txt so the
                # subsequent open() always raised IsADirectoryError.)
                error_file = os.path.join(outter_dir, "error_log.txt")
                with open(error_file, "a") as f_error:
                    f_error.write("{} error".format(plot_name))
                    f_error.write("\n")
    print(train_or_test + " plot generation done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="This python script generates random bar charts and their elements' bounding boxes")
    parser.add_argument("--n_train", help="Number of traning images", required=True, type=int)
    parser.add_argument("--n_test", help="Number of test images", required=True, type=int)
    args = vars(parser.parse_args())
    print("generating {} training data:".format(args["n_train"]))
    generate_plots(args["n_train"], "train")
    print("generating {} test data".format(args["n_test"]))
    generate_plots(args["n_test"], "test")
14,987
35.556098
117
py
TRSSL
TRSSL-main/train.py
import argparse import os import shutil import time import random import math import numpy as np from datetime import datetime from tqdm import tqdm import torch import torch.backends.cudnn as cudnn import torch.optim as optim import torch.utils.data as data import torch.nn.functional as F from utils.utils import Bar, Logger, AverageMeter, accuracy, interleave, save_checkpoint from tensorboardX import SummaryWriter from datasets.datasets import get_dataset_class from utils.evaluate_utils import hungarian_evaluate from models.build_model import build_model from utils.uncr_util import uncr_generator from utils.sinkhorn_knopp import SinkhornKnopp parser = argparse.ArgumentParser(description='TRSSL Training') # Optimization options parser.add_argument('--epochs', default=200, type=int, metavar='N',help='number of total epochs to run') parser.add_argument('--batch-size', default=256, type=int, metavar='N', help='train batchsize') parser.add_argument('--num-workers', default=4, type=int, help='number of dataloader workers') parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--wdecay', default=1e-4, type=float, help='weight decay') parser.add_argument('--momentum', default=0.9, type=float, help='momentum') parser.add_argument('--warmup-epochs', default=10, type=int, help='number of warmup epochs') # Checkpoints parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') # Miscs parser.add_argument('--manualSeed', type=int, default=0, help='manual seed') #Method options parser.add_argument('--lbl-percent', type=int, default=10, help='Percentage of labeled data') parser.add_argument('--novel-percent', default=50, type=int, help='Percentage of novel classes, default 50') parser.add_argument('--train-iteration', type=int, default=1024, help='Number of iteration per epoch') parser.add_argument('--out', default='outputs', 
help='Directory to output the result') parser.add_argument('--alpha', default=0.75, type=float) parser.add_argument('--ema-decay', default=0.999, type=float) parser.add_argument('--dataset', default='cifar10', type=str, choices=['cifar10', 'cifar100', 'tinyimagenet', 'oxfordpets', 'aircraft', 'stanfordcars', 'imagenet100'], help='dataset name') parser.add_argument('--data-root', default=f'data', help='directory to store data') parser.add_argument('--arch', default='resnet18', type=str, choices=['resnet18', 'resnet50'], help='model architecure') parser.add_argument("--num_iters_sk", default=3, type=int, help="number of iters for Sinkhorn") parser.add_argument("--epsilon_sk", default=0.05, type=float, help="epsilon for the Sinkhorn") parser.add_argument("--temperature", default=0.1, type=float, help="softmax temperature") parser.add_argument("--imagenet-classes", default=100, type=int, help="number of ImageNet classes") parser.add_argument('--description', default="default_run", type=str, help='description of the experiment') parser.add_argument('--no-progress', action='store_true', help="don't use progress bar") parser.add_argument("--uncr-freq", default=1, type=int, help="frequency of generating uncertainty scores") parser.add_argument("--threshold", default=0.5, type=float, help="threshold for hard pseudo-labeling") parser.add_argument("--imb-factor", default=1, type=float, help="imbalance factor of the data, default 1") args = parser.parse_args() state = {k: v for k, v in args._get_kwargs()} # Use CUDA os.environ['CUDA_VISIBLE_DEVICES'] use_cuda = torch.cuda.is_available() args.data_root = os.path.join(args.data_root, args.dataset) os.makedirs(args.data_root, exist_ok=True) # Random seed if args.manualSeed is None: args.manualSeed = random.randint(1, 10000) np.random.seed(args.manualSeed) best_acc = 0 # best test accuracy if args.dataset == "cifar10": args.no_class = 10 elif args.dataset == "cifar100": args.no_class = 100 elif args.dataset == "tinyimagenet": 
args.no_class = 200 elif args.dataset == "stanfordcars": args.no_class = 196 elif args.dataset == "aircraft": args.no_class = 100 elif args.dataset == "oxfordpets": args.no_class = 37 elif args.dataset == "imagenet100": args.no_class = 100 def main(): global best_acc run_started = datetime.today().strftime('%d-%m-%y_%H%M%S') args.exp_name = f'dataset_{args.dataset}_arch_{args.arch}_lbl_percent_{args.lbl_percent}_novel_percent_{args.novel_percent}_{args.description}_{run_started}' args.out = os.path.join(args.out, args.exp_name) os.makedirs(args.out, exist_ok=True) with open(f'{args.out}/parameters.txt', 'a+') as ofile: ofile.write(' | '.join(f'{k}={v}' for k, v in vars(args).items())) # load dataset args.no_seen = args.no_class - int((args.novel_percent*args.no_class)/100) dataset_class = get_dataset_class(args) train_labeled_dataset, train_unlabeled_dataset, uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel = dataset_class.get_dataset() # create dataloaders labeled_trainloader = data.DataLoader(train_labeled_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True) unlabeled_trainloader = data.DataLoader(train_unlabeled_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True) uncr_loader = data.DataLoader(uncr_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers) test_loader_all = data.DataLoader(test_dataset_all, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers) test_loader_seen = data.DataLoader(test_dataset_seen, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers) test_loader_novel = data.DataLoader(test_dataset_novel, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers) # build models model = build_model(args) ema_model = build_model(args, ema=True) # Sinkorn-Knopp sinkhorn = SinkhornKnopp(args) cudnn.benchmark = True print(' Total params: %.2fM' % (sum(p.numel() for p in 
model.parameters())/1000000.0)) optimizer = torch.optim.SGD(model.parameters(),lr=args.lr, momentum=args.momentum, weight_decay=args.wdecay) ema_optimizer= WeightEMA(model, ema_model, alpha=args.ema_decay) start_epoch = 0 # Resume title = f'ood-{args.dataset}' if args.resume: # Load checkpoint. print('==> Resuming from checkpoint..') assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!' args.out = os.path.dirname(args.resume) checkpoint = torch.load(args.resume) best_acc = checkpoint['best_acc'] start_epoch = checkpoint['epoch'] model.load_state_dict(checkpoint['state_dict']) ema_model.load_state_dict(checkpoint['ema_state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) logger = Logger(os.path.join(args.out, 'log.txt'), title=title, resume=True) else: logger = Logger(os.path.join(args.out, 'log.txt'), title=title) logger.set_names(['-Train Loss-', '-Test Acc. Seen-', '-Test Acc. Novel-', '-Test NMI Novel-', '-Test Acc. All-', '-Test NMI All-']) writer = SummaryWriter(args.out) test_accs = [] # Train and val for epoch in range(start_epoch, args.epochs): print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr'])) train_loss = train(args, labeled_trainloader, unlabeled_trainloader, model, optimizer, ema_optimizer, sinkhorn, epoch, use_cuda) all_cluster_results = test_cluster(args, test_loader_all, ema_model, epoch) novel_cluster_results = test_cluster(args, test_loader_novel, ema_model, epoch, offset=args.no_seen) test_acc_seen = test_seen(args, test_loader_seen, ema_model, epoch) if args.uncr_freq > 0: if (epoch+1)%args.uncr_freq == 0: temp_uncr = uncr_generator(args, uncr_loader, ema_model) train_labeled_dataset, train_unlabeled_dataset = dataset_class.get_dataset(temp_uncr=temp_uncr) labeled_trainloader = data.DataLoader(train_labeled_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True) unlabeled_trainloader = data.DataLoader(train_unlabeled_dataset, 
batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True) test_acc = all_cluster_results["acc"] is_best = test_acc > best_acc best_acc = max(test_acc, best_acc) print(f'epoch: {epoch}, acc-seen: {test_acc_seen}') print(f'epoch: {epoch}, acc-novel: {novel_cluster_results["acc"]}, nmi-novel: {novel_cluster_results["nmi"]}') print(f'epoch: {epoch}, acc-all: {all_cluster_results["acc"]}, nmi-all: {all_cluster_results["nmi"]}, best-acc: {best_acc}') writer.add_scalar('train/1.train_loss', train_loss, epoch) writer.add_scalar('test/1.acc_seen', test_acc_seen, epoch) writer.add_scalar('test/2.acc_novel', novel_cluster_results['acc'], epoch) writer.add_scalar('test/3.nmi_novel', novel_cluster_results['nmi'], epoch) writer.add_scalar('test/4.acc_all', all_cluster_results['acc'], epoch) writer.add_scalar('test/5.nmi_all', all_cluster_results['nmi'], epoch) # append logger file logger.append([train_loss, test_acc_seen, novel_cluster_results['acc'], novel_cluster_results['nmi'], all_cluster_results['acc'], all_cluster_results['nmi']]) # save model model_to_save = model.module if hasattr(model, "module") else model ema_model_to_save = ema_model.module if hasattr(ema_model, "module") else ema_model save_checkpoint({ 'epoch': epoch + 1, 'state_dict': model_to_save.state_dict(), 'ema_state_dict': ema_model_to_save.state_dict(), 'acc': test_acc, 'best_acc': best_acc, 'optimizer' : optimizer.state_dict(), }, is_best, args.out) test_accs.append(test_acc) logger.close() writer.close() print('Best acc:') print(best_acc) print('Mean acc:') print(np.mean(test_accs[-20:])) def train(args, labeled_trainloader, unlabeled_trainloader, model, optimizer, ema_optimizer, sinkhorn, epoch, use_cuda): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() end = time.time() bar = Bar('Training', max=args.train_iteration) labeled_train_iter = iter(labeled_trainloader) unlabeled_train_iter = iter(unlabeled_trainloader) model.train() for batch_idx 
in range(args.train_iteration): try: inputs_x, targets_x, _, temp_x = labeled_train_iter.next() except: labeled_train_iter = iter(labeled_trainloader) inputs_x, targets_x, _, temp_x = labeled_train_iter.next() try: (inputs_u, inputs_u2), _, _, temp_u = unlabeled_train_iter.next() except: unlabeled_train_iter = iter(unlabeled_trainloader) (inputs_u, inputs_u2), _, _, temp_u = unlabeled_train_iter.next() # measure data loading time data_time.update(time.time() - end) batch_size = inputs_x.size(0) # Transform label to one-hot targets_x = torch.zeros(batch_size, args.no_class).scatter_(1, targets_x.view(-1,1).long(), 1) if use_cuda: inputs_x, targets_x = inputs_x.cuda(), targets_x.cuda(non_blocking=True) inputs_u, inputs_u2 = inputs_u.cuda(), inputs_u2.cuda() temp_x, temp_u = temp_x.cuda(), temp_u.cuda() # normalize classifier weights with torch.no_grad(): if torch.cuda.device_count() > 1: w = model.module.fc.weight.data.clone() w = F.normalize(w, dim=1, p=2) model.module.fc.weight.copy_(w) else: w = model.fc.weight.data.clone() w = F.normalize(w, dim=1, p=2) model.fc.weight.copy_(w) with torch.no_grad(): # compute guessed labels of unlabel samples outputs_u = model(inputs_u) outputs_u2 = model(inputs_u2) # cross pseudo-labeling targets_u = sinkhorn(outputs_u2) targets_u2 = sinkhorn(outputs_u) # generate hard pseudo-labels for confident novel class samples targets_u_novel = targets_u[:, args.no_seen:] max_pred_novel, _ = torch.max(targets_u_novel, dim=-1) hard_novel_idx1 = torch.where(max_pred_novel>=args.threshold)[0] targets_u2_novel = targets_u2[:,args.no_seen:] max_pred2_novel, _ = torch.max(targets_u2_novel, dim=-1) hard_novel_idx2 = torch.where(max_pred2_novel>=args.threshold)[0] targets_u[hard_novel_idx1] = targets_u[hard_novel_idx1].ge(args.threshold).float() targets_u2[hard_novel_idx2] = targets_u2[hard_novel_idx2].ge(args.threshold).float() # mixup all_inputs = torch.cat([inputs_x, inputs_u, inputs_u2], dim=0) all_targets = torch.cat([targets_x, targets_u, 
targets_u2], dim=0) all_temp = torch.cat([temp_x, temp_u, temp_u], dim=0) l = np.random.beta(args.alpha, args.alpha) idx = torch.randperm(all_inputs.size(0)) input_a, input_b = all_inputs, all_inputs[idx] target_a, target_b = all_targets, all_targets[idx] temp_a, temp_b = all_temp, all_temp[idx] mixed_input = l * input_a + (1 - l) * input_b mixed_target = l * target_a + (1 - l) * target_b mixed_temp = l * temp_a + (1 - l) * temp_b # interleave labeled and unlabed samples between batches to get correct batchnorm calculation mixed_input = list(torch.split(mixed_input, batch_size)) mixed_input = interleave(mixed_input, batch_size) logits = [model(mixed_input[0])] for input in mixed_input[1:]: logits.append(model(input)) # put interleaved samples back logits = interleave(logits, batch_size) logits_x = logits[0] logits_u = torch.cat(logits[1:], dim=0) logits = torch.cat((logits_x, logits_u), 0) #cross_entropy loss preds = F.log_softmax(logits / mixed_temp.unsqueeze(1), dim=1) loss = -torch.mean(torch.sum(mixed_target * preds, dim=1)) # record loss losses.update(loss.item(), inputs_x.size(0)) # compute gradient and do SGD step optimizer.zero_grad() loss.backward() optimizer.step() ema_optimizer.step() # measure elapsed time batch_time.update(time.time() - end) end = time.time() # plot progress bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f}'.format( batch=batch_idx + 1, size=args.train_iteration, data=data_time.avg, bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, ) bar.next() bar.finish() return losses.avg def test_seen(args, test_loader, model, epoch): batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() end = time.time() model.eval() if not args.no_progress: test_loader = tqdm(test_loader) with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(test_loader): inputs = inputs.cuda() targets = targets.cuda() outputs = 
model(inputs) loss = F.cross_entropy(outputs, targets) prec1, prec5 = accuracy(outputs, targets, topk=(1, 5)) losses.update(loss.item(), inputs.shape[0]) top1.update(prec1.item(), inputs.shape[0]) top5.update(prec5.item(), inputs.shape[0]) batch_time.update(time.time() - end) end = time.time() if not args.no_progress: test_loader.set_description("test epoch: {epoch}/{epochs:4}. itr: {batch:4}/{iter:4}. btime: {bt:.3f}s. loss: {loss:.4f}. top1: {top1:.2f}. top5: {top5:.2f}. ".format( epoch=epoch + 1, epochs=args.epochs, batch=batch_idx + 1, iter=len(test_loader), bt=batch_time.avg, loss=losses.avg, top1=top1.avg, top5=top5.avg, )) if not args.no_progress: test_loader.close() return top1.avg def test_cluster(args, test_loader, model, epoch, offset=0): batch_time = AverageMeter() data_time = AverageMeter() end = time.time() gt_targets =[] predictions = [] model.eval() if not args.no_progress: test_loader = tqdm(test_loader) with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(test_loader): data_time.update(time.time() - end) inputs = inputs.cuda() targets = targets.cuda() outputs = model(inputs) _, max_idx = torch.max(outputs, dim=1) predictions.extend(max_idx.cpu().numpy().tolist()) gt_targets.extend(targets.cpu().numpy().tolist()) batch_time.update(time.time() - end) end = time.time() if not args.no_progress: test_loader.set_description("test epoch: {epoch}/{epochs:4}. itr: {batch:4}/{iter:4}. 
btime: {bt:.3f}s.".format( epoch=epoch + 1, epochs=args.epochs, batch=batch_idx + 1, iter=len(test_loader), bt=batch_time.avg, )) if not args.no_progress: test_loader.close() predictions = np.array(predictions) gt_targets = np.array(gt_targets) predictions = torch.from_numpy(predictions) gt_targets = torch.from_numpy(gt_targets) eval_output = hungarian_evaluate(predictions, gt_targets, offset) return eval_output class WeightEMA(object): def __init__(self, model, ema_model, alpha=0.999): self.model = model self.ema_model = ema_model self.alpha = alpha self.params = list(model.state_dict().values()) self.ema_params = list(ema_model.state_dict().values()) self.wd = 2e-5 for param, ema_param in zip(self.params, self.ema_params): param.data.copy_(ema_param.data) def step(self): one_minus_alpha = 1.0 - self.alpha for param, ema_param in zip(self.params, self.ema_params): if ema_param.dtype==torch.float32: ema_param.mul_(self.alpha) ema_param.add_(param * one_minus_alpha) # customized weight decay param.mul_(1 - self.wd) if __name__ == '__main__': main()
19,121
40.934211
183
py
TRSSL
TRSSL-main/models/build_model.py
import torch def build_model(args, ema=False): if args.dataset in ['cifar10', 'cifar100']: from . import resnet_cifar as models elif args.dataset == 'tinyimagenet': from . import resnet_tinyimagenet as models else: from . import resnet as models if args.arch == 'resnet18': model = models.resnet18(no_class=args.no_class) if args.arch == 'resnet50': model = models.resnet50(no_class=args.no_class) # use dataparallel if torch.cuda.device_count() > 1: model = nn.DataParallel(model) model = model.cuda() if ema: for param in model.parameters(): param.detach_() return model
692
24.666667
55
py
TRSSL
TRSSL-main/models/resnet.py
import torch from torch import Tensor import torch.nn as nn # from .._internally_replaced_utils import load_state_dict_from_url from typing import Type, Any, Callable, Union, List, Optional import torch.nn.functional as F __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth', 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', } def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d: """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d: """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): expansion: int = 1 def __init__( self, inplanes: int, planes: int, stride: int = 1, downsample: Optional[nn.Module] = None, groups: int = 1, base_width: int = 64, dilation: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None ) -> None: super(BasicBlock, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise 
ValueError('BasicBlock only supports groups=1 and base_width=64') if dilation > 1: raise NotImplementedError("Dilation > 1 not supported in BasicBlock") # Both self.conv1 and self.downsample layers downsample the input when stride != 1 self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.downsample = downsample self.stride = stride def forward(self, x: Tensor) -> Tensor: identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out class Bottleneck(nn.Module): # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) # while original implementation places the stride at the first 1x1 convolution(self.conv1) # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. # This variant is also known as ResNet V1.5 and improves accuracy according to # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. 
expansion: int = 4 def __init__( self, inplanes: int, planes: int, stride: int = 1, downsample: Optional[nn.Module] = None, groups: int = 1, base_width: int = 64, dilation: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None ) -> None: super(Bottleneck, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d width = int(planes * (base_width / 64.)) * groups # Both self.conv2 and self.downsample layers downsample the input when stride != 1 self.conv1 = conv1x1(inplanes, width) self.bn1 = norm_layer(width) self.conv2 = conv3x3(width, width, stride, groups, dilation) self.bn2 = norm_layer(width) self.conv3 = conv1x1(width, planes * self.expansion) self.bn3 = norm_layer(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x: Tensor) -> Tensor: identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out class ResNet(nn.Module): def __init__( self, block: Type[Union[BasicBlock, Bottleneck]], layers: List[int], no_class: int = 1000, zero_init_residual: bool = False, groups: int = 1, width_per_group: int = 64, replace_stride_with_dilation: Optional[List[bool]] = None, norm_layer: Optional[Callable[..., nn.Module]] = None ) -> None: super(ResNet, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d self._norm_layer = norm_layer self.inplanes = 64 self.dilation = 1 if replace_stride_with_dilation is None: # each element in the tuple indicates if we should replace # the 2x2 stride with a dilated convolution instead replace_stride_with_dilation = [False, False, False] if len(replace_stride_with_dilation) != 3: raise ValueError("replace_stride_with_dilation should be None " "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) 
self.groups = groups self.base_width = width_per_group self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = norm_layer(self.inplanes) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]) self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]) self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(512 * block.expansion, no_class, bias=False) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) # Zero-initialize the last BN in each residual branch, # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 if zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type] elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type] def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int, stride: int = 1, dilate: bool = False) -> nn.Sequential: norm_layer = self._norm_layer downsample = None previous_dilation = self.dilation if dilate: self.dilation *= stride stride = 1 if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer)) self.inplanes = planes * block.expansion for _ in range(1, blocks): layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer)) return nn.Sequential(*layers) def _forward_impl(self, x: Tensor) -> Tensor: # See note [TorchScript super()] x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = torch.flatten(x, 1) x = F.normalize(x) x = self.fc(x) return x def forward(self, x: Tensor) -> Tensor: return self._forward_impl(x) def _resnet( arch: str, block: Type[Union[BasicBlock, Bottleneck]], layers: List[int], pretrained: bool, progress: bool, **kwargs: Any ) -> ResNet: model = ResNet(block, layers, **kwargs) # if pretrained: # state_dict = load_state_dict_from_url(model_urls[arch], # progress=progress) # model.load_state_dict(state_dict) return model def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: r"""ResNet-18 model from `"Deep Residual 
Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs) def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: r"""ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs) def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: r"""ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: r"""ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: r"""ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. 
Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs) def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: r"""ResNeXt-50 32x4d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ kwargs['groups'] = 32 kwargs['width_per_group'] = 4 return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: r"""ResNeXt-101 32x8d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ kwargs['groups'] = 32 kwargs['width_per_group'] = 8 return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: r"""Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ kwargs['width_per_group'] = 64 * 2 return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: r"""Wide ResNet-101-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ kwargs['width_per_group'] = 64 * 2 return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
15,539
38.846154
111
py
TRSSL
TRSSL-main/models/resnet_tinyimagenet.py
""" This code is based on the Torchvision repository, which was licensed under the BSD 3-Clause. """ import torch import torch.nn as nn import torch.nn.functional as F class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1, is_last=False): super(BasicBlock, self).__init__() self.is_last = is_last self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion * planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) out += self.shortcut(x) preact = out out = F.relu(out) if self.is_last: return out, preact else: return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, in_planes, planes, stride=1, is_last=False): super(Bottleneck, self).__init__() self.is_last = is_last self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(self.expansion * planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion * planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = F.relu(self.bn2(self.conv2(out))) out = self.bn3(self.conv3(out)) out += self.shortcut(x) preact = out out = F.relu(out) 
if self.is_last: return out, preact else: return out class ResNet(nn.Module): def __init__(self, block, num_blocks, no_class=10, in_channel=3, zero_init_residual=False): super(ResNet, self).__init__() self.in_planes = 64 self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(512*block.expansion, no_class, bias=False) # torch.nn.init.kaiming_normal_(self.classifier) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) # Zero-initialize the last BN in each residual branch, # so that the residual branch starts with zeros, and each residual block behaves # like an identity. 
This improves the model by 0.2~0.3% according to: # https://arxiv.org/abs/1706.02677 if zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): nn.init.constant_(m.bn3.weight, 0) elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1] * (num_blocks - 1) layers = [] for i in range(num_blocks): stride = strides[i] layers.append(block(self.in_planes, planes, stride)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = F.max_pool2d(out, kernel_size=3, stride=2, padding=1) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = self.layer4(out) out = self.avgpool(out) out = torch.flatten(out, 1) out = F.normalize(out) out = self.fc(out) return out def resnet18(**kwargs): return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) def resnet50(**kwargs): return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
5,141
37.088889
104
py
TRSSL
TRSSL-main/models/resnet_cifar.py
""" This code is based on the Torchvision repository, which was licensed under the BSD 3-Clause. """ import torch import torch.nn as nn import torch.nn.functional as F class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1, is_last=False): super(BasicBlock, self).__init__() self.is_last = is_last self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion * planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) out += self.shortcut(x) preact = out out = F.relu(out) if self.is_last: return out, preact else: return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, in_planes, planes, stride=1, is_last=False): super(Bottleneck, self).__init__() self.is_last = is_last self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(self.expansion * planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion * planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = F.relu(self.bn2(self.conv2(out))) out = self.bn3(self.conv3(out)) out += self.shortcut(x) preact = out out = F.relu(out) 
if self.is_last: return out, preact else: return out class ResNet(nn.Module): def __init__(self, block, num_blocks, no_class=10, in_channel=3, zero_init_residual=False): super(ResNet, self).__init__() self.in_planes = 64 self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(512*block.expansion, no_class, bias=False) # torch.nn.init.kaiming_normal_(self.classifier) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) # Zero-initialize the last BN in each residual branch, # so that the residual branch starts with zeros, and each residual block behaves # like an identity. 
This improves the model by 0.2~0.3% according to: # https://arxiv.org/abs/1706.02677 if zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): nn.init.constant_(m.bn3.weight, 0) elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1] * (num_blocks - 1) layers = [] for i in range(num_blocks): stride = strides[i] layers.append(block(self.in_planes, planes, stride)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = self.layer4(out) out = self.avgpool(out) out = torch.flatten(out, 1) out = F.normalize(out) out = self.fc(out) return out def resnet18(**kwargs): return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) def resnet50(**kwargs): return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
5,073
36.865672
104
py
TRSSL
TRSSL-main/datasets/datasets.py
import numpy as np from PIL import Image, ImageFilter, ImageOps import random from torchvision import datasets, transforms import torch import pickle import os import math # normalization parameters cifar10_mean, cifar10_std = (0.4914, 0.4822, 0.4465), (0.2471, 0.2435, 0.2616) cifar100_mean, cifar100_std = (0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761) tinyimagenet_mean, tinyimagenet_std = (0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262) imgnet_mean, imgnet_std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225) def get_dataset_class(args): if args.dataset == 'cifar10': return cifar10_dataset(args) elif args.dataset == 'cifar100': return cifar100_dataset(args) elif args.dataset == 'tinyimagenet': return tinyimagenet_dataset(args) elif args.dataset in ['aircraft', 'stanfordcars', 'oxfordpets']: return generic224_dataset(args) elif args.dataset == 'imagenet100': return imagenet100_dataset(args) def x_u_split_seen_novel(labels, lbl_percent, num_classes, lbl_set, unlbl_set, imb_factor): labels = np.array(labels) labeled_idx = [] unlabeled_idx = [] for i in range(num_classes): idx = np.where(labels == i)[0] np.random.shuffle(idx) img_max = len(idx) num = img_max * ((1/imb_factor)**(i / (num_classes - 1.0))) idx = idx[:int(num)] n_lbl_sample = math.ceil(len(idx)*(lbl_percent/100)) if i in lbl_set: labeled_idx.extend(idx[:n_lbl_sample]) unlabeled_idx.extend(idx[n_lbl_sample:]) elif i in unlbl_set: unlabeled_idx.extend(idx) return labeled_idx, unlabeled_idx class cifar10_dataset(): def __init__(self, args): # augmentations self.transform_train = transforms.Compose([ transforms.RandomChoice([ transforms.RandomCrop(32, padding=4), transforms.RandomResizedCrop(32, (0.5, 1.0)), ]), transforms.RandomHorizontalFlip(), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.6), Solarize(p=0.1), Equalize(p=0.1), transforms.ToTensor(), transforms.Normalize(cifar10_mean, cifar10_std), ]) self.transform_val = transforms.Compose([ transforms.CenterCrop(32), 
transforms.ToTensor(), transforms.Normalize(mean=cifar10_mean, std=cifar10_std) ]) base_dataset = datasets.CIFAR10(args.data_root, train=True, download=True) train_labeled_idxs, train_unlabeled_idxs = x_u_split_seen_novel(base_dataset.targets, args.lbl_percent, args.no_class, list(range(0,args.no_seen)), list(range(args.no_seen, args.no_class)), args.imb_factor) self.train_labeled_idxs = train_labeled_idxs self.train_unlabeled_idxs = train_unlabeled_idxs self.temperature = args.temperature self.data_root = args.data_root self.no_seen = args.no_seen self.no_class = args.no_class def get_dataset(self, temp_uncr=None): train_labeled_idxs = self.train_labeled_idxs.copy() train_unlabeled_idxs = self.train_unlabeled_idxs.copy() train_labeled_dataset = CIFAR10SSL(self.data_root, train_labeled_idxs, train=True, transform=self.transform_train, temperature=self.temperature) train_unlabeled_dataset = CIFAR10SSL(self.data_root, train_unlabeled_idxs, train=True, transform=TransformTwice(self.transform_train), temperature=self.temperature, temp_uncr=temp_uncr) if temp_uncr is not None: return train_labeled_dataset, train_unlabeled_dataset train_uncr_dataset = CIFAR10SSL_UNCR(self.data_root, train_unlabeled_idxs, train=True, transform=self.transform_train) test_dataset_seen = CIFAR10SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False, labeled_set=list(range(0,self.no_seen))) test_dataset_novel = CIFAR10SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False, labeled_set=list(range(self.no_seen, self.no_class))) test_dataset_all = CIFAR10SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False) return train_labeled_dataset, train_unlabeled_dataset, train_uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel class cifar100_dataset(): def __init__(self, args): # augmentations self.transform_train = transforms.Compose([ transforms.RandomChoice([ transforms.RandomCrop(32, padding=4), 
transforms.RandomResizedCrop(32, (0.5, 1.0)), ]), transforms.RandomHorizontalFlip(), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.6), Solarize(p=0.1), Equalize(p=0.1), transforms.ToTensor(), transforms.Normalize(cifar100_mean, cifar100_std), ]) self.transform_val = transforms.Compose([ transforms.CenterCrop(32), transforms.ToTensor(), transforms.Normalize(mean=cifar100_mean, std=cifar100_std) ]) base_dataset = datasets.CIFAR100(args.data_root, train=True, download=True) train_labeled_idxs, train_unlabeled_idxs = x_u_split_seen_novel(base_dataset.targets, args.lbl_percent, args.no_class, list(range(0,args.no_seen)), list(range(args.no_seen, args.no_class)), args.imb_factor) self.train_labeled_idxs = train_labeled_idxs self.train_unlabeled_idxs = train_unlabeled_idxs self.temperature = args.temperature self.data_root = args.data_root self.no_seen = args.no_seen self.no_class = args.no_class def get_dataset(self, temp_uncr=None): train_labeled_idxs = self.train_labeled_idxs.copy() train_unlabeled_idxs = self.train_unlabeled_idxs.copy() train_labeled_dataset = CIFAR100SSL(self.data_root, train_labeled_idxs, train=True, transform=self.transform_train, temperature=self.temperature) train_unlabeled_dataset = CIFAR100SSL(self.data_root, train_unlabeled_idxs, train=True, transform=TransformTwice(self.transform_train), temperature=self.temperature, temp_uncr=temp_uncr) if temp_uncr is not None: return train_labeled_dataset, train_unlabeled_dataset train_uncr_dataset = CIFAR100SSL_UNCR(self.data_root, train_unlabeled_idxs, train=True, transform=self.transform_train) test_dataset_seen = CIFAR100SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False, labeled_set=list(range(0,self.no_seen))) test_dataset_novel = CIFAR100SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False, labeled_set=list(range(self.no_seen, self.no_class))) test_dataset_all = CIFAR100SSL_TEST(self.data_root, train=False, 
transform=self.transform_val, download=False) return train_labeled_dataset, train_unlabeled_dataset, train_uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel class tinyimagenet_dataset(): def __init__(self, args): # augmentations self.transform_train = transforms.Compose([ transforms.RandomChoice([ transforms.RandomCrop(64, padding=8), transforms.RandomResizedCrop(64, (0.5, 1.0)), ]), transforms.RandomHorizontalFlip(), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.5), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.2), transforms.ToTensor(), transforms.Normalize(tinyimagenet_mean, tinyimagenet_std), ]) self.transform_val = transforms.Compose([ transforms.CenterCrop(64), transforms.ToTensor(), transforms.Normalize(mean=tinyimagenet_mean, std=tinyimagenet_std) ]) base_dataset = datasets.ImageFolder(os.path.join(args.data_root, 'train')) base_dataset_targets = np.array(base_dataset.imgs) base_dataset_targets = base_dataset_targets[:,1] base_dataset_targets= list(map(int, base_dataset_targets.tolist())) train_labeled_idxs, train_unlabeled_idxs = x_u_split_seen_novel(base_dataset_targets, args.lbl_percent, args.no_class, list(range(0,args.no_seen)), list(range(args.no_seen, args.no_class)), args.imb_factor) self.train_labeled_idxs = train_labeled_idxs self.train_unlabeled_idxs = train_unlabeled_idxs self.temperature = args.temperature self.data_root = args.data_root self.no_seen = args.no_seen self.no_class = args.no_class def get_dataset(self, temp_uncr=None): train_labeled_idxs = self.train_labeled_idxs.copy() train_unlabeled_idxs = self.train_unlabeled_idxs.copy() train_labeled_dataset = GenericSSL(os.path.join(args.data_root, 'train'), train_labeled_idxs, transform=self.transform_train, temperature=self.temperature) train_unlabeled_dataset = GenericSSL(os.path.join(args.data_root, 'train'), train_unlabeled_idxs, transform=TransformTwice(self.transform_train), 
temperature=self.temperature, temp_uncr=temp_uncr) if temp_uncr is not None: return train_labeled_dataset, train_unlabeled_dataset train_uncr_dataset = GenericUNCR(os.path.join(args.data_root, 'train'), train_unlabeled_idxs, transform=self.transform_train) test_dataset_seen = GenericTEST(os.path.join(args.data_root, 'test'), no_class=args.no_class, transform=self.transform_val, labeled_set=list(range(0,args.no_seen))) test_dataset_novel = GenericTEST(os.path.join(args.data_root, 'test'), no_class=args.no_class, transform=self.transform_val, labeled_set=list(range(args.no_seen, args.no_class))) test_dataset_all = GenericTEST(os.path.join(args.data_root, 'test'), no_class=args.no_class, transform=self.transform_val) return train_labeled_dataset, train_unlabeled_dataset, train_uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel class generic224_dataset(): def __init__(self, args): # augmentations self.transform_train = transforms.Compose([ transforms.RandomResizedCrop(224, (0.5, 1.0)), transforms.RandomHorizontalFlip(), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.5), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.2), transforms.ToTensor(), transforms.Normalize(imgnet_mean, imgnet_std), ]) self.transform_val = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=imgnet_mean, std=imgnet_std) ]) base_dataset = datasets.ImageFolder(os.path.join(args.data_root, 'train')) base_dataset_targets = np.array(base_dataset.imgs) base_dataset_targets = base_dataset_targets[:,1] base_dataset_targets= list(map(int, base_dataset_targets.tolist())) train_labeled_idxs, train_unlabeled_idxs = x_u_split_seen_novel(base_dataset_targets, args.lbl_percent, args.no_class, list(range(0,args.no_seen)), list(range(args.no_seen, args.no_class)), args.imb_factor) self.train_labeled_idxs = train_labeled_idxs self.train_unlabeled_idxs = 
train_unlabeled_idxs self.temperature = args.temperature self.data_root = args.data_root self.no_seen = args.no_seen self.no_class = args.no_class def get_dataset(self, temp_uncr=None): train_labeled_idxs = self.train_labeled_idxs.copy() train_unlabeled_idxs = self.train_unlabeled_idxs.copy() train_labeled_dataset = GenericSSL(os.path.join(args.data_root, 'train'), train_labeled_idxs, transform=self.transform_train, temperature=self.temperature) train_unlabeled_dataset = GenericSSL(os.path.join(args.data_root, 'train'), train_unlabeled_idxs, transform=TransformTwice(self.transform_train), temperature=self.temperature, temp_uncr=temp_uncr) if temp_uncr is not None: return train_labeled_dataset, train_unlabeled_dataset train_uncr_dataset = GenericUNCR(os.path.join(args.data_root, 'train'), train_unlabeled_idxs, transform=self.transform_train) test_dataset_seen = GenericTEST(os.path.join(args.data_root, 'test'), no_class=args.no_class, transform=self.transform_val, labeled_set=list(range(0,args.no_seen))) test_dataset_novel = GenericTEST(os.path.join(args.data_root, 'test'), no_class=args.no_class, transform=self.transform_val, labeled_set=list(range(args.no_seen, args.no_class))) test_dataset_all = GenericTEST(os.path.join(args.data_root, 'test'), no_class=args.no_class, transform=self.transform_val) return train_labeled_dataset, train_unlabeled_dataset, train_uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel class imagenet100_dataset(): def __init__(self, args): # augmentations self.transform_train = transforms.Compose([ transforms.RandomResizedCrop(224, (0.2, 1.0)), #stronger augmnetation transforms.RandomHorizontalFlip(), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.5), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.2), transforms.ToTensor(), transforms.Normalize(imgnet_mean, imgnet_std), ]) self.transform_val = transforms.Compose([ transforms.Resize(256), 
transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=imgnet_mean, std=imgnet_std) ]) base_dataset = datasets.ImageFolder(os.path.join(args.data_root, 'train')) base_dataset_targets = np.array(base_dataset.imgs) base_dataset_targets = base_dataset_targets[:,1] base_dataset_targets= list(map(int, base_dataset_targets.tolist())) train_labeled_idxs, train_unlabeled_idxs = x_u_split_seen_novel(base_dataset_targets, args.lbl_percent, args.no_class, list(range(0,args.no_seen)), list(range(args.no_seen, args.no_class)), args.imb_factor) self.train_labeled_idxs = train_labeled_idxs self.train_unlabeled_idxs = train_unlabeled_idxs self.temperature = args.temperature self.data_root = args.data_root self.no_seen = args.no_seen self.no_class = args.no_class def get_dataset(self, temp_uncr=None): train_labeled_idxs = self.train_labeled_idxs.copy() train_unlabeled_idxs = self.train_unlabeled_idxs.copy() train_labeled_dataset = GenericSSL(os.path.join(args.data_root, 'train'), train_labeled_idxs, transform=self.transform_train, temperature=self.temperature) train_unlabeled_dataset = GenericSSL(os.path.join(args.data_root, 'train'), train_unlabeled_idxs, transform=TransformTwice(self.transform_train), temperature=self.temperature, temp_uncr=temp_uncr) if temp_uncr is not None: return train_labeled_dataset, train_unlabeled_dataset train_uncr_dataset = GenericUNCR(os.path.join(args.data_root, 'train'), train_unlabeled_idxs, transform=self.transform_train) test_dataset_seen = GenericTEST(os.path.join(args.data_root, 'test'), no_class=args.no_class, transform=self.transform_val, labeled_set=list(range(0,args.no_seen))) test_dataset_novel = GenericTEST(os.path.join(args.data_root, 'test'), no_class=args.no_class, transform=self.transform_val, labeled_set=list(range(args.no_seen, args.no_class))) test_dataset_all = GenericTEST(os.path.join(args.data_root, 'test'), no_class=args.no_class, transform=self.transform_val) return train_labeled_dataset, 
train_unlabeled_dataset, train_uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel class TransformTwice: def __init__(self, transform): self.transform = transform def __call__(self, inp): out1 = self.transform(inp) out2 = self.transform(inp) return out1, out2 class CIFAR10SSL(datasets.CIFAR10): def __init__(self, root, indexs, temperature=None, temp_uncr=None, train=True, transform=None, target_transform=None, download=True): super().__init__(root, train=train, transform=transform, target_transform=target_transform, download=download) self.targets = np.array(self.targets) if temperature is not None: self.temp = temperature*np.ones(len(self.targets)) else: self.temp = np.ones(len(self.targets)) if temp_uncr is not None: self.temp[temp_uncr['index']] = temp_uncr['uncr'] if indexs is not None: indexs = np.array(indexs) self.data = self.data[indexs] self.targets = np.array(self.targets)[indexs] self.temp = self.temp[indexs] self.indexs = indexs else: self.indexs = np.arange(len(self.targets)) def __getitem__(self, index): img, target = self.data[index], self.targets[index] img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target, self.indexs[index], self.temp[index] class CIFAR10SSL_TEST(datasets.CIFAR10): def __init__(self, root, train=False, transform=None, target_transform=None, download=True, labeled_set=None): super().__init__(root, train=train, transform=transform, target_transform=target_transform, download=download) self.targets = np.array(self.targets) indexs = [] if labeled_set is not None: for i in range(10): idx = np.where(self.targets == i)[0] if i in labeled_set: indexs.extend(idx) indexs = np.array(indexs) self.data = self.data[indexs] self.targets = np.array(self.targets)[indexs] def __getitem__(self, index): img, target = self.data[index], self.targets[index] img = Image.fromarray(img) if self.transform is not 
None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target class CIFAR10SSL_UNCR(datasets.CIFAR10): def __init__(self, root, indexs, train=True, transform=None, target_transform=None, download=True): super().__init__(root, train=train, transform=transform, target_transform=target_transform, download=download) self.targets = np.array(self.targets) if indexs is not None: indexs = np.array(indexs) self.data = self.data[indexs] self.targets = np.array(self.targets)[indexs] self.indexs = indexs else: self.indexs = np.arange(len(self.targets)) def __getitem__(self, index): img, target = self.data[index], self.targets[index] img = Image.fromarray(img) if self.transform is not None: img1 = self.transform(img) img2 = self.transform(img) img3 = self.transform(img) img4 = self.transform(img) img5 = self.transform(img) img6 = self.transform(img) img7 = self.transform(img) img8 = self.transform(img) img9 = self.transform(img) img10 = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img1, img2, img3, img4, img5, img6, img7, img8, img9, img10, target, self.indexs[index] class CIFAR100SSL(datasets.CIFAR100): def __init__(self, root, indexs, temperature=None, temp_uncr=None, train=True, transform=None, target_transform=None, download=False): super().__init__(root, train=train, transform=transform, target_transform=target_transform, download=download) self.targets = np.array(self.targets) if temperature is not None: self.temp = temperature*np.ones(len(self.targets)) else: self.temp = np.ones(len(self.targets)) if temp_uncr is not None: self.temp[temp_uncr['index']] = temp_uncr['uncr'] if indexs is not None: indexs = np.array(indexs) self.data = self.data[indexs] self.targets = np.array(self.targets)[indexs] self.temp = self.temp[indexs] self.indexs = indexs else: self.indexs = np.arange(len(self.targets)) def __getitem__(self, index): img, target = 
self.data[index], self.targets[index] img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target, self.indexs[index], self.temp[index] class CIFAR100SSL_TEST(datasets.CIFAR100): def __init__(self, root, train=False, transform=None, target_transform=None, download=False, labeled_set=None): super().__init__(root, train=train, transform=transform, target_transform=target_transform, download=download) self.targets = np.array(self.targets) indexs = [] if labeled_set is not None: for i in range(100): idx = np.where(self.targets == i)[0] if i in labeled_set: indexs.extend(idx) indexs = np.array(indexs) self.data = self.data[indexs] self.targets = np.array(self.targets)[indexs] def __getitem__(self, index): img, target = self.data[index], self.targets[index] img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target class CIFAR100SSL_UNCR(datasets.CIFAR100): def __init__(self, root, indexs, train=True, transform=None, target_transform=None, download=True): super().__init__(root, train=train, transform=transform, target_transform=target_transform, download=download) self.targets = np.array(self.targets) if indexs is not None: indexs = np.array(indexs) self.data = self.data[indexs] self.targets = np.array(self.targets)[indexs] self.indexs = indexs else: self.indexs = np.arange(len(self.targets)) def __getitem__(self, index): img, target = self.data[index], self.targets[index] img = Image.fromarray(img) if self.transform is not None: img1 = self.transform(img) img2 = self.transform(img) img3 = self.transform(img) img4 = self.transform(img) img5 = self.transform(img) img6 = self.transform(img) img7 = self.transform(img) img8 = self.transform(img) img9 = self.transform(img) img10 = self.transform(img) if self.target_transform is not None: 
target = self.target_transform(target) return img1, img2, img3, img4, img5, img6, img7, img8, img9, img10, target, self.indexs[index] class GenericSSL(datasets.ImageFolder): def __init__(self, root, indexs, temperature=None, temp_uncr=None, transform=None, target_transform=None): super().__init__(root, transform=transform, target_transform=target_transform) self.imgs = np.array(self.imgs) self.targets = self.imgs[:, 1] self.targets= list(map(int, self.targets.tolist())) self.data = np.array(self.imgs[:, 0]) self.targets = np.array(self.targets) if temperature is not None: self.temp = temperature*np.ones(len(self.targets)) else: self.temp = np.ones(len(self.targets)) if temp_uncr is not None: self.temp[temp_uncr['index']] = temp_uncr['uncr'] if indexs is not None: indexs = np.array(indexs) self.data = self.data[indexs] self.targets = np.array(self.targets)[indexs] self.temp = self.temp[indexs] self.indexs = indexs else: self.indexs = np.arange(len(self.targets)) def __len__(self): return len(self.data) def __getitem__(self, index): img, target = self.data[index], self.targets[index] img = self.loader(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target, self.indexs[index], self.temp[index] class GenericTEST(datasets.ImageFolder): def __init__(self, root, transform=None, target_transform=None, labeled_set=None, no_class=200): super().__init__(root, transform=transform, target_transform=target_transform) self.imgs = np.array(self.imgs) self.targets = self.imgs[:, 1] self.targets= list(map(int, self.targets.tolist())) self.data = np.array(self.imgs[:, 0]) self.targets = np.array(self.targets) indexs = [] if labeled_set is not None: for i in range(no_class): idx = np.where(self.targets == i)[0] if i in labeled_set: indexs.extend(idx) indexs = np.array(indexs) self.data = self.data[indexs] self.targets = np.array(self.targets)[indexs] def __len__(self): return 
len(self.data) def __getitem__(self, index): img, target = self.data[index], self.targets[index] img = self.loader(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target class GenericUNCR(datasets.ImageFolder): def __init__(self, root, indexs, transform=None, target_transform=None): super().__init__(root, transform=transform, target_transform=target_transform) self.imgs = np.array(self.imgs) self.targets = self.imgs[:, 1] self.targets= list(map(int, self.targets.tolist())) self.data = np.array(self.imgs[:, 0]) self.targets = np.array(self.targets) if indexs is not None: indexs = np.array(indexs) self.data = self.data[indexs] self.targets = np.array(self.targets)[indexs] self.indexs = indexs else: self.indexs = np.arange(len(self.targets)) def __len__(self): return len(self.data) def __getitem__(self, index): img, target = self.data[index], self.targets[index] img = self.loader(img) if self.transform is not None: img1 = self.transform(img) img2 = self.transform(img) img3 = self.transform(img) img4 = self.transform(img) img5 = self.transform(img) img6 = self.transform(img) img7 = self.transform(img) img8 = self.transform(img) img9 = self.transform(img) img10 = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img1, img2, img3, img4, img5, img6, img7, img8, img9, img10, target, self.indexs[index] class GaussianBlur(object): """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709""" def __init__(self, sigma=[0.1, 2.0]): self.sigma = sigma def __call__(self, x): sigma = random.uniform(self.sigma[0], self.sigma[1]) x = x.filter(ImageFilter.GaussianBlur(radius=sigma)) return x class Solarize(object): def __init__(self, p=0.2): self.prob = p def __call__(self, img): if torch.bernoulli(torch.tensor(self.prob)) == 0: return img v = torch.rand(1) * 256 return ImageOps.solarize(img, v) class 
Equalize(object): def __init__(self, p=0.2): self.prob = p def __call__(self, img): if torch.bernoulli(torch.tensor(self.prob)) == 0: return img return ImageOps.equalize(img)
29,457
41.203438
214
py
TRSSL
TRSSL-main/utils/utils.py
import os
import torch
import numpy as np
import random
from progress.bar import Bar as Bar
import torch.nn.functional as F
import shutil
import matplotlib.pyplot as plt


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of scalar tensors, precision@k in percent, one per k.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.reshape(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


class AverageMeter(object):
    """Computes and stores the average and current value

    Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def save_checkpoint(state, is_best, save_path):
    """Save a checkpoint dict; additionally copy it to model_best when is_best."""
    filename = f'checkpoint.pth.tar'
    filepath = os.path.join(save_path, filename)
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, os.path.join(save_path, f'model_best.pth.tar'))


class WeightEMA(object):
    """Exponential moving average of model weights.

    On construction the live model's parameters are overwritten with the
    EMA model's parameters; step() then updates the EMA copy in place.
    """

    def __init__(self, args, model, ema_model):
        self.model = model
        self.ema_model = ema_model
        self.alpha = args.ema_decay
        self.params = list(model.state_dict().values())
        self.ema_params = list(ema_model.state_dict().values())
        # self.wd = 0.02 * args.lr

        for param, ema_param in zip(self.params, self.ema_params):
            param.data.copy_(ema_param.data)

    def step(self):
        one_minus_alpha = 1.0 - self.alpha
        for param, ema_param in zip(self.params, self.ema_params):
            # Only average float tensors; integer buffers (e.g. BN
            # step counters) are left untouched.
            if ema_param.dtype == torch.float32:
                ema_param.mul_(self.alpha)
                ema_param.add_(param * one_minus_alpha)
                # customized weight decay
                # param.mul_(1 - self.wd)


def interleave_offsets(batch, nu):
    """Split `batch` items into nu+1 near-equal groups; return cumulative offsets."""
    groups = [batch // (nu + 1)] * (nu + 1)
    # Distribute the remainder over the trailing groups.
    for x in range(batch - sum(groups)):
        groups[-x - 1] += 1
    offsets = [0]
    for g in groups:
        offsets.append(offsets[-1] + g)
    assert offsets[-1] == batch
    return offsets


def interleave(xy, batch):
    """Interleave a list of batches by swapping slice i of batch i with batch 0."""
    nu = len(xy) - 1
    offsets = interleave_offsets(batch, nu)
    xy = [[v[offsets[p]:offsets[p + 1]] for p in range(nu + 1)] for v in xy]
    for i in range(1, nu + 1):
        xy[0][i], xy[i][i] = xy[i][i], xy[0][i]
    return [torch.cat(v, dim=0) for v in xy]


class Logger(object):
    '''Save training process to log file with simple plot function.'''

    def __init__(self, fpath, title=None, resume=False):
        self.file = None
        self.resume = resume
        self.title = '' if title is None else title
        if fpath is not None:
            if resume:
                # Re-read an existing log so appending/plotting continues it.
                self.file = open(fpath, 'r')
                name = self.file.readline()
                self.names = name.rstrip().split('\t')
                self.numbers = {}
                for _, name in enumerate(self.names):
                    self.numbers[name] = []
                for numbers in self.file:
                    numbers = numbers.rstrip().split('\t')
                    for i in range(0, len(numbers)):
                        self.numbers[self.names[i]].append(numbers[i])
                self.file.close()
                self.file = open(fpath, 'a')
            else:
                self.file = open(fpath, 'w')

    def set_names(self, names):
        if self.resume:
            pass
        # initialize numbers as empty list
        self.numbers = {}
        self.names = names
        for _, name in enumerate(self.names):
            self.file.write(name)
            self.file.write('\t')
            self.numbers[name] = []
        self.file.write('\n')
        self.file.flush()

    def append(self, numbers):
        assert len(self.names) == len(numbers), 'Numbers do not match names'
        for index, num in enumerate(numbers):
            self.file.write("{0:.6f}".format(num))
            self.file.write('\t')
            self.numbers[self.names[index]].append(num)
        self.file.write('\n')
        self.file.flush()

    def plot(self, names=None):
        names = self.names if names is None else names
        numbers = self.numbers
        for _, name in enumerate(names):
            x = np.arange(len(numbers[name]))
            plt.plot(x, np.asarray(numbers[name]))
        plt.legend([self.title + '(' + name + ')' for name in names])
        plt.grid(True)

    def close(self):
        if self.file is not None:
            self.file.close()
4,886
30.127389
95
py
TRSSL
TRSSL-main/utils/evaluate_utils.py
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from scipy.optimize import linear_sum_assignment


@torch.no_grad()
def hungarian_evaluate(predictions, targets, offset=0):
    """Evaluate cluster assignments against labels via Hungarian matching.

    Args:
        predictions: 1-D CPU LongTensor of predicted cluster ids; negative
            entries are treated as invalid (always counted as errors).
        targets: 1-D CPU LongTensor of ground-truth labels.
        offset: value subtracted from both predictions and targets first.

    Returns:
        Dict with 'acc' (percent), 'ari', 'nmi' and the 'hungarian_match'
        list of (pred_id, target_id) pairs.
    """
    # Hungarian matching
    targets = targets - offset
    predictions = predictions - offset
    predictions_np = predictions.numpy()
    num_elems = targets.size(0)

    # only consider the valid predicts. rest are treated as misclassification
    valid_idx = np.where(predictions_np >= 0)[0]
    predictions_sel = predictions[valid_idx]
    targets_sel = targets[valid_idx]
    num_classes = torch.unique(targets).numel()
    num_classes_pred = max(torch.unique(predictions_sel).numel(), num_classes)

    # match is data dependent
    match = _hungarian_match(predictions_sel, targets_sel,
                             preds_k=num_classes_pred, targets_k=num_classes)
    reordered_preds = torch.zeros(predictions_sel.size(0), dtype=predictions_sel.dtype)
    for pred_i, target_i in match:
        reordered_preds[predictions_sel == int(pred_i)] = int(target_i)

    # Gather performance metrics
    reordered_preds = reordered_preds.numpy()
    # accuracy is normalized with the total number of samples not only the valid ones
    acc = int((reordered_preds == targets_sel.numpy()).sum()) / float(num_elems)
    nmi = metrics.normalized_mutual_info_score(targets.numpy(), predictions.numpy())
    ari = metrics.adjusted_rand_score(targets.numpy(), predictions.numpy())

    return {'acc': acc * 100, 'ari': ari, 'nmi': nmi, 'hungarian_match': match}


@torch.no_grad()
def _hungarian_match(flat_preds, flat_targets, preds_k, targets_k):
    """Best one-to-one mapping between predicted and true cluster ids.

    Based on implementation from IIC. Builds a (preds_k x preds_k) vote
    matrix and solves the assignment that maximizes agreement.
    """
    num_samples = flat_targets.shape[0]

    num_k = preds_k
    num_correct = np.zeros((num_k, num_k))

    for c1 in range(num_k):
        for c2 in range(num_k):
            # elementwise, so each sample contributes once
            votes = int(((flat_preds == c1) * (flat_targets == c2)).sum())
            num_correct[c1, c2] = votes

    # num_correct is small; minimize cost = num_samples - votes
    match = linear_sum_assignment(num_samples - num_correct)
    match = np.array(list(zip(*match)))

    # return as list of tuples, out_c to gt_c
    res = []
    for out_c, gt_c in match:
        res.append((out_c, gt_c))

    return res
2,269
36.213115
161
py
TRSSL
TRSSL-main/utils/uncr_util.py
import random
import time
import pickle

import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm

from .utils import AverageMeter


def uncr_generator(args, data_loader, model):
    """Estimate per-sample pseudo-label uncertainty from 10 augmented views.

    The loader yields, per batch, 10 independently-augmented views of each
    image followed by (targets, indexs). Softmax predictions over the views
    are averaged; the std-dev of the winning class across views is the
    uncertainty score.

    Returns:
        Dict {'index': [...], 'uncr': [...]} where the scores are
        max-normalized and clipped to [args.temperature, 1.0].
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    end = time.time()

    pseudo_idx = []
    pseudo_maxstd = []

    model.eval()
    data_loader = tqdm(data_loader)
    with torch.no_grad():
        for batch_idx, batch in enumerate(data_loader):
            # Original code unpacked inputs1..inputs10 and repeated the
            # forward/softmax block ten times; a loop over the views is
            # behaviorally identical and removes the duplication.
            *views, targets, indexs = batch
            data_time.update(time.time() - end)

            targets = targets.cuda()

            out_prob = []
            for view in views:
                outputs = model(view.cuda())
                out_prob.append(F.softmax(outputs, dim=1))

            # compute uncertainty scores
            out_prob = torch.stack(out_prob)
            out_std = torch.std(out_prob, dim=0)
            out_prob = torch.mean(out_prob, dim=0)
            _, max_idx = torch.max(out_prob, dim=1)
            # std of the argmax class across the augmented views
            max_std = out_std.gather(1, max_idx.view(-1, 1))

            pseudo_maxstd.extend(max_std.squeeze(1).cpu().numpy().tolist())
            pseudo_idx.extend(indexs.numpy().tolist())

            batch_time.update(time.time() - end)
            end = time.time()
            data_loader.set_description(
                "UncrGen Iter: {batch:4}/{iter:4}. Data: {data:.3f}s. Batch: {bt:.3f}s.".format(
                    batch=batch_idx + 1,
                    iter=len(data_loader),
                    data=data_time.avg,
                    bt=batch_time.avg,
                ))
        data_loader.close()

    pseudo_maxstd = np.array(pseudo_maxstd)
    pseudo_idx = np.array(pseudo_idx)

    # normalizing the uncertainty values
    pseudo_maxstd = pseudo_maxstd / max(pseudo_maxstd)
    pseudo_maxstd = np.clip(pseudo_maxstd, args.temperature, 1.0)

    uncr_temp = {'index': pseudo_idx.tolist(), 'uncr': pseudo_maxstd.tolist()}
    return uncr_temp
3,161
31.265306
158
py
TRSSL
TRSSL-main/utils/sinkhorn_knopp.py
import torch import numpy as np def shoot_infs(inp_tensor): """Replaces inf by maximum of tensor""" mask_inf = torch.isinf(inp_tensor) ind_inf = torch.nonzero(mask_inf) if len(ind_inf) > 0: for ind in ind_inf: if len(ind) == 2: inp_tensor[ind[0], ind[1]] = 0 elif len(ind) == 1: inp_tensor[ind[0]] = 0 m = torch.max(inp_tensor) for ind in ind_inf: if len(ind) == 2: inp_tensor[ind[0], ind[1]] = m elif len(ind) == 1: inp_tensor[ind[0]] = m return inp_tensor class SinkhornKnopp(torch.nn.Module): def __init__(self, args): super().__init__() self.num_iters = args.num_iters_sk self.epsilon = args.epsilon_sk self.imb_factor = args.imb_factor @torch.no_grad() def iterate(self, Q): Q = shoot_infs(Q) sum_Q = torch.sum(Q) Q /= sum_Q c = torch.ones(Q.shape[1]).cuda(non_blocking=True) / Q.shape[1] # Samples if self.imb_factor > 1: # obtain permutation/order from the marginals marginals_argsort = torch.argsort(Q.sum(1)) marginals_argsort = marginals_argsort.detach() r = [] for i in range(Q.shape[0]): # Classes r.append((1/self.imb_factor)**(i / (Q.shape[0] - 1.0))) r = np.array(r) r = r * (Q.shape[1]/Q.shape[0]) # Per-class distribution in the mini-batch r = torch.from_numpy(r).cuda(non_blocking=True) r[marginals_argsort] = torch.sort(r)[0] # Sort/permute based on the data order r = torch.clamp(r, min=1) # Clamp the min to have a balance distribution for the tail classes r /= r.sum() # Scaling to make it prob else: r = torch.ones(Q.shape[0]).cuda(non_blocking=True) / Q.shape[0] for it in range(self.num_iters): u = torch.sum(Q, dim=1) u = r / u u = shoot_infs(u) Q *= u.unsqueeze(1) Q *= (c / torch.sum(Q, dim=0)).unsqueeze(0) return (Q / torch.sum(Q, dim=0, keepdim=True)).t().float() @torch.no_grad() def forward(self, logits): # get assignments q = logits / self.epsilon M = torch.max(q) q -= M q = torch.exp(q).t() return self.iterate(q)
2,410
32.957746
105
py
dropmax
dropmax-master/run.py
from __future__ import print_function
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from lenet import base_softmax, dropmax
from accumulator import Accumulator
from mnist import mnist_input
import time
import os
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--mnist_path', type=str, default='./mnist')
parser.add_argument('--model', type=str, default='softmax')
parser.add_argument('--N', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--n_epochs', type=int, default=200)
parser.add_argument('--save_freq', type=int, default=20)
parser.add_argument('--savedir', type=str, default=None)
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--gpu_num', type=int, default=0)
args = parser.parse_args()

os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_num)

savedir = './results/%s' % args.model if args.savedir is None else args.savedir
if not os.path.isdir(savedir):
    os.makedirs(savedir)

bs = args.batch_size
N = args.N
# N labeled examples per class; train and val splits are the same size.
xtr, ytr, xva, yva, xte, yte = mnist_input(args.mnist_path, [N] * 10)
n_train_batches, n_val_batches, n_test_batches = int(N * 10 / bs), int(N * 10 / bs), int(10000 / bs)

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

if args.model == 'softmax':
    model = base_softmax
elif args.model == 'dropmax':
    model = dropmax
else:
    raise ValueError('Invalid model %s' % args.model)

# Train-mode and test-mode graphs share weights via reuse.
net = model(x, y, True)
tnet = model(x, y, False, reuse=True)


def train():
    """Build the loss, run the optimization loop, and save the final model."""
    if args.model == 'softmax':
        loss = net['cent'] + net['wd']
    else:
        loss = net['cent'] + net['wd'] + net['kl'] + net['aux'] + net['neg_ent']

    global_step = tf.train.get_or_create_global_step()
    # Step the learning rate down twice, at 1/3 and 2/3 of training.
    lr_step = int(n_train_batches * args.n_epochs / 3)
    lr = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                     [lr_step, lr_step * 2], [1e-3, 1e-4, 1e-5])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss, global_step=global_step)

    saver = tf.train.Saver(net['weights'])
    logfile = open(os.path.join(savedir, 'train.log'), 'wb', 0)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, net['cent'], net['acc']]
    val_logger = Accumulator('cent', 'acc')
    val_to_run = [tnet['cent'], tnet['acc']]

    for i in range(args.n_epochs):
        # shuffle the training data every epoch
        xytr = np.concatenate((xtr, ytr), axis=1)
        np.random.shuffle(xytr)
        xtr_, ytr_ = xytr[:, :784], xytr[:, 784:]

        line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
        print(line)
        logfile.write((line + '\n').encode())
        train_logger.clear()
        start = time.time()
        for j in range(n_train_batches):
            bx, by = xtr_[j * bs:(j + 1) * bs, :], ytr_[j * bs:(j + 1) * bs, :]
            train_logger.accum(sess.run(train_to_run, {x: bx, y: by}))
        train_logger.print_(header='train', epoch=i + 1,
                            time=time.time() - start, logfile=logfile)

        val_logger.clear()
        for j in range(n_val_batches):
            bx, by = xva[j * bs:(j + 1) * bs, :], yva[j * bs:(j + 1) * bs, :]
            val_logger.accum(sess.run(val_to_run, {x: bx, y: by}))
        val_logger.print_(header='val', epoch=i + 1,
                          time=time.time() - start, logfile=logfile)
        print()
        logfile.write(b'\n')

    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))


def test():
    """Restore the saved model and evaluate on the full MNIST test set."""
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights'])
    saver.restore(sess, os.path.join(savedir, 'model'))

    logfile = open(os.path.join(savedir, 'test.log'), 'wb', 0)
    logger = Accumulator('cent', 'acc')
    logger.accum(sess.run([tnet['cent'], tnet['acc']], {x: xte, y: yte}))
    logger.print_(header='test', logfile=logfile)
    logfile.close()


if __name__ == '__main__':
    if args.mode == 'train':
        train()
    elif args.mode == 'test':
        test()
    else:
        raise ValueError('Invalid mode %s' % args.mode)
4,155
33.92437
90
py
dropmax
dropmax-master/layers.py
import tensorflow as tf import numpy as np exp = tf.exp log = lambda x: tf.log(x + 1e-20) logit = lambda x: log(x) - log(1-x) sigmoid = tf.nn.sigmoid softmax = tf.nn.softmax relu = tf.nn.relu tau = 0.1 eps = 1e-20 dense = tf.layers.dense flatten = tf.contrib.layers.flatten # network components def conv(x, filters, kernel_size=3, strides=1, **kwargs): return tf.layers.conv2d(x, filters, kernel_size, strides, data_format='channels_first', **kwargs) def pool(x, **kwargs): return tf.layers.max_pooling2d(x, 2, 2, data_format='channels_first', **kwargs) def global_avg_pool(x): return tf.reduce_mean(x, axis=[2, 3]) # training modules def cross_entropy(expo, y): denom = log(tf.reduce_sum(expo, axis=1)) numer = log(tf.reduce_sum(tf.multiply(expo, y), axis=1)) return -tf.reduce_mean(numer - denom) def accuracy(expo, y): correct = tf.equal(tf.argmax(expo, 1), tf.argmax(y, 1)) return tf.reduce_mean(tf.cast(correct, tf.float32)) def weight_decay(decay, var_list=None): var_list = tf.trainable_variables() if var_list is None else var_list return decay*tf.add_n([tf.nn.l2_loss(var) for var in var_list]) # dropmax modules def dist_unif(p): return tf.contrib.distributions.Uniform(tf.zeros_like(p), tf.ones_like(p)) def genmask(p, y): u = dist_unif(p).sample() z = sigmoid(1/tau * (logit(p) + logit(u))) return tf.where(tf.equal(y,1), tf.ones_like(z), z) def kl_divergence(p, q, y): target = -log(p) nontarget = q*(log(q)-log(p)) + (1-q)*(log(1-q)-log(1-p)) kl = tf.where(tf.equal(y,1), target, nontarget) return tf.reduce_sum(tf.reduce_mean(kl,0)) def auxloss(r, y): target = -log(r) nontarget = -log(1-r) aux = tf.where(tf.equal(y,1), target, nontarget) return tf.reduce_sum(tf.reduce_mean(aux,0)) def neg_entropy(p): neg_ent = p*log(p) + (1-p)*log(1-p) return tf.reduce_sum(tf.reduce_mean(neg_ent,0))
1,936
28.348485
78
py
dropmax
dropmax-master/accumulator.py
from __future__ import print_function class Accumulator(): def __init__(self, *args): self.args = args self.argdict = {} for i, arg in enumerate(args): self.argdict[arg] = i self.sums = [0]*len(args) self.cnt = 0 def accum(self, val): val = [val] if type(val) is not list else val val = [v for v in val if v is not None] assert(len(val) == len(self.args)) for i in range(len(val)): self.sums[i] += val[i] self.cnt += 1 def clear(self): self.sums = [0]*len(self.args) self.cnt = 0 def get(self, arg, avg=True): i = self.argdict.get(arg, -1) assert(i is not -1) return (self.sums[i]/self.cnt if avg else self.sums[i]) def print_(self, header=None, epoch=None, it=None, time=None, logfile=None, do_not_print=[], as_int=[], avg=True): line = '' if header is None else header + ': ' if epoch is not None: line += ('epoch %d, ' % epoch) if it is not None: line += ('iter %d, ' % it) if time is not None: line += ('(%.3f secs), ' % time) args = [arg for arg in self.args if arg not in do_not_print] for arg in args[:-1]: val = self.sums[self.argdict[arg]] if avg: val /= self.cnt if arg in as_int: line += ('%s %d, ' % (arg, int(val))) else: line += ('%s %f, ' % (arg, val)) val = self.sums[self.argdict[args[-1]]] if avg: val /= self.cnt if arg in as_int: line += ('%s %d, ' % (arg, int(val))) else: line += ('%s %f' % (args[-1], val)) print(line) if logfile is not None: logfile.write((line + '\n').encode())
1,876
29.770492
68
py
dropmax
dropmax-master/lenet.py
from layers import * def base_softmax(x, y, training, name='base_softmax', reuse=None): x = tf.reshape(x, [-1, 1, 28, 28]) x = conv(x, 20, 5, name=name+'/conv1', reuse=reuse) x = relu(x) x = pool(x, name=name+'/pool1') x = conv(x, 50, 5, name=name+'/conv2', reuse=reuse) x = relu(x) x = pool(x, name=name+'/pool2') x = flatten(x) h = dense(x, 500, activation=relu, name=name+'/dense', reuse=reuse) o = dense(h, 10, name=name+'/logits', reuse=reuse) net = {} net['cent'] = cross_entropy(exp(o), y) net['acc'] = accuracy(exp(o), y) all_vars = tf.get_collection('variables', scope=name) net['weights'] = all_vars net['wd'] = weight_decay(1e-4, var_list=net['weights']) return net def dropmax(x, y, training, name='dropmax', reuse=None): x = tf.reshape(x, [-1, 1, 28, 28]) x = conv(x, 20, 5, name=name+'/conv1', reuse=reuse) x = relu(x) x = pool(x, name=name+'/pool1') x = conv(x, 50, 5, name=name+'/conv2', reuse=reuse) x = relu(x) x = pool(x, name=name+'/pool2') x = flatten(x) h = dense(x, 500, activation=relu, name=name+'/dense', reuse=reuse) # dropmax branches o = dense(h, 10, name=name+'/logits', reuse=reuse) ph = dense(h, 10, name=name+'/ph_branch', reuse=reuse) rh = dense(h, 10, name=name+'/rh_branch', reuse=reuse) qh = tf.stop_gradient(ph) + rh p = sigmoid(ph) r = sigmoid(rh) q = sigmoid(qh) # sampling the dropout masks z = genmask(q, y) net = {} net['cent'] = cross_entropy(((z if training else p)+eps)*exp(o), y) net['acc'] = accuracy((p+eps)*exp(o), y) all_vars = tf.get_collection('variables', scope=name) net['weights'] = all_vars net['wd'] = weight_decay(1e-4, var_list=[v for v in all_vars if 'branch' not in v.name]) # dropmax modules net['kl'] = kl_divergence(p, q, y) net['aux'] = auxloss(r, y) net['neg_ent'] = neg_entropy(p) return net
1,968
31.816667
71
py
dropmax
dropmax-master/mnist.py
import numpy as np from tensorflow.examples.tutorials.mnist import input_data def mnist_input(path, nlist): mnist = input_data.read_data_sets(path, one_hot=True, validation_size=0) x, y = mnist.train.images, mnist.train.labels y_ = np.argmax(y, axis=1) xtr = [x[y_==k][:nlist[k],:] for k in range(10)] ytr = [y[y_==k][:nlist[k],:] for k in range(10)] xtr, ytr = np.concatenate(xtr, axis=0), np.concatenate(ytr, axis=0) xva = [x[y_==k][nlist[k]:2*nlist[k],:] for k in range(10)] yva = [y[y_==k][nlist[k]:2*nlist[k],:] for k in range(10)] xva, yva = np.concatenate(xva, axis=0), np.concatenate(yva, axis=0) xte, yte = mnist.test.images, mnist.test.labels return xtr, ytr, xva, yva, xte, yte
737
37.842105
76
py
BiRTE
BiRTE-main/main.py
from transformers import WEIGHTS_NAME,AdamW, get_linear_schedule_with_warmup from bert4keras.tokenizers import Tokenizer from model import BiRTE from util import * from tqdm import tqdm import random import os import torch.nn as nn import torch from transformers.modeling_bert import BertConfig import json def search(pattern, sequence): """从sequence中寻找子串pattern 如果找到,返回第一个下标;否则返回-1。 """ n = len(pattern) for i in range(len(sequence)): if sequence[i:i + n] == pattern: return i return -1 def judge(ex): '''判断样本是否正确''' for s,p,o in ex["triple_list"]: if s=='' or o=='' or s not in ex["text"] or o not in ex["text"]: return False return True class data_generator(DataGenerator): """数据生成器 """ def __init__(self, args, train_data, tokenizer, predicate2id, id2predicate): super(data_generator, self).__init__(train_data, args.batch_size) self.max_len=args.max_len self.tokenizer=tokenizer self.predicate2id=predicate2id self.id2predicate=id2predicate def __iter__(self, is_random=True): batch_token_ids, batch_mask = [], [] batch_s1_labels, batch_o1_labels,\ batch_s2_mask, batch_o2_mask, batch_s2_labels, batch_o2_labels,\ batch_s3_mask, batch_o3_mask, batch_r = [],[],[],[],[],[],[],[],[] for is_end, d in self.sample(is_random): if judge(d)==False: continue token_ids, _ ,mask = self.tokenizer.encode( d['text'], max_length=self.max_len ) # 整理三元组 {s: [(o, p)]} spoes_s = {} spoes_o = {} for s, p, o in d['triple_list']: s = self.tokenizer.encode(s)[0][1:-1] p = self.predicate2id[p] o = self.tokenizer.encode(o)[0][1:-1] s_idx = search(s, token_ids) o_idx = search(o, token_ids) if s_idx != -1 and o_idx != -1: s_loc = (s_idx, s_idx + len(s) - 1) o_loc = (o_idx, o_idx + len(o) - 1) if s_loc not in spoes_s: spoes_s[s_loc] = [] spoes_s[s_loc].append((o_loc,p)) if o_loc not in spoes_o: spoes_o[o_loc] = [] spoes_o[o_loc].append((s_loc,p)) if spoes_s and spoes_o: # s1_labels o1_labels def get_entity1_labels(item,l): res=np.zeros([l,2]) for start,end in item: res[start][0]=1 res[end][1]=1 return res 
s1_labels = get_entity1_labels(spoes_s, len(token_ids)) o1_labels = get_entity1_labels(spoes_o, len(token_ids)) # s2_labels,o2_labels,s2_mask,o2_mask def get_entity2_labels_mask(item,l): start, end = random.choice(list(item.keys())) #构造labels labels = np.zeros((l, 2)) if (start,end) in item: for loc,_ in item[(start,end)]: labels[loc[0], 0] = 1 labels[loc[1], 1] = 1 #构造mask mask=np.zeros(l) mask[start]=1 mask[end]=1 return labels,mask o2_labels,s2_mask=get_entity2_labels_mask(spoes_s,len(token_ids)) s2_labels,o2_mask=get_entity2_labels_mask(spoes_o,len(token_ids)) #s3_mask,o3_mask,r s_loc=random.choice(list(spoes_s.keys())) o_loc,_=random.choice(spoes_s[s_loc]) r=np.zeros(len(self.id2predicate)) if s_loc in spoes_s: for loc,the_r in spoes_s[s_loc]: if loc==o_loc: r[the_r]=1 s3_mask=np.zeros(len(token_ids)) o3_mask=np.zeros(len(token_ids)) s3_mask[s_loc[0]]=1 s3_mask[s_loc[1]]=1 o3_mask[o_loc[0]]=1 o3_mask[o_loc[1]]=1 # 构建batch batch_token_ids.append(token_ids) batch_mask.append(mask) batch_s1_labels.append(s1_labels) batch_o1_labels.append(o1_labels) batch_s2_mask.append(s2_mask) batch_o2_mask.append(o2_mask) batch_s2_labels.append(s2_labels) batch_o2_labels.append(o2_labels) batch_s3_mask.append(s3_mask) batch_o3_mask.append(o3_mask) batch_r.append(r) if len(batch_token_ids) == self.batch_size or is_end: #输出batch batch_token_ids,batch_mask,\ batch_s1_labels,batch_o1_labels,\ batch_s2_mask,batch_o2_mask,batch_s2_labels,batch_o2_labels,\ batch_s3_mask,batch_o3_mask=\ [sequence_padding(i).astype(np.int) for i in [batch_token_ids,batch_mask, batch_s1_labels,batch_o1_labels, batch_s2_mask,batch_o2_mask,batch_s2_labels,batch_o2_labels, batch_s3_mask,batch_o3_mask]] batch_r = np.array(batch_r).astype(np.int) yield [ batch_token_ids, batch_mask, batch_s1_labels, batch_o1_labels, batch_s2_mask, batch_o2_mask, batch_s2_labels, batch_o2_labels, batch_s3_mask, batch_o3_mask,batch_r ] batch_token_ids, batch_mask = [], [] batch_s1_labels, batch_o1_labels, \ batch_s2_mask, 
batch_o2_mask, batch_s2_labels, batch_o2_labels, \ batch_s3_mask, batch_o3_mask, batch_r = [], [], [], [], [], [], [], [], [] class CE(): def __call__(self,args,targets, pred, from_logist=False): ''' 计算二分类交叉熵 :param targets: [batch,seq,2] :param pred: [batch,seq,2] :param from_logist:是否没有经过softmax/sigmoid :return: loss.shape==targets.shape==pred.shape ''' if not from_logist: '''返回到没有经过softmax/sigmoid得张量''' # 截取pred,防止趋近于0或1,保持在[min_num,1-min_num] pred = torch.where(pred < 1 - args.min_num, pred, torch.ones(pred.shape).to("cuda") * 1 - args.min_num).to("cuda") pred = torch.where(pred > args.min_num, pred, torch.ones(pred.shape).to("cuda") * args.min_num).to("cuda") pred = torch.log(pred / (1 - pred)) relu = nn.ReLU() # 计算传统的交叉熵loss loss = relu(pred) - pred * targets + torch.log(1 + torch.exp(-1 * torch.abs(pred).to("cuda"))).to("cuda") return loss def train(args): output_path = os.path.join(args.base_path, args.dataset, "output", args.file_id) train_path=os.path.join(args.base_path,args.dataset,"train.json") dev_path=os.path.join(args.base_path,args.dataset,"dev.json") test_path=os.path.join(args.base_path,args.dataset,"test.json") rel2id_path=os.path.join(args.base_path,args.dataset,"rel2id.json") test_pred_path=os.path.join(output_path,"test_pred.json") dev_pred_path=os.path.join(output_path,"dev_pred.json") log_path=os.path.join(output_path,"log.txt") if not os.path.exists(output_path): os.makedirs(output_path) print_config(args) # 加载数据集 train_data = json.load(open(train_path)) valid_data = json.load(open(dev_path)) test_data = json.load(open(test_path)) id2predicate, predicate2id = json.load(open(rel2id_path)) tokenizer = Tokenizer(args.bert_vocab_path) # 注意修改 config = BertConfig.from_pretrained(args.bert_config_path) config.num_p=len(id2predicate) torch.cuda.set_device(int(args.cuda_id)) train_model = BiRTE.from_pretrained(pretrained_model_name_or_path=args.bert_model_path,config=config) train_model.to("cuda") if not os.path.exists(output_path): 
os.makedirs(output_path) dataloader = data_generator(args, train_data, tokenizer, predicate2id, id2predicate) t_total = len(dataloader) * args.num_train_epochs """ 优化器准备 """ optimizer_grouped_parameters = [ { "params": [p for n, p in train_model.named_parameters() if "bert." in n], "weight_decay": args.weight_decay, "lr": args.bert_learning_rate, }, { "params": [p for n, p in train_model.named_parameters() if "bert." not in n], "weight_decay": args.weight_decay, "lr": args.other_learning_rate, } ] optimizer = AdamW(optimizer_grouped_parameters, eps=args.min_num) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup * t_total, num_training_steps=t_total ) best_f1 = -1.0 # 全局的best_f1 step = 0 binary_crossentropy=CE() no_change=0 for epoch in range(args.num_train_epochs): train_model.train() epoch_loss = 0 with tqdm(total=dataloader.__len__(), desc="train", ncols=80) as t: for i, batch in enumerate(dataloader): batch = [torch.tensor(d).to("cuda") for d in batch] batch_token_ids, batch_mask,\ batch_s1_labels, batch_o1_labels,\ batch_s2_mask, batch_o2_mask, batch_s2_labels, batch_o2_labels,\ batch_s3_mask, batch_o3_mask, batch_r = batch s1_pred,o1_pred,s2_pred,o2_pred,p_pred = train_model(batch_token_ids, batch_mask, batch_s2_mask, batch_o2_mask, batch_s3_mask, batch_o3_mask) #计算损失 def get_loss(target,pred,mask): loss = binary_crossentropy(args, targets=target, pred=pred) # BL2 loss = torch.mean(loss, dim=2).to("cuda") # BL loss = torch.sum(loss * mask).to("cuda") / torch.sum(mask).to("cuda") return loss s1_loss=get_loss(target=batch_s1_labels,pred=s1_pred,mask=batch_mask) o1_loss=get_loss(target=batch_o1_labels,pred=o1_pred,mask=batch_mask) s2_loss=get_loss(target=batch_s2_labels,pred=s2_pred,mask=batch_mask) o2_loss=get_loss(target=batch_o2_labels,pred=o2_pred,mask=batch_mask) r_loss=binary_crossentropy(args,targets=batch_r,pred=p_pred) r_loss=r_loss.mean() loss=s1_loss+o1_loss+s2_loss+o2_loss+r_loss loss.backward() step += 1 epoch_loss 
+= loss.item() torch.nn.utils.clip_grad_norm_(train_model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule train_model.zero_grad() t.set_postfix(loss="%.4lf"%(loss.cpu().item())) t.update(1) f1, precision, recall = evaluate(args,tokenizer,id2predicate,train_model,valid_data,dev_pred_path) if f1 > best_f1: # Save model checkpoint best_f1 = f1 torch.save(train_model.state_dict(), os.path.join(output_path, WEIGHTS_NAME)) # 保存最优模型权重 epoch_loss = epoch_loss / dataloader.__len__() with open(log_path, "a", encoding="utf-8") as f: print("epoch:%d\tloss:%f\tf1:%f\tprecision:%f\trecall:%f\tbest_f1:%f" % ( int(epoch), epoch_loss, f1, precision, recall, best_f1), file=f) #对test集合进行预测 #加载训练好的权重 train_model.load_state_dict(torch.load(os.path.join(output_path, WEIGHTS_NAME), map_location="cuda")) f1, precision, recall = evaluate(args,tokenizer,id2predicate,train_model, test_data, test_pred_path) with open(log_path, "a", encoding="utf-8") as f: print("test: f1:%f\tprecision:%f\trecall:%f" % (f1, precision, recall), file=f) def extract_spoes(args,tokenizer,id2predicate,model,text,entity_start=0.5,entity_end=0.5,p_num=0.5): """抽取输入text所包含的三元组 """ #sigmoid=nn.Sigmoid() if isinstance(model,torch.nn.DataParallel): model=model.module model.to("cuda") tokens = tokenizer.tokenize(text, max_length=args.max_len) mapping = tokenizer.rematch(text, tokens) token_ids, _ ,mask = tokenizer.encode(text, max_length=args.max_len) #获取BERT表示 model.eval() with torch.no_grad(): head,tail,rel,cls = model.get_embed(torch.tensor([token_ids]).to("cuda"), torch.tensor([mask]).to("cuda")) head = head.cpu().detach().numpy() #[1,L,H] tail = tail.cpu().detach().numpy() rel = rel.cpu().detach().numpy() cls = cls.cpu().detach().numpy() def get_entity(entity_pred): start = np.where(entity_pred[0, :, 0] > entity_start)[0] end = np.where(entity_pred[0, :, 1] > entity_end)[0] entity = [] for i in start: j = end[end >= i] if len(j) > 0: j = j[0] entity.append((i, j)) 
return entity #抽取s1 o1 model.eval() with torch.no_grad(): s1_preds = model.s_pred(torch.tensor(head).to("cuda"),torch.tensor(cls).to("cuda")) o1_preds = model.o_pred(torch.tensor(tail).to("cuda"),torch.tensor(cls).to("cuda")) s1_preds = s1_preds.cpu().detach().numpy() #[1,L,2] o1_preds = o1_preds.cpu().detach().numpy() #[1,L,2] s1_preds[:,0,:],s1_preds[:,-1,:]=0.0,0.0 o1_preds[:,0,:],o1_preds[:,-1,:]=0.0,0.0 s1=get_entity(s1_preds) o1=get_entity(o1_preds) #获得s_loc,o_loc pairs_0=[] for s in s1: for o in o1: pairs_0.append((s[0],s[1],o[0],o[1])) pairs_1=[] for s in s1: #s:(start,end) s2_mask=np.zeros(len(token_ids)).astype(np.int) s2_mask[s[0]] = 1 s2_mask[s[1]] = 1 model.eval() with torch.no_grad(): o2_pred=model.o_pred_from_s(torch.tensor(head).to("cuda"),torch.tensor(tail).to("cuda"), torch.tensor([s2_mask]).to("cuda"),cls=torch.tensor(cls).to("cuda")) o2_pred = o2_pred.cpu().detach().numpy() # [1,L,2] o2_pred[:, 0, :], o2_pred[:, -1, :] = 0.0, 0.0 objects2 = get_entity(o2_pred) if objects2: for o in objects2: pairs_1.append((s[0],s[1],o[0],o[1])) pairs_2=[] for o in o1: #o:(start,end) o2_mask=np.zeros(len(token_ids)).astype(np.int) o2_mask[o[0]] = 1 o2_mask[o[1]] = 1 model.eval() with torch.no_grad(): s2_pred=model.s_pred_from_o(torch.tensor(head).to("cuda"),torch.tensor(tail).to("cuda"), torch.tensor([o2_mask]).to("cuda"),cls=torch.tensor(cls).to("cuda")) s2_pred = s2_pred.cpu().detach().numpy() # [1,L,2] s2_pred[:, 0, :], s2_pred[:, -1, :] = 0.0, 0.0 subjects2 = get_entity(s2_pred) if subjects2: for s in subjects2: pairs_2.append((s[0],s[1],o[0],o[1])) pairs_1=set(pairs_1) pairs_2=set(pairs_2) pairs=list(pairs_1|pairs_2) if pairs: # m * 4 s_mask=np.zeros([len(pairs),len(token_ids)]).astype(np.int) o_mask=np.zeros([len(pairs),len(token_ids)]).astype(np.int) for i,pair in enumerate(pairs): s1, s2, o1, o2=pair s_mask[i,s1]=1 s_mask[i,s2]=1 o_mask[i,o1]=1 o_mask[i,o2]=1 spoes = [] rel=np.repeat(rel,len(pairs),0) # 传入subject,抽取object和predicate model.eval() with 
torch.no_grad(): p_pred = model.p_pred( rel=torch.tensor(rel).to("cuda"), s_mask=torch.tensor(s_mask).to("cuda"), o_mask=torch.tensor(o_mask).to("cuda"), ) p_pred = p_pred.cpu().detach().numpy() #BR index,p_index=np.where(p_pred>p_num) for i,p in zip(index,p_index): s1,s2,o1,o2=pairs[i] spoes.append( ( (mapping[s1][0],mapping[s2][-1]), p, (mapping[o1][0], mapping[o2][-1]) ) ) return [(text[s[0]:s[1] + 1], id2predicate[str(p)], text[o[0]:o[1] + 1]) for s, p, o, in spoes] else: return [] def evaluate(args,tokenizer,id2predicate,model,evl_data,evl_path): """评估函数,计算f1、precision、recall """ X, Y, Z = 1e-10, 1e-10, 1e-10 f = open(evl_path, 'w', encoding='utf-8') pbar = tqdm() for d in evl_data: R = set(extract_spoes(args,tokenizer,id2predicate,model,d['text'])) T = set([(i[0],i[1],i[2]) for i in d['triple_list']]) X += len(R & T) Y += len(R) Z += len(T) f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z pbar.update() pbar.set_description( 'f1: %.5f, precision: %.5f, recall: %.5f' % (f1, precision, recall) ) s = json.dumps({ 'text': d['text'], 'triple_list': list(T), 'triple_list_pred': list(R), 'new': list(R - T), 'lack': list(T - R), },ensure_ascii=False,indent=4) f.write(s + '\n') pbar.close() f.close() return f1, precision, recall def test(args): torch.cuda.set_device(int(args.cuda_id)) test_path = os.path.join(args.base_path, args.dataset, "test.json") output_path=os.path.join(args.base_path,args.dataset,"output",args.file_id) test_pred_path = os.path.join(output_path, "test_pred.json") rel2id_path=os.path.join(args.base_path,args.dataset,"rel2id.json") test_data = json.load(open(test_path)) id2predicate, predicate2id = json.load(open(rel2id_path)) config = BertConfig.from_pretrained(args.bert_config_path) tokenizer = Tokenizer(args.bert_vocab_path) config.num_p=len(id2predicate) train_model = BiRTE.from_pretrained(pretrained_model_name_or_path=args.bert_model_path,config=config) train_model.to("cuda") 
train_model.load_state_dict(torch.load(os.path.join(output_path, WEIGHTS_NAME), map_location="cuda")) f1, precision, recall = evaluate(args,tokenizer,id2predicate,train_model, test_data, test_pred_path) print("f1:%f, precision:%f, recall:%f"%(f1, precision, recall))
18,791
39.32618
126
py
BiRTE
BiRTE-main/model.py
from transformers.modeling_bert import BertModel,BertPreTrainedModel import torch.nn as nn import torch from torch.autograd import Variable import numpy as np class Biaffine(nn.Module): ''' Args: in1_features: size of each first input sample in2_features: size of each second input sample out_features: size of each output sample bias: If set to False, the layer will not learn an additive bias. Default: ``[True, True]`` bias[0, 1]: the bias of U_m bias[2]: the b_m ''' def __init__(self, in1_features, in2_features, out_features, bias=(True, True, True)): super(Biaffine, self).__init__() self.in1_features = in1_features self.in2_features = in2_features self.out_features = out_features self.bias = bias self.linear_input_size = in1_features + int(bias[0]) self.linear_output_size = out_features * (in2_features + int(bias[1])) # 3-dim -> 2-dim self.linear = nn.Linear(in_features=self.linear_input_size, out_features=self.linear_output_size, bias=False) self.linear_1 = nn.Linear(in_features=2*self.in1_features+1, out_features=self.out_features, bias=False) self.linear_2 = nn.Linear(in_features=2*self.in1_features+1, out_features=self.out_features, bias=False) self.reset_parameters() def reset_parameters(self): U = np.zeros((self.linear_output_size, self.linear_input_size), dtype=np.float32) W1 = np.zeros((self.out_features, 1+2*self.in1_features), dtype=np.float32) W2 = np.zeros((self.out_features, 1+2*self.in1_features), dtype=np.float32) self.linear.weight.data.copy_(torch.from_numpy(U)) self.linear_1.weight.data.copy_(torch.from_numpy(W1)) self.linear_2.weight.data.copy_(torch.from_numpy(W2)) def forward(self, input1, input2): input1=input1.unsqueeze(dim=1) input2=input2.unsqueeze(dim=1) input3=torch.cat([input1, input2],dim=-1) # batch_size, len1, dim1 = input1.size() # batch_size, len2, dim2 = input2.size() batch_size,_, dim1 = input1.size() batch_size,_, dim2 = input2.size() if self.bias[0]: ones = input1.data.new(batch_size, 1,1).zero_().fill_(1) input1 = 
torch.cat((input1, Variable(ones)), dim=2) dim1 += 1 if self.bias[1]: ones = input2.data.new(batch_size, 1,1).zero_().fill_(1) input2 = torch.cat((input2, Variable(ones)), dim=2) dim2 += 1 if self.bias[2]: ones = input3.data.new(batch_size, 1,1).zero_().fill_(1) input3 = torch.cat((input3, Variable(ones)), dim=2) affine = self.linear(input1) affine = affine.view(batch_size, self.out_features, dim2) input2 = torch.transpose(input2, 1, 2) biaffine = torch.transpose(torch.bmm(affine, input2), 1, 2) biaffine = biaffine.contiguous().view(batch_size, 1, 1, self.out_features) # affine_1 = self.linear_1(input3) # affine_1 = affine_1.view(batch_size, 1, 1, self.out_features) # biaffine = biaffine + affine_1 return biaffine.squeeze(dim=1).squeeze(dim=1) def __repr__(self): return self.__class__.__name__ + ' (' \ + 'in1_features=' + str(self.in1_features) \ + ', in2_features=' + str(self.in2_features) \ + ', out_features=' + str(self.out_features) + ')' class BiRTE(BertPreTrainedModel): def __init__(self, config): super(BiRTE, self).__init__(config) self.bert=BertModel(config=config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.w1=nn.Linear(config.hidden_size,config.hidden_size) self.w2=nn.Linear(config.hidden_size,config.hidden_size) self.w3=nn.Linear(config.hidden_size,config.hidden_size) #s self.s_classier=nn.Linear(config.hidden_size,2) self.s_classier_from_o=nn.Linear(config.hidden_size,2) #o self.o_classier=nn.Linear(config.hidden_size,2) self.o_classier_from_s=nn.Linear(config.hidden_size,2) #p self.biaffine=Biaffine(config.hidden_size,config.hidden_size,config.num_p) self.sigmoid=nn.Sigmoid() self.init_weights() def forward(self, token_ids, mask_token_ids,s2_mask,o2_mask,s3_mask,o3_mask): ''' :param token_ids: :param token_type_ids: :param mask_token_ids: :param s_loc: :return: s_pred: [batch,seq,2] op_pred: [batch,seq,p,2] ''' #获取表示 head,tail,rel,cls=self.get_embed(token_ids, mask_token_ids) #初步预测s o s1_pred=self.s_pred(head,cls=cls) 
o1_pred=self.o_pred(tail,cls=cls) #进一步预测 s,o o2_pred=self.o_pred_from_s(head,tail,s2_mask,cls) s2_pred=self.s_pred_from_o(head,tail,o2_mask,cls) #预测r p_pred=self.p_pred(rel,s3_mask,o3_mask) return s1_pred,o1_pred,s2_pred,o2_pred,p_pred def get_embed(self,token_ids, mask_token_ids): bert_out = self.bert(input_ids=token_ids.long(), attention_mask=mask_token_ids.long()) embed=bert_out[0] head=self.w1(embed) tail=self.w2(embed) rel=self.w3(embed) cls=bert_out[1] head=head+tail[:,0,:].unsqueeze(dim=1) tail=tail+head[:,0,:].unsqueeze(dim=1) head, tail,rel,cls=self.dropout(head),self.dropout(tail),self.dropout(rel),self.dropout(cls) return head, tail,rel,cls def extract_entity(self, input, mask): ''' 取首尾平均 :param input:BLH :param mask:BL :return: BH ''' _,_,dim=input.shape entity=input*mask.unsqueeze(dim=-1) #BLH entity=entity.sum(dim=1)/mask.sum(dim=-1,keepdim=True) #BH/B1 return entity def s_pred(self,head,cls): s_logist=self.s_classier(head+cls.unsqueeze(dim=1)) #BL,2 s_pred=self.sigmoid(s_logist) return s_pred def o_pred(self,tail,cls): o_logist=self.o_classier(tail+cls.unsqueeze(dim=1)) #BL,2 o_pred=self.sigmoid(o_logist) return o_pred def o_pred_from_s(self,head,tail,s_mask,cls): s_entity=self.extract_entity(head,s_mask) s2o_embed=tail*s_entity.unsqueeze(dim=1) #BLH o_logist=self.o_classier_from_s(s2o_embed+cls.unsqueeze(dim=1)) #BL2 o_pred=self.sigmoid(o_logist) return o_pred #BL2 def s_pred_from_o(self,head,tail,o_mask,cls): o_entity=self.extract_entity(tail,o_mask) o2s_embed=head*o_entity.unsqueeze(dim=1) #BLH s_logist=self.s_classier_from_o(o2s_embed+cls.unsqueeze(dim=1)) #BL2 s_pred=self.sigmoid(s_logist) return s_pred #BL2 def p_pred(self, rel, s_mask, o_mask): s_entity=self.extract_entity(rel,s_mask) #BH o_entity=self.extract_entity(rel,o_mask) #BH logist=self.biaffine(s_entity,o_entity) #bc r_pred=self.sigmoid(logist) return r_pred #BR
7,229
36.46114
100
py
BiRTE
BiRTE-main/run.py
import argparse from main import * import torch parser = argparse.ArgumentParser(description='Model Controller') parser.add_argument('--cuda_id', default="0", type=str) parser.add_argument('--base_path', default="./dataset", type=str) parser.add_argument('--dataset', default='WebNLG', type=str) parser.add_argument('--train', default="train", type=str) parser.add_argument('--bert_learning_rate', default=3e-5, type=float) parser.add_argument('--other_learning_rate', default=(3e-5)*5, type=float) parser.add_argument('--num_train_epochs', default=100, type=int) parser.add_argument('--file_id', default='999', type=str) parser.add_argument('--batch_size', default=6, type=int) parser.add_argument('--max_len', default=100, type=int) parser.add_argument('--warmup', default=0.0, type=float) parser.add_argument('--weight_decay', default=0.0, type=float) parser.add_argument('--max_grad_norm', default=1.0, type=float) parser.add_argument('--min_num', default=1e-7, type=float) parser.add_argument('--bert_vocab_path', default="./pretrained/bert-base-cased/vocab.txt", type=str) parser.add_argument('--bert_config_path', default="./pretrained/bert-base-cased/config.json", type=str) parser.add_argument('--bert_model_path', default="./pretrained/bert-base-cased/pytorch_model.bin", type=str) args = parser.parse_args() if args.train=="train": train(args) else: test(args)
1,383
45.133333
108
py
BiRTE
BiRTE-main/util.py
#! -*- coding:utf-8 -*- import numpy as np import random from copy import deepcopy import os import pickle import torch import json def get_more_data(all_data): s_more = [] o_more = [] for ex in all_data: all_s = set() all_o = set() for s, p, o in ex["triple_list"]: all_s.add(s) all_o.add(o) if len(all_s) >= len(all_o): s_more.append(ex) if len(all_o) >= len(all_s): o_more.append(ex) return s_more,o_more def get_over_lap(all_data): normal,epo,seo=[],[],[] for ex in all_data: item=set() for s, p, o in ex["triple_list"]: item.add((s, p, o)) spo=[] for s,p,o in item: spo.extend([s,o,p]) if is_normal_triple(spo): # 绗琲涓彞瀛愮殑spo鍒楄〃 normal.append(ex) if is_multi_label(spo): epo.append(ex) if is_over_lapping(spo): seo.append(ex) return [normal,epo,seo] def is_normal_triple(triples, is_relation_first=False): entities = set() for i, e in enumerate(triples): key = 0 if is_relation_first else 2 if i % 3 != key: entities.add(e) return len(entities) == 2 * int(len(triples) / 3) def is_multi_label(triples, is_relation_first=False): if is_normal_triple(triples, is_relation_first): return False if is_relation_first: entity_pair = [tuple(triples[3 * i + 1: 3 * i + 3]) for i in range(int(len(triples) / 3))] else: entity_pair = [tuple(triples[3 * i: 3 * i + 2]) for i in range(int(len(triples) / 3))] # if is multi label, then, at least one entity pair appeared more than once return len(entity_pair) != len(set(entity_pair)) def is_over_lapping(triples, is_relation_first=False): '''实体对集合 -> 实体列表 -> 实体集合 -> len(实体集合)==2*len(实体对集合)''' if is_normal_triple(triples, is_relation_first): return False if is_relation_first: entity_pair = [tuple(triples[3 * i + 1: 3 * i + 3]) for i in range(int(len(triples) / 3))] else: entity_pair = [tuple(triples[3 * i: 3 * i + 2]) for i in range(int(len(triples) / 3))] # remove the same entity_pair, then, if one entity appear more than once, it's overlapping entity_pair = set(entity_pair) entities = [] for pair in entity_pair: entities.extend(pair) entities = 
set(entities) return len(entities) != 2 * len(entity_pair) def to_set(item): ''' 三元组列表转化为集合 ''' item_set=set() for s,p,o in item: item_set.add((s,p,o)) return item_set def get_pred_data(path): all_s="" with open(path,"r") as f1: for l in f1.readlines(): text=l.rsplit("\n")[0] if text=="}": text="}," all_s+=text return json.loads("["+all_s[:-1]+"]") def get_json_data(path): with open(path,"r") as f: data=json.load(f) return data def save_json_data(data,path): with open(path,"w") as f: json.dump(data,f,indent=4,ensure_ascii=False) def set_seed(): # 每次运行代码时设置相同的seed,则每次生成的随机数也相同,如果不设置seed,则每次生成的随机数都会不一样 random.seed(1) # seed()方法改变随机数生成器的种子,可以在调用其他随机模块函数之前调用此函数 np.random.seed(1) torch.manual_seed(1) def print_config(args): config_path=os.path.join(args.base_path, args.dataset, "output", args.file_id,"config.txt") with open(config_path,"w",encoding="utf-8") as f: for k,v in sorted(vars(args).items()): print(k,'=',v,file=f) def is_number(s): try: float(s) return True except ValueError: pass try: import unicodedata unicodedata.numeric(s) return True except (TypeError, ValueError): pass return False def mat_padding(inputs,dim=0, length=None, padding=0): """Numpy函数,将序列的dim维padding到同一长度 """ if not type(inputs[0]) is np.ndarray: inputs = [np.array(i) for i in inputs] if length is None: length = max([x.shape[dim] for x in inputs]) pad_width = [(0, 0) for _ in np.shape(inputs[0])] outputs = [] for x in inputs: pad_width[0] = (0, length - x.shape[dim]) pad_width[1] = (0, length - x.shape[dim]) x = np.pad(x, pad_width, 'constant', constant_values=padding) outputs.append(x) return np.array(outputs) def tuple_mat_padding(inputs,dim=1, length=None, padding=0): """Numpy函数,将序列的dim维padding到同一长度 """ if not type(inputs[0]) is np.ndarray: inputs = [np.array(i) for i in inputs] if length is None: length = max([x.shape[dim] for x in inputs]) pad_width = [(0, 0) for _ in np.shape(inputs[0])] outputs = [] for x in inputs: pad_width[1] = (0, length - x.shape[dim]) pad_width[2] = (0, length - 
x.shape[dim]) x = np.pad(x, pad_width, 'constant', constant_values=padding) outputs.append(x) return np.array(outputs) def sequence_padding(inputs,dim=0, length=None, padding=0): """Numpy函数,将序列的dim维padding到同一长度 """ if not type(inputs[0]) is np.ndarray: inputs = [np.array(i) for i in inputs] if length is None: length = max([x.shape[dim] for x in inputs]) pad_width = [(0, 0) for _ in np.shape(inputs[0])] outputs = [] for x in inputs: pad_width[dim] = (0, length - x.shape[dim]) x = np.pad(x, pad_width, 'constant', constant_values=padding) outputs.append(x) return np.array(outputs) def data_augmentation(example,ex2): '''数据增强,返回新的样例''' same_example=deepcopy(example) try: a=random.randint(0,6) if a==0:#多个句子随机拼接 text1,text2=example["text"],ex2["text"] tokens1=text1.split() tokens2=text2.split() loc=random.randint(0,len(tokens1)) tokens=tokens1[:loc]+tokens2+tokens2[loc:] spo_list=[] text=" ".join(tokens) for s,p,o in example['triple_list']+ex2["triple_list"]: if s in text and o in text: spo_list.append([s,p,o]) res={"text":text,"triple_list":spo_list} elif a==1:#随机插入单词 text=example["text"] all_tokens=text.split() num_token=len(all_tokens)//10+1 #每10个单词随机插入一个词语 for i in range(num_token): token=random.choice(all_tokens) #将要插入的单词 loc = random.randint(0, len(all_tokens)) #将要插入的位置 all_tokens.insert(loc,token) text=" ".join(all_tokens) spo_list=[] for s,p,o in example["triple_list"]: if s in text and o in text: spo_list.append([s,p,o]) else: return same_example #保证原本的三元组不变 res={"text":text,"triple_list":spo_list} elif a==2:#随机减少单词 text=example["text"] all_tokens=text.split() num_token=len(text)//10+1 #每10个单词随机减少一个词语 for i in range(num_token): loc = random.randint(0, len(all_tokens)-1) #将要删除的位置 all_tokens.pop(loc) text=" ".join(all_tokens) spo_list=[] for s,p,o in example["triple_list"]: if s in text and o in text: spo_list.append([s,p,o]) else: return same_example #保证原本的三元组不变 res={"text":text,"triple_list":spo_list} else: #不进行数据增强 res=example if len(res["triple_list"])==0: 
#防止没有标注的情况 res=same_example return res except: return same_example def judge(ex): '''判断样本是否正确''' for s,p,o in ex["triple_list"]: if s=='' or o=='' or s not in ex["text"] or o not in ex["text"]: return False return True class DataGenerator(object): """数据生成器模版 """ def __init__(self, data, batch_size=32, buffer_size=None): self.data = data self.batch_size = batch_size if hasattr(self.data, '__len__'): self.steps = len(self.data) // self.batch_size if len(self.data) % self.batch_size != 0: self.steps += 1 else: self.steps = None self.buffer_size = buffer_size or batch_size * 1000 def __len__(self): return self.steps def sample(self, random=False): """采样函数,每个样本同时返回一个is_end标记 """ if random: #乱序 if self.steps is None: def generator(): caches, isfull = [], False for d in self.data: caches.append(d) if isfull: i = np.random.randint(len(caches)) yield caches.pop(i) elif len(caches) == self.buffer_size: isfull = True while caches: i = np.random.randint(len(caches)) yield caches.pop(i) else: def generator(): indices = list(range(len(self.data))) np.random.shuffle(indices) for i in indices: yield self.data[i] #返回样本编号 data = generator() else: #正序 data = iter(self.data) d_current = next(data) for d_next in data: yield False, d_current d_current = d_next yield True, d_current def __iter__(self, random=False): raise NotImplementedError def forfit(self): for d in self.__iter__(True): yield d class Vocab(object): def __init__(self, filename, load=False, word_counter=None, threshold=0): if load: assert os.path.exists(filename), "Vocab file does not exist at " + filename # load from file and ignore all other params self.id2word, self.word2id = self.load(filename) self.size = len(self.id2word) print("Vocab size {} loaded from file".format(self.size)) else: print("Creating vocab from scratch...") assert word_counter is not None, "word_counter is not provided for vocab creation." 
self.word_counter = word_counter if threshold > 1: # remove words that occur less than thres self.word_counter = dict([(k, v) for k, v in self.word_counter.items() if v >= threshold]) self.id2word = sorted(self.word_counter, key=lambda k: self.word_counter[k], reverse=True) # add special tokens to the beginning self.id2word = ['**PAD**', '**UNK**'] + self.id2word self.word2id = dict([(self.id2word[idx], idx) for idx in range(len(self.id2word))]) self.size = len(self.id2word) self.save(filename) print("Vocab size {} saved to file {}".format(self.size, filename)) def load(self, filename): with open(filename, 'rb') as infile: id2word = pickle.load(infile) word2id = dict([(id2word[idx], idx) for idx in range(len(id2word))]) return id2word, word2id def save(self, filename): # assert not os.path.exists(filename), "Cannot save vocab: file exists at " + filename if os.path.exists(filename): print("Overwriting old vocab file at " + filename) os.remove(filename) with open(filename, 'wb') as outfile: pickle.dump(self.id2word, outfile) return def map(self, token_list): """ Map a list of tokens to their ids. """ return [self.word2id[w] if w in self.word2id else constant.VOCAB_UNK_ID for w in token_list] def unmap(self, idx_list): """ Unmap ids back to tokens. """ return [self.id2word[idx] for idx in idx_list] def get_embeddings(self, word_vectors=None, dim=100): # self.embeddings = 2 * constant.EMB_INIT_RANGE * np.random.rand(self.size, dim) - constant.EMB_INIT_RANGE self.embeddings = np.zeros((self.size, dim)) if word_vectors is not None: assert len(list(word_vectors.values())[0]) == dim, \ "Word vectors does not have required dimension {}.".format(dim) for w, idx in self.word2id.items(): if w in word_vectors: self.embeddings[idx] = np.asarray(word_vectors[w]) return self.embeddings
12,507
32.354667
114
py
BiRTE
BiRTE-main/bert4keras/optimizers.py
# -*- coding: utf-8 -*- # 优化相关 import numpy as np import tensorflow as tf from bert4keras.backend import keras, K, is_tf_keras from bert4keras.snippets import is_string, string_matching from bert4keras.snippets import is_one_of, insert_arguments from bert4keras.backend import piecewise_linear import re class Adam(keras.optimizers.Optimizer): """重新定义Adam优化器,便于派生出新的优化器 (tensorflow的optimizer_v2类) """ def __init__( self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-6, bias_correction=True, **kwargs ): kwargs['name'] = kwargs.get('name') or 'Adam' super(Adam, self).__init__(**kwargs) self._set_hyper('learning_rate', learning_rate) self._set_hyper('beta_1', beta_1) self._set_hyper('beta_2', beta_2) self.epsilon = epsilon or K.epislon() self.bias_correction = bias_correction def _create_slots(self, var_list): for var in var_list: self.add_slot(var, 'm') self.add_slot(var, 'v') def _resource_apply(self, grad, var, indices=None): # 准备变量 var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr(var_dtype) m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') beta_1_t = self._get_hyper('beta_1', var_dtype) beta_2_t = self._get_hyper('beta_2', var_dtype) epsilon_t = K.cast(self.epsilon, var_dtype) local_step = K.cast(self.iterations + 1, var_dtype) beta_1_t_power = K.pow(beta_1_t, local_step) beta_2_t_power = K.pow(beta_2_t, local_step) # 更新公式 if indices is None: m_t = K.update(m, beta_1_t * m + (1 - beta_1_t) * grad) v_t = K.update(v, beta_2_t * v + (1 - beta_2_t) * grad**2) else: mv_ops = [K.update(m, beta_1_t * m), K.update(v, beta_2_t * v)] with tf.control_dependencies(mv_ops): m_t = self._resource_scatter_add( m, indices, (1 - beta_1_t) * grad ) v_t = self._resource_scatter_add( v, indices, (1 - beta_2_t) * grad**2 ) # 返回算子 with tf.control_dependencies([m_t, v_t]): if self.bias_correction: m_t = m_t / (1.0 - beta_1_t_power) v_t = v_t / (1.0 - beta_2_t_power) var_t = var - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) return K.update(var, var_t) def 
_resource_apply_dense(self, grad, var): return self._resource_apply(grad, var) def _resource_apply_sparse(self, grad, var, indices): return self._resource_apply(grad, var, indices) def get_config(self): config = { 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'beta_1': self._serialize_hyperparameter('beta_1'), 'beta_2': self._serialize_hyperparameter('beta_2'), 'epsilon': self.epsilon, } base_config = super(Adam, self).get_config() return dict(list(base_config.items()) + list(config.items())) class AdaFactorBase(keras.optimizers.Optimizer): """AdaFactor优化器(基类) 论文链接:https://arxiv.org/abs/1804.04235 参考实现:https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/optimize.py """ def __init__( self, learning_rate=1e-3, # 可以为None beta1=0.0, beta2=None, epsilon1=1e-30, epsilon2=1e-3, multiply_by_parameter_scale=True, clipping_threshold=1.0, min_dim_size_to_factor=128, **kwargs ): super(AdaFactorBase, self).__init__(**kwargs) self._learning_rate = learning_rate self.beta1 = beta1 self._beta2 = beta2 self.epsilon1 = epsilon1 self.epsilon2 = epsilon2 self.multiply_by_parameter_scale = multiply_by_parameter_scale self.clipping_threshold = clipping_threshold self.min_dim_size_to_factor = min_dim_size_to_factor @property def learning_rate(self): if self._learning_rate is None: iterations = K.cast(self.iterations + 1, K.floatx()) learning_rate = K.minimum(1.0 / K.sqrt(iterations), 0.01) if self.multiply_by_parameter_scale: return learning_rate else: return learning_rate * 0.05 else: if not hasattr(self, '__learning_rate'): with K.name_scope(self.__class__.__name__): self.__learning_rate = K.variable( self._learning_rate, name='learning_rate' ) return self.__learning_rate @property def beta2(self): if self._beta2 is None: iterations = K.cast(self.iterations + 1, K.floatx()) return 1.0 - K.pow(iterations, -0.8) else: return self._beta2 def factored_shape(self, shape): if len(shape) < 2: return None shape = np.array(shape) indices = shape.argpartition(-2) 
if indices[-2] < self.min_dim_size_to_factor: return None shape1, shape2 = np.array(shape), np.array(shape) shape1[indices[-1]] = 1 shape2[indices[-2]] = 1 return shape1, indices[-1], shape2, indices[-2] def get_config(self): config = { 'learning_rate': self._learning_rate, 'beta1': self.beta1, 'beta2': self._beta2, 'epsilon1': self.epsilon1, 'epsilon2': self.epsilon2, 'multiply_by_parameter_scale': self.multiply_by_parameter_scale, 'clipping_threshold': self.clipping_threshold, 'min_dim_size_to_factor': self.min_dim_size_to_factor, } base_config = super(AdaFactorBase, self).get_config() return dict(list(base_config.items()) + list(config.items())) class AdaFactorV1(AdaFactorBase): """AdaFactor优化器(纯Keras版) 论文链接:https://arxiv.org/abs/1804.04235 参考实现:https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/optimize.py """ def __init__(self, *args, **kwargs): super(AdaFactorV1, self).__init__(*args, **kwargs) with K.name_scope(self.__class__.__name__): self.iterations = K.variable(0, dtype='int64', name='iterations') @K.symbolic def get_updates(self, loss, params): grads = self.get_gradients(loss, params) self.updates = [K.update_add(self.iterations, 1)] self.weights = [self.iterations] lr = self.learning_rate for i, (p, g) in enumerate(zip(params, grads)): g2 = K.square(g) + self.epsilon1 shape, dtype = K.int_shape(p), K.dtype(p) factored_shape = self.factored_shape(shape) if factored_shape is None: # 定义参数 v = K.zeros(shape, dtype=dtype, name='v_' + str(i)) self.weights.append(v) # 定义更新 v_t = self.beta2 * v + (1.0 - self.beta2) * g2 self.updates.append(K.update(v, v_t)) else: # 定义参数 shape1, axis1, shape2, axis2 = factored_shape vr = K.zeros(shape1, dtype=dtype, name='vr_' + str(i)) vc = K.zeros(shape2, dtype=dtype, name='vc_' + str(i)) self.weights.extend([vr, vc]) # 定义更新 vr_t = self.beta2 * vr + K.mean(g2, axis=axis1, keepdims=True) vc_t = self.beta2 * vc + K.mean(g2, axis=axis2, keepdims=True) self.updates.extend([K.update(vr, vr_t), K.update(vc, vc_t)]) # 
合成矩阵 v_t = vr_t * vc_t / K.mean(vr_t, axis=axis2, keepdims=True) # 增量主体 u = g / K.sqrt(v_t) # 增量裁剪 if self.clipping_threshold is not None: u_rms = K.mean(K.sum(K.square(u))) d = self.clipping_threshold u = u / K.maximum(1.0, u_rms / d) # 增量滑动 if self.beta1 > 0.0: # 定义参数 m = K.zeros(shape, dtype=dtype, name='m_' + str(i)) self.weights.append(m) # 定义更新 m_t = self.beta1 * m + (1.0 - self.beta1) * u self.updates.append(K.update(m, m_t)) u = m_t # 增量调整 if self.multiply_by_parameter_scale: u = u * K.maximum(K.mean(K.sum(K.square(p))), self.epsilon2) # 更新参数 self.updates.append(K.update(p, p - lr * u)) return self.updates class AdaFactorV2(AdaFactorBase): """AdaFactor优化器(tf.keras版) 论文链接:https://arxiv.org/abs/1804.04235 参考实现:https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/optimize.py """ def __init__(self, *args, **kwargs): kwargs['name'] = kwargs.get('name') or 'AdaFactor' super(AdaFactorV2, self).__init__(*args, **kwargs) def _create_slots(self, var_list): for var in var_list: if self.beta1 > 0.0: self.add_slot(var, 'm') shape = K.int_shape(var) factored_shape = self.factored_shape(shape) if factored_shape is None: self.add_slot(var, 'v') else: shape1, axis1, shape2, axis2 = factored_shape value1, value2 = np.zeros(shape1), np.zeros(shape2) self.add_slot(var, 'vr', value1) self.add_slot(var, 'vc', value2) def _resource_apply(self, grad, var, indices=None): lr = self.learning_rate g2 = K.square(grad) + self.epsilon1 shape = K.int_shape(var) factored_shape = self.factored_shape(shape) if factored_shape is None: v = self.get_slot(var, 'v') # 定义更新 v_t = self.beta2 * v + (1.0 - self.beta2) * g2 v_t = K.update(v, v_t) else: shape1, axis1, shape2, axis2 = factored_shape vr = self.get_slot(var, 'vr') vc = self.get_slot(var, 'vc') # 定义更新 vr_t = self.beta2 * vr + K.mean(g2, axis=axis1, keepdims=True) vc_t = self.beta2 * vc + K.mean(g2, axis=axis2, keepdims=True) vr_t, vc_t = K.update(vr, vr_t), K.update(vc, vc_t) # 合成矩阵 v_t = vr_t * vc_t / K.mean(vr_t, axis=axis2, 
keepdims=True) # 增量主体 u = grad / K.sqrt(v_t) # 增量裁剪 if self.clipping_threshold is not None: u_rms = K.mean(K.sum(K.square(u))) d = self.clipping_threshold u = u / K.maximum(1.0, u_rms / d) # 增量滑动 if self.beta1 > 0.0: m = self.get_slot(var, 'm') # 定义更新 m_t = self.beta1 * m + (1.0 - self.beta1) * u u = K.update(m, m_t) # 增量调整 if self.multiply_by_parameter_scale: u = u * K.maximum(K.mean(K.sum(K.square(var))), self.epsilon2) # 更新参数 return K.update(var, var - lr * u) def _resource_apply_dense(self, grad, var): return self._resource_apply(grad, var) def _resource_apply_sparse(self, grad, var, indices): grad = tf.IndexedSlices(grad, indices, K.shape(var)) grad = tf.convert_to_tensor(grad) return self._resource_apply_dense(grad, var) def export_to_custom_objects(base_extend_with): """装饰器,用来将优化器放到custom_objects中 """ def new_extend_with(BaseOptimizer, name=None): NewOptimizer = base_extend_with(BaseOptimizer) if is_string(name): NewOptimizer.__name__ = name name = NewOptimizer.__name__ keras.utils.get_custom_objects()[name] = NewOptimizer return NewOptimizer return new_extend_with @export_to_custom_objects def extend_with_weight_decay(BaseOptimizer): """返回新的优化器类,加入权重衰减 """ class NewOptimizer(BaseOptimizer): """带有权重衰减的优化器 """ @insert_arguments(weight_decay_rate=0.01, exclude_from_weight_decay=[]) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) if not hasattr(self, 'learning_rate'): self.learning_rate = self.lr @K.symbolic def get_updates(self, loss, params): old_update = K.update def new_update(x, new_x): if is_one_of(x, params) and self._do_weight_decay(x): new_x = new_x - self.learning_rate * self.weight_decay_rate * x return old_update(x, new_x) K.update = new_update updates = super(NewOptimizer, self).get_updates(loss, params) K.update = old_update return updates def _do_weight_decay(self, w): return (not string_matching(w.name, self.exclude_from_weight_decay)) def get_config(self): config = { 'weight_decay_rate': 
self.weight_decay_rate, 'exclude_from_weight_decay': self.exclude_from_weight_decay, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_weight_decay_v2(BaseOptimizer): """返回新的优化器类,加入权重衰减 """ class NewOptimizer(BaseOptimizer): """带有权重衰减的优化器 """ @insert_arguments(weight_decay_rate=0.01, exclude_from_weight_decay=[]) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) def _resource_apply(self, grad, var, indices=None): old_update = K.update def new_update(x, new_x): if x is var and self._do_weight_decay(x): lr_t = self._decayed_lr(x.dtype.base_dtype) new_x = new_x - lr_t * self.weight_decay_rate * x return old_update(x, new_x) K.update = new_update op = super(NewOptimizer, self)._resource_apply(grad, var, indices) K.update = old_update return op def _do_weight_decay(self, w): return (not string_matching(w.name, self.exclude_from_weight_decay)) def get_config(self): config = { 'weight_decay_rate': self.weight_decay_rate, 'exclude_from_weight_decay': self.exclude_from_weight_decay, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_layer_adaptation(BaseOptimizer): """返回新的优化器类,加入层自适应学习率 """ class NewOptimizer(BaseOptimizer): """带有层自适应学习率的优化器 用每一层参数的模长来校正当前参数的学习率 https://arxiv.org/abs/1904.00962 """ @insert_arguments(exclude_from_layer_adaptation=[]) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) if not hasattr(self, 'learning_rate'): self.learning_rate = self.lr @K.symbolic def get_updates(self, loss, params): old_update = K.update def new_update(x, new_x): if is_one_of(x, params) and self._do_layer_adaptation(x): dx = new_x - x lr_t = K.clip(self.learning_rate, K.epsilon(), 1e10) x_norm = tf.norm(x) g_norm = tf.norm(dx / lr_t) ratio = 
K.switch( x_norm > 0.0, K.switch(g_norm > K.epsilon(), x_norm / g_norm, 1.0), 1.0 ) new_x = x + dx * ratio return old_update(x, new_x) K.update = new_update updates = super(NewOptimizer, self).get_updates(loss, params) K.update = old_update return updates def _do_layer_adaptation(self, w): return ( not string_matching(w.name, self.exclude_from_layer_adaptation) ) def get_config(self): config = { 'exclude_from_layer_adaptation': self.exclude_from_layer_adaptation, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_layer_adaptation_v2(BaseOptimizer): """返回新的优化器类,加入层自适应学习率 """ class NewOptimizer(BaseOptimizer): """带有层自适应学习率的优化器 用每一层参数的模长来校正当前参数的学习率 https://arxiv.org/abs/1904.00962 """ @insert_arguments(exclude_from_layer_adaptation=[]) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) def _resource_apply(self, grad, var, indices=None): old_update = K.update def new_update(x, new_x): if x is var and self._do_layer_adaptation(x): dx = new_x - x lr_t = self._decayed_lr(x.dtype.base_dtype) lr_t = K.clip(lr_t, K.epsilon(), 1e10) x_norm = tf.norm(x) g_norm = tf.norm(dx / lr_t) ratio = K.switch( x_norm > 0.0, K.switch(g_norm > K.epsilon(), x_norm / g_norm, 1.0), 1.0 ) new_x = x + dx * ratio return old_update(x, new_x) K.update = new_update op = super(NewOptimizer, self)._resource_apply(grad, var, indices) K.update = old_update return op def _do_layer_adaptation(self, w): return ( not string_matching(w.name, self.exclude_from_layer_adaptation) ) def get_config(self): config = { 'exclude_from_layer_adaptation': self.exclude_from_layer_adaptation, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_piecewise_linear_lr(BaseOptimizer): """返回新的优化器类,加入分段线性学习率 """ class 
NewOptimizer(BaseOptimizer): """带有分段线性学习率的优化器 其中schedule是形如{1000: 1, 2000: 0.1}的字典, 表示0~1000步内学习率线性地从零增加到100%,然后 1000~2000步内线性地降到10%,2000步以后保持10% """ @insert_arguments(lr_schedule={0: 1}) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) self.lr_schedule = {int(i): j for i, j in self.lr_schedule.items()} @K.symbolic def get_updates(self, loss, params): lr_multiplier = piecewise_linear(self.iterations, self.lr_schedule) old_update = K.update def new_update(x, new_x): if is_one_of(x, params): new_x = x + (new_x - x) * lr_multiplier return old_update(x, new_x) K.update = new_update updates = super(NewOptimizer, self).get_updates(loss, params) K.update = old_update return updates def get_config(self): config = { 'lr_schedule': self.lr_schedule, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_piecewise_linear_lr_v2(BaseOptimizer): """返回新的优化器类,加入分段线性学习率 """ class NewOptimizer(BaseOptimizer): """带有分段线性学习率的优化器 其中schedule是形如{1000: 1, 2000: 0.1}的字典, 表示0~1000步内学习率线性地从零增加到100%,然后 1000~2000步内线性地降到10%,2000步以后保持10% """ @insert_arguments(lr_schedule={0: 1}) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) self.lr_schedule = {int(i): j for i, j in self.lr_schedule.items()} def _decayed_lr(self, var_dtype): lr_multiplier = piecewise_linear(self.iterations, self.lr_schedule) lr_t = super(NewOptimizer, self)._decayed_lr(var_dtype) return lr_t * K.cast(lr_multiplier, var_dtype) def get_config(self): config = { 'lr_schedule': self.lr_schedule, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_gradient_accumulation(BaseOptimizer): """返回新的优化器类,加入梯度累积 """ class NewOptimizer(BaseOptimizer): """带有梯度累积的优化器 """ @insert_arguments(grad_accum_steps=2) def 
__init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) self._first_get_gradients = True def get_gradients(self, loss, params): if self._first_get_gradients: self._first_get_gradients = False return super(NewOptimizer, self).get_gradients(loss, params) else: return [ag / self.grad_accum_steps for ag in self.accum_grads] @K.symbolic def get_updates(self, loss, params): # 更新判据 cond = K.equal(self.iterations % self.grad_accum_steps, 0) cond = K.cast(cond, K.floatx()) # 获取梯度 grads = self.get_gradients(loss, params) self.accum_grads = [ K.zeros( K.int_shape(p), dtype=K.dtype(p), name='accum_grad_%s' % i ) for i, p in enumerate(params) ] old_update = K.update def new_update(x, new_x): new_x = cond * new_x + (1 - cond) * x return old_update(x, new_x) K.update = new_update updates = super(NewOptimizer, self).get_updates(loss, params) K.update = old_update # 累积梯度 with tf.control_dependencies(updates): accum_updates = [ K.update(ag, g + (1 - cond) * ag) for g, ag in zip(grads, self.accum_grads) ] return accum_updates def get_config(self): config = { 'grad_accum_steps': self.grad_accum_steps, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_gradient_accumulation_v2(BaseOptimizer): """返回新的优化器类,加入梯度累积 """ class NewOptimizer(BaseOptimizer): """带有梯度累积的优化器 """ @insert_arguments(grad_accum_steps=2) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) def _create_slots(self, var_list): super(NewOptimizer, self)._create_slots(var_list) for var in var_list: self.add_slot(var, 'ag') def _resource_apply(self, grad, var, indices=None): # 更新判据 cond = K.equal(self.iterations % self.grad_accum_steps, 0) # 获取梯度 ag = self.get_slot(var, 'ag') old_update = K.update def new_update(x, new_x): new_x = K.switch(cond, new_x, x) return old_update(x, new_x) K.update = new_update ag_t = ag / 
self.grad_accum_steps op = super(NewOptimizer, self)._resource_apply(ag_t, var) K.update = old_update # 累积梯度 with tf.control_dependencies([op]): ag_t = K.switch(cond, K.zeros_like(ag), ag) with tf.control_dependencies([K.update(ag, ag_t)]): if indices is None: ag_t = K.update(ag, ag + grad) else: ag_t = self._resource_scatter_add(ag, indices, grad) return ag_t def get_config(self): config = { 'grad_accum_steps': self.grad_accum_steps, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_lookahead(BaseOptimizer): """返回新的优化器类,加入look ahead """ class NewOptimizer(BaseOptimizer): """带有look ahead的优化器 https://arxiv.org/abs/1907.08610 steps_per_slow_update: 即论文中的k; slow_step_size: 即论文中的alpha。 """ @insert_arguments(steps_per_slow_update=5, slow_step_size=0.5) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) @K.symbolic def get_updates(self, loss, params): updates = super(NewOptimizer, self).get_updates(loss, params) k, alpha = self.steps_per_slow_update, self.slow_step_size cond = K.equal(self.iterations % k, 0) slow_vars = [ K.zeros( K.int_shape(p), dtype=K.dtype(p), name='slow_var_%s' % i ) for i, p in enumerate(params) ] with tf.control_dependencies(updates): slow_updates = [ K.update(q, K.switch(cond, q + alpha * (p - q), q)) for p, q in zip(params, slow_vars) ] with tf.control_dependencies(slow_updates): copy_updates = [ K.update(p, K.switch(cond, q, p)) for p, q in zip(params, slow_vars) ] return copy_updates def get_config(self): config = { 'steps_per_slow_update': self.steps_per_slow_update, 'slow_step_size': self.slow_step_size, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_lookahead_v2(BaseOptimizer): """返回新的优化器类,加入look ahead """ class NewOptimizer(BaseOptimizer): 
"""带有look ahead的优化器 https://arxiv.org/abs/1907.08610 steps_per_slow_update: 即论文中的k; slow_step_size: 即论文中的alpha。 """ @insert_arguments(steps_per_slow_update=5, slow_step_size=0.5) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) def _create_slots(self, var_list): super(NewOptimizer, self)._create_slots(var_list) for var in var_list: self.add_slot(var, 'slow_var') def _resource_apply(self, grad, var, indices=None): op = super(NewOptimizer, self)._resource_apply(grad, var, indices) k, alpha = self.steps_per_slow_update, self.slow_step_size cond = K.equal(self.iterations % k, 0) slow_var = self.get_slot(var, 'slow_var') slow_var_t = slow_var + alpha * (var - slow_var) with tf.control_dependencies([op]): slow_update = K.update( slow_var, K.switch(cond, slow_var_t, slow_var) ) with tf.control_dependencies([slow_update]): copy_update = K.update(var, K.switch(cond, slow_var, var)) return copy_update def get_config(self): config = { 'steps_per_slow_update': self.steps_per_slow_update, 'slow_step_size': self.slow_step_size, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_lazy_optimization(BaseOptimizer): """返回新的优化器类,加入懒惰更新 """ class NewOptimizer(BaseOptimizer): """带有懒惰更新的优化器 使得部分权重(尤其是embedding)只有在梯度不等于0时 才发生更新。 """ @insert_arguments(include_in_lazy_optimization=[]) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) self._first_get_gradients = True def get_gradients(self, loss, params): if self._first_get_gradients: self._first_get_gradients = False return super(NewOptimizer, self).get_gradients(loss, params) else: return [self.grads[p] for p in params] @K.symbolic def get_updates(self, loss, params): self.grads = dict(zip(params, self.get_gradients(loss, params))) old_update = K.update def new_update(x, new_x): if is_one_of(x, params) and self._do_lazy_optimization(x): 
g = self.grads[x] r = K.any(K.not_equal(g, 0.0), axis=-1, keepdims=True) new_x = x + (new_x - x) * K.cast(r, K.floatx()) return old_update(x, new_x) K.update = new_update updates = super(NewOptimizer, self).get_updates(loss, params) K.update = old_update return updates def _do_lazy_optimization(self, w): return string_matching(w.name, self.include_in_lazy_optimization) def get_config(self): config = { 'include_in_lazy_optimization': self.include_in_lazy_optimization, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_lazy_optimization_v2(BaseOptimizer): """返回新的优化器类,加入懒惰更新 """ class NewOptimizer(BaseOptimizer): """带有懒惰更新的优化器 使得部分权重(尤其是embedding)只有在梯度不等于0时 才发生更新。 """ @insert_arguments(include_in_lazy_optimization=[]) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) def _resource_apply(self, grad, var, indices=None): old_update = K.update def new_update(x, new_x): if x is var and self._do_lazy_optimization(x): if indices is None: r = K.any( K.not_equal(grad, 0.0), axis=-1, keepdims=True ) new_x = x + (new_x - x) * K.cast(r, K.floatx()) return old_update(x, new_x) else: return self._resource_scatter_add( x, indices, K.gather(new_x - x, indices) ) return old_update(x, new_x) K.update = new_update op = super(NewOptimizer, self)._resource_apply(grad, var, indices) K.update = old_update return op def _do_lazy_optimization(self, w): return string_matching(w.name, self.include_in_lazy_optimization) def get_config(self): config = { 'include_in_lazy_optimization': self.include_in_lazy_optimization, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) return NewOptimizer @export_to_custom_objects def extend_with_exponential_moving_average(BaseOptimizer): """返回新的优化器类,加入EMA(权重滑动平均) """ class NewOptimizer(BaseOptimizer): """带EMA(权重滑动平均)的优化器 """ 
@insert_arguments(ema_momentum=0.999) def __init__(self, *args, **kwargs): super(NewOptimizer, self).__init__(*args, **kwargs) def get_updates(self, loss, params): updates = super(NewOptimizer, self).get_updates(loss, params) self.model_weights = params self.ema_weights = [K.zeros(K.shape(w)) for w in params] self.old_weights = K.batch_get_value(params) K.batch_set_value(zip(self.ema_weights, self.old_weights)) ema_updates, ema_momentum = [], self.ema_momentum with tf.control_dependencies(updates): for w1, w2 in zip(self.ema_weights, params): new_w = ema_momentum * w1 + (1 - ema_momentum) * w2 ema_updates.append(K.update(w1, new_w)) return ema_updates def get_config(self): config = { 'ema_momentum': self.ema_momentum, } base_config = super(NewOptimizer, self).get_config() return dict(list(base_config.items()) + list(config.items())) def apply_ema_weights(self): """备份原模型权重,然后将平均权重应用到模型上去。 """ self.old_weights = K.batch_get_value(self.model_weights) ema_weights = K.batch_get_value(self.ema_weights) K.batch_set_value(zip(self.model_weights, ema_weights)) def reset_old_weights(self): """恢复模型到旧权重。 """ K.batch_set_value(zip(self.model_weights, self.old_weights)) return NewOptimizer @export_to_custom_objects def extend_with_gradient_centralization(BaseOptimizer): """返回新的优化器类,将梯度零中心化 """ class NewOptimizer(BaseOptimizer): """带梯度零中心化的优化器 """ def get_gradients(self, loss, params): grads = [] for g in super(NewOptimizer, self).get_gradients(loss, params): if isinstance(g, tf.IndexedSlices): g = tf.convert_to_tensor(g) if K.ndim(g) > 1: g = g - K.mean(g, axis=range(1, K.ndim(g)), keepdims=True) grads.append(g) return grads return NewOptimizer if is_tf_keras: extend_with_weight_decay = extend_with_weight_decay_v2 extend_with_layer_adaptation = extend_with_layer_adaptation_v2 extend_with_piecewise_linear_lr = extend_with_piecewise_linear_lr_v2 extend_with_gradient_accumulation = extend_with_gradient_accumulation_v2 extend_with_lookahead = extend_with_lookahead_v2 
extend_with_lazy_optimization = extend_with_lazy_optimization_v2 AdaFactor = AdaFactorV2 else: Adam = keras.optimizers.Adam AdaFactor = AdaFactorV1 custom_objects = { 'Adam': Adam, 'AdaFactor': AdaFactor, } keras.utils.get_custom_objects().update(custom_objects)
34,935
34.360324
83
py
BiRTE
BiRTE-main/bert4keras/tokenizers.py
#! -*- coding: utf-8 -*- # 工具函数 import unicodedata, re from bert4keras.snippets import is_string, is_py2 from bert4keras.snippets import open def load_vocab(dict_path, encoding='utf-8', simplified=False, startswith=None): """从bert的词典文件中读取词典 """ token_dict = {} with open(dict_path, encoding=encoding) as reader: for line in reader: token = line.strip() token_dict[token] = len(token_dict) if simplified: # 过滤冗余部分token new_token_dict, keep_tokens = {}, [] startswith = startswith or [] for t in startswith: new_token_dict[t] = len(new_token_dict) keep_tokens.append(token_dict[t]) for t, _ in sorted(token_dict.items(), key=lambda s: s[1]): if t not in new_token_dict: keep = True if len(t) > 1: for c in Tokenizer.stem(t): if ( Tokenizer._is_cjk_character(c) or Tokenizer._is_punctuation(c) ): keep = False break if keep: new_token_dict[t] = len(new_token_dict) keep_tokens.append(token_dict[t]) return new_token_dict, keep_tokens else: return token_dict class BasicTokenizer(object): """分词器基类 """ def __init__(self, token_start='[CLS]', token_end='[SEP]'): """初始化 """ self._token_pad = '[PAD]' self._token_unk = '[UNK]' self._token_mask = '[MASK]' self._token_start = token_start self._token_end = token_end def tokenize(self, text, max_length=None,mode="BERT"): """分词函数 """ tokens = self._tokenize(text,mode) if self._token_start is not None: tokens.insert(0, self._token_start) if self._token_end is not None: tokens.append(self._token_end) if max_length is not None: index = int(self._token_end is not None) + 1 self.truncate_sequence(max_length, tokens, None, -index) return tokens def token_to_id(self, token): """token转换为对应的id """ raise NotImplementedError def tokens_to_ids(self, tokens): """token序列转换为对应的id序列 """ return [self.token_to_id(token) for token in tokens] def truncate_sequence( self, max_length, first_sequence, second_sequence=None, pop_index=-1 ): """截断总长度 """ if second_sequence is None: second_sequence = [] while True: total_length = len(first_sequence) + 
len(second_sequence) if total_length <= max_length: break elif len(first_sequence) > len(second_sequence): first_sequence.pop(pop_index) else: second_sequence.pop(pop_index) def encode( self, first_text, second_text=None, max_length=None, first_length=None, second_length=None, mode="BERT" ): """输出文本对应token id和segment id 如果传入first_length,则强行padding第一个句子到指定长度; 同理,如果传入second_length,则强行padding第二个句子到指定长度。 """ if is_string(first_text): first_tokens = self.tokenize(first_text,mode=mode) else: first_tokens = first_text if second_text is None: second_tokens = None elif is_string(second_text): idx = int(bool(self._token_start)) second_tokens = self.tokenize(second_text,mode=mode)[idx:] else: second_tokens = second_text if max_length is not None: self.truncate_sequence(max_length, first_tokens, second_tokens, -2) first_token_ids = self.tokens_to_ids(first_tokens) if first_length is not None: first_token_ids = first_token_ids[:first_length] first_token_ids.extend([self._token_pad_id] * (first_length - len(first_token_ids))) first_segment_ids = [0] * len(first_token_ids) if second_text is not None: second_token_ids = self.tokens_to_ids(second_tokens) if second_length is not None: second_token_ids = second_token_ids[:second_length] second_token_ids.extend([self._token_pad_id] * (second_length - len(second_token_ids))) second_segment_ids = [1] * len(second_token_ids) first_token_ids.extend(second_token_ids) first_segment_ids.extend(second_segment_ids) first_mask_ids=[1]*len(first_segment_ids) return first_token_ids, first_segment_ids, first_mask_ids def id_to_token(self, i): """id序列为对应的token """ raise NotImplementedError def ids_to_tokens(self, ids): """id序列转换为对应的token序列 """ return [self.id_to_token(i) for i in ids] def decode(self, ids): """转为可读文本 """ raise NotImplementedError def _tokenize(self, text): """基本分词函数 """ raise NotImplementedError class Tokenizer(BasicTokenizer): """Bert原生分词器 纯Python实现,代码修改自keras_bert的tokenizer实现 """ def __init__(self, token_dict, 
do_lower_case=False, *args, **kwargs): """初始化 """ super(Tokenizer, self).__init__(*args, **kwargs) if is_string(token_dict): token_dict = load_vocab(token_dict) self._do_lower_case = do_lower_case self._token_dict = token_dict self._token_dict_inv = {v: k for k, v in token_dict.items()} self._vocab_size = len(token_dict) for token in ['pad', 'unk', 'mask', 'start', 'end']: try: _token_id = token_dict[getattr(self, '_token_%s' % token)] setattr(self, '_token_%s_id' % token, _token_id) except: pass def token_to_id(self, token): """token转换为对应的id """ return self._token_dict.get(token, self._token_unk_id) def id_to_token(self, i): """id转换为对应的token """ return self._token_dict_inv[i] def decode(self, ids, tokens=None): """转为可读文本 """ tokens = tokens or self.ids_to_tokens(ids) tokens = [token for token in tokens if not self._is_special(token)] text, flag = '', False for i, token in enumerate(tokens): if token[:2] == '##': text += token[2:] elif len(token) == 1 and self._is_cjk_character(token): text += token elif len(token) == 1 and self._is_punctuation(token): text += token text += ' ' elif i > 0 and self._is_cjk_character(text[-1]): text += token else: text += ' ' text += token text = re.sub(' +', ' ', text) text = re.sub('\' (re|m|s|t|ve|d|ll) ', '\'\\1 ', text) punctuation = self._cjk_punctuation() + '+-/={(<[' punctuation_regex = '|'.join([re.escape(p) for p in punctuation]) punctuation_regex = '(%s) ' % punctuation_regex text = re.sub(punctuation_regex, '\\1', text) text = re.sub('(\d\.) 
(\d)', '\\1\\2', text) return text.strip() def _tokenize(self, text,mode="BERT"): """基本分词函数 """ mode=mode.upper() assert mode in ['BERT','TOKEN'] if self._do_lower_case: if is_py2: text = unicode(text) #if mode=="BERT": text = unicodedata.normalize('NFD', text) text = ''.join([ ch for ch in text if unicodedata.category(ch) != 'Mn' ]) text = text.lower() spaced = '' for ch in text: if self._is_punctuation(ch) or self._is_cjk_character(ch): spaced += ' ' + ch + ' ' elif self._is_space(ch): spaced += ' ' elif ord(ch) == 0 or ord(ch) == 0xfffd or self._is_control(ch): continue elif mode=="BERT": spaced += ch else: spaced += ' ' + ch + ' ' tokens = [] for word in spaced.strip().split(): tokens.extend(self._word_piece_tokenize(word)) return tokens def _word_piece_tokenize(self, word): """word内分成subword """ if word in self._token_dict: return [word] tokens = [] start, stop = 0, 0 while start < len(word): stop = len(word) while stop > start: sub = word[start:stop] if start > 0: sub = '##' + sub if sub in self._token_dict: break stop -= 1 if start == stop: stop += 1 tokens.append(sub) start = stop return tokens @staticmethod def stem(token): """获取token的“词干”(如果是##开头,则自动去掉##) """ if token[:2] == '##': return token[2:] else: return token @staticmethod def _is_space(ch): """空格类字符判断 """ return ch == ' ' or ch == '\n' or ch == '\r' or ch == '\t' or \ unicodedata.category(ch) == 'Zs' @staticmethod def _is_punctuation(ch): """标点符号类字符判断(全/半角均在此内) 提醒:unicodedata.category这个函数在py2和py3下的 表现可能不一样,比如u'§'字符,在py2下的结果为'So', 在py3下的结果是'Po'。 """ code = ord(ch) return 33 <= code <= 47 or \ 58 <= code <= 64 or \ 91 <= code <= 96 or \ 123 <= code <= 126 or \ unicodedata.category(ch).startswith('P') @staticmethod def _cjk_punctuation(): return 
u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\xb7\uff01\uff1f\uff61\u3002' @staticmethod def _is_cjk_character(ch): """CJK类字符判断(包括中文字符也在此列) 参考:https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) """ code = ord(ch) return 0x4E00 <= code <= 0x9FFF or \ 0x3400 <= code <= 0x4DBF or \ 0x20000 <= code <= 0x2A6DF or \ 0x2A700 <= code <= 0x2B73F or \ 0x2B740 <= code <= 0x2B81F or \ 0x2B820 <= code <= 0x2CEAF or \ 0xF900 <= code <= 0xFAFF or \ 0x2F800 <= code <= 0x2FA1F @staticmethod def _is_control(ch): """控制类字符判断 """ return unicodedata.category(ch) in ('Cc', 'Cf') @staticmethod def _is_special(ch): """判断是不是有特殊含义的符号 """ return bool(ch) and (ch[0] == '[') and (ch[-1] == ']') def rematch(self, text, tokens): """给出原始的text和tokenize后的tokens的映射关系 """ if is_py2: text = unicode(text) if self._do_lower_case: text = text.lower() normalized_text, char_mapping = '', [] for i, ch in enumerate(text): if self._do_lower_case: ch = unicodedata.normalize('NFD', ch) ch = ''.join([c for c in ch if unicodedata.category(c) != 'Mn']) ch = ''.join([ c for c in ch if not (ord(c) == 0 or ord(c) == 0xfffd or self._is_control(c)) ]) normalized_text += ch char_mapping.extend([i] * len(ch)) text, token_mapping, offset = normalized_text, [], 0 for token in tokens: if self._is_special(token): token_mapping.append([]) else: token = self.stem(token) start = text[offset:].index(token) + offset end = start + len(token) token_mapping.append(char_mapping[start:end]) offset = end return token_mapping def subTokens2Token(self,tokens): ''' 给出分词结果,返回每个token对应的大的范围 :param tokens: :return: ''' 
mapping = [] last_word = [] for i, token in enumerate(tokens): if token.startswith("##"): assert last_word mapping.append(last_word) else: last_word = [] if i != len(tokens) - 1 and tokens[i + 1].startswith("##"): # 更新last_word last_word.append(i) j = i while j != len(tokens) - 1 and tokens[j + 1].startswith("##"): last_word.append(j + 1) j += 1 if last_word: mapping.append(last_word) else: mapping.append([i]) assert len(mapping) == len(tokens) return mapping class SpTokenizer(BasicTokenizer): """基于SentencePiece模型的封装,使用上跟Tokenizer基本一致。 """ def __init__(self, sp_model_path, *args, **kwargs): super(SpTokenizer, self).__init__(*args, **kwargs) import sentencepiece as spm self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(sp_model_path) self._token_pad = self.sp_model.id_to_piece(self.sp_model.pad_id()) self._token_unk = self.sp_model.id_to_piece(self.sp_model.unk_id()) self._vocab_size = self.sp_model.get_piece_size() for token in ['pad', 'unk', 'mask', 'start', 'end']: try: _token = getattr(self, '_token_%s' % token) _token_id = self.sp_model.piece_to_id(_token) setattr(self, '_token_%s_id' % token, _token_id) except: pass def token_to_id(self, token): """token转换为对应的id """ return self.sp_model.piece_to_id(token) def id_to_token(self, i): """id转换为对应的token """ if i < self._vocab_size: return self.sp_model.id_to_piece(i) else: return '' def decode(self, ids): """转为可读文本 """ ids = [i for i in ids if self._is_decodable(i)] text = self.sp_model.decode_ids(ids) return text.decode('utf-8') if is_py2 else text def _tokenize(self, text): """基本分词函数 """ tokens = self.sp_model.encode_as_pieces(text) return tokens def _is_special(self, i): """判断是不是有特殊含义的符号 """ return self.sp_model.is_control(i) or \ self.sp_model.is_unknown(i) or \ self.sp_model.is_unused(i) def _is_decodable(self, i): """判断是否应该被解码输出 """ return (i < self._vocab_size) and not self._is_special(i)
15,186
31.450855
502
py
BiRTE
BiRTE-main/bert4keras/layers.py
#! -*- coding: utf-8 -*- # 自定义层 import numpy as np import tensorflow as tf from bert4keras.backend import keras, K from bert4keras.backend import search_layer from bert4keras.backend import sequence_masking from bert4keras.backend import pool1d from bert4keras.backend import divisible_temporal_padding from bert4keras.snippets import is_string from keras import initializers, activations from keras.layers import * def integerize_shape(func): """装饰器,保证input_shape一定是int或None """ def convert(item): if hasattr(item, '__iter__'): return [convert(i) for i in item] elif hasattr(item, 'value'): return item.value else: return item def new_func(self, input_shape): input_shape = convert(input_shape) return func(self, input_shape) return new_func if keras.__version__[-2:] != 'tf' and keras.__version__ < '2.3': class Layer(keras.layers.Layer): """重新定义Layer,赋予“层中层”功能 (仅keras 2.3以下版本需要) """ def __init__(self, **kwargs): super(Layer, self).__init__(**kwargs) self.supports_masking = True # 本项目的自定义层均可mask def __setattr__(self, name, value): if isinstance(value, keras.layers.Layer): if not hasattr(self, '_layers'): self._layers = [] if value not in self._layers: self._layers.append(value) super(Layer, self).__setattr__(name, value) @property def trainable_weights(self): trainable = getattr(self, 'trainable', True) if trainable: trainable_weights = super(Layer, self).trainable_weights[:] for l in getattr(self, '_layers', []): trainable_weights += l.trainable_weights return trainable_weights else: return [] @property def non_trainable_weights(self): trainable = getattr(self, 'trainable', True) non_trainable_weights = super(Layer, self).non_trainable_weights[:] for l in getattr(self, '_layers', []): if trainable: non_trainable_weights += l.non_trainable_weights else: non_trainable_weights += l.weights return non_trainable_weights else: class Layer(keras.layers.Layer): def __init__(self, **kwargs): super(Layer, self).__init__(**kwargs) self.supports_masking = True # 本项目的自定义层均可mask class 
Embedding(keras.layers.Embedding): """为了适配T5,对Embedding的Mask做特殊处理 """ def compute_mask(self, inputs, mask=None): """保证第一个token不被mask """ mask = super(Embedding, self).compute_mask(inputs, mask) if mask is not None: mask1 = K.ones_like(mask[:, :1], dtype='bool') mask2 = mask[:, 1:] return K.concatenate([mask1, mask2], 1) class MultiHeadAttention(Layer): """多头注意力机制 """ def __init__( self, heads, head_size, key_size=None, use_bias=True, scaled_dot_product=True, kernel_initializer='glorot_uniform', **kwargs ): super(MultiHeadAttention, self).__init__(**kwargs) self.heads = heads self.head_size = head_size self.out_dim = heads * head_size self.key_size = key_size or head_size self.use_bias = use_bias self.scaled_dot_product = scaled_dot_product self.kernel_initializer = initializers.get(kernel_initializer) def build(self, input_shape): super(MultiHeadAttention, self).build(input_shape) self.q_dense = Dense( units=self.key_size * self.heads, use_bias=self.use_bias, kernel_initializer=self.kernel_initializer ) self.k_dense = Dense( units=self.key_size * self.heads, use_bias=self.use_bias, kernel_initializer=self.kernel_initializer ) self.v_dense = Dense( units=self.out_dim, use_bias=self.use_bias, kernel_initializer=self.kernel_initializer ) self.o_dense = Dense( units=self.out_dim, use_bias=self.use_bias, kernel_initializer=self.kernel_initializer ) def call(self, inputs, mask=None, a_mask=None, p_bias=None): """实现多头注意力 q_mask: 对输入的query序列的mask。 主要是将输出结果的padding部分置0。 v_mask: 对输入的value序列的mask。 主要是防止attention读取到padding信息。 a_mask: 对attention矩阵的mask。 不同的attention mask对应不同的应用。 p_bias: 在attention里的位置偏置。 一般用来指定相对位置编码的种类。 """ q, k, v = inputs[:3] q_mask, v_mask, n = None, None, 3 if mask is not None: if mask[0] is not None: q_mask = K.cast(mask[0], K.floatx()) if mask[2] is not None: v_mask = K.cast(mask[2], K.floatx()) if a_mask: a_mask = inputs[n] n += 1 # 线性变换 qw = self.q_dense(q) kw = self.k_dense(k) vw = self.v_dense(v) # 形状变换 qw = K.reshape(qw, (-1, K.shape(q)[1], 
self.heads, self.key_size)) kw = K.reshape(kw, (-1, K.shape(k)[1], self.heads, self.key_size)) vw = K.reshape(vw, (-1, K.shape(v)[1], self.heads, self.head_size)) # Attention a = tf.einsum('bjhd,bkhd->bhjk', qw, kw) # 处理位置编码 if p_bias == 'typical_relative': pos_embeddings = inputs[n] a = a + tf.einsum('bjhd,jkd->bhjk', qw, pos_embeddings) elif p_bias == 't5_relative': pos_embeddings = K.permute_dimensions(inputs[n], (2, 0, 1)) a = a + K.expand_dims(pos_embeddings, 0) # Attention(续) if self.scaled_dot_product: a = a / self.key_size**0.5 a = sequence_masking(a, v_mask, 1, -1) if a_mask is not None: a = a - (1 - a_mask) * 1e12 a = K.softmax(a) # 完成输出 o = tf.einsum('bhjk,bkhd->bjhd', a, vw) if p_bias == 'typical_relative': o = o + tf.einsum('bhjk,jkd->bjhd', a, pos_embeddings) o = K.reshape(o, (-1, K.shape(o)[1], self.out_dim)) o = self.o_dense(o) # 返回结果 o = sequence_masking(o, q_mask, 0) return o def compute_output_shape(self, input_shape): return (input_shape[0][0], input_shape[0][1], self.out_dim) def compute_mask(self, inputs, mask): return mask[0] def get_config(self): config = { 'heads': self.heads, 'head_size': self.head_size, 'key_size': self.key_size, 'use_bias': self.use_bias, 'scaled_dot_product': self.scaled_dot_product, 'kernel_initializer': initializers.serialize(self.kernel_initializer), } base_config = super(MultiHeadAttention, self).get_config() return dict(list(base_config.items()) + list(config.items())) class LayerNormalization(Layer): """(Conditional) Layer Normalization hidden_*系列参数仅为有条件输入时(conditional=True)使用 """ def __init__( self, center=True, scale=True, epsilon=None, conditional=False, hidden_units=None, hidden_activation='linear', hidden_initializer='glorot_uniform', **kwargs ): super(LayerNormalization, self).__init__(**kwargs) self.center = center self.scale = scale self.conditional = conditional self.hidden_units = hidden_units self.hidden_activation = activations.get(hidden_activation) self.hidden_initializer = 
initializers.get(hidden_initializer) self.epsilon = epsilon or 1e-12 def build(self, input_shape): super(LayerNormalization, self).build(input_shape) if self.conditional: shape = (input_shape[0][-1],) else: shape = (input_shape[-1],) if self.center: self.beta = self.add_weight( shape=shape, initializer='zeros', name='beta' ) if self.scale: self.gamma = self.add_weight( shape=shape, initializer='ones', name='gamma' ) if self.conditional: if self.hidden_units is not None: self.hidden_dense = Dense( units=self.hidden_units, activation=self.hidden_activation, use_bias=False, kernel_initializer=self.hidden_initializer ) if self.center: self.beta_dense = Dense( units=shape[0], use_bias=False, kernel_initializer='zeros' ) if self.scale: self.gamma_dense = Dense( units=shape[0], use_bias=False, kernel_initializer='zeros' ) def call(self, inputs): """如果是条件Layer Norm,则默认以list为输入,第二个是condition """ if self.conditional: inputs, cond = inputs if self.hidden_units is not None: cond = self.hidden_dense(cond) for _ in range(K.ndim(inputs) - K.ndim(cond)): cond = K.expand_dims(cond, 1) if self.center: beta = self.beta_dense(cond) + self.beta if self.scale: gamma = self.gamma_dense(cond) + self.gamma else: if self.center: beta = self.beta if self.scale: gamma = self.gamma outputs = inputs if self.center: mean = K.mean(outputs, axis=-1, keepdims=True) outputs = outputs - mean if self.scale: variance = K.mean(K.square(outputs), axis=-1, keepdims=True) std = K.sqrt(variance + self.epsilon) outputs = outputs / std outputs = outputs * gamma if self.center: outputs = outputs + beta return outputs def compute_output_shape(self, input_shape): if self.conditional: return input_shape[0] else: return input_shape def get_config(self): config = { 'center': self.center, 'scale': self.scale, 'epsilon': self.epsilon, 'conditional': self.conditional, 'hidden_units': self.hidden_units, 'hidden_activation': activations.serialize(self.hidden_activation), 'hidden_initializer': 
initializers.serialize(self.hidden_initializer), } base_config = super(LayerNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items())) class PositionEmbedding(Layer): """定义位置Embedding,这里的Embedding是可训练的。 """ def __init__( self, input_dim, output_dim, merge_mode='add', embeddings_initializer='zeros', custom_position_ids=False, **kwargs ): super(PositionEmbedding, self).__init__(**kwargs) self.input_dim = input_dim self.output_dim = output_dim self.merge_mode = merge_mode self.embeddings_initializer = initializers.get(embeddings_initializer) self.custom_position_ids = custom_position_ids def build(self, input_shape): super(PositionEmbedding, self).build(input_shape) self.embeddings = self.add_weight( name='embeddings', shape=(self.input_dim, self.output_dim), initializer=self.embeddings_initializer ) def call(self, inputs): """如果custom_position_ids,那么第二个输入为自定义的位置id """ if self.custom_position_ids: inputs, position_ids = inputs if K.dtype(position_ids) != 'int32': position_ids = K.cast(position_ids, 'int32') pos_embeddings = K.gather(self.embeddings, position_ids) else: input_shape = K.shape(inputs) batch_size, seq_len = input_shape[0], input_shape[1] pos_embeddings = self.embeddings[:seq_len] pos_embeddings = K.expand_dims(pos_embeddings, 0) if self.merge_mode != 'add': pos_embeddings = K.tile(pos_embeddings, [batch_size, 1, 1]) if self.merge_mode == 'add': return inputs + pos_embeddings else: return K.concatenate([inputs, pos_embeddings]) def compute_output_shape(self, input_shape): if self.custom_position_ids: input_shape = input_shape[0] if self.merge_mode == 'add': return input_shape else: return input_shape[:2] + (input_shape[2] + self.output_dim,) def get_config(self): config = { 'input_dim': self.input_dim, 'output_dim': self.output_dim, 'merge_mode': self.merge_mode, 'embeddings_initializer': initializers.serialize(self.embeddings_initializer), 'custom_position_ids': self.custom_position_ids, } base_config = 
super(PositionEmbedding, self).get_config() return dict(list(base_config.items()) + list(config.items())) class RelativePositionEmbedding(Layer): """相对位置编码 来自论文:https://arxiv.org/abs/1803.02155 """ def __init__( self, input_dim, output_dim, embeddings_initializer='zeros', **kwargs ): super(RelativePositionEmbedding, self).__init__(**kwargs) self.input_dim = input_dim self.output_dim = output_dim self.embeddings_initializer = initializers.get(embeddings_initializer) def build(self, input_shape): super(RelativePositionEmbedding, self).build(input_shape) self.embeddings = self.add_weight( name='embeddings', shape=(self.input_dim, self.output_dim), initializer=self.embeddings_initializer, ) def call(self, inputs): pos_ids = self.compute_position_ids(inputs) return K.gather(self.embeddings, pos_ids) def compute_position_ids(self, inputs): q, v = inputs # 计算位置差 q_idxs = K.arange(0, K.shape(q)[1], dtype='int32') q_idxs = K.expand_dims(q_idxs, 1) v_idxs = K.arange(0, K.shape(v)[1], dtype='int32') v_idxs = K.expand_dims(v_idxs, 0) pos_ids = v_idxs - q_idxs # 后处理操作 max_position = (self.input_dim - 1) // 2 pos_ids = K.clip(pos_ids, -max_position, max_position) pos_ids = pos_ids + max_position return pos_ids def compute_output_shape(self, input_shape): return (None, None, self.output_dim) def compute_mask(self, inputs, mask): return mask[0] def get_config(self): config = { 'input_dim': self.input_dim, 'output_dim': self.output_dim, 'embeddings_initializer': initializers.serialize(self.embeddings_initializer), } base_config = super(RelativePositionEmbedding, self).get_config() return dict(list(base_config.items()) + list(config.items())) class RelativePositionEmbeddingT5(RelativePositionEmbedding): """Google T5的相对位置编码 来自论文:https://arxiv.org/abs/1910.10683 """ def __init__( self, input_dim, output_dim, max_distance=128, bidirectional=True, embeddings_initializer='zeros', **kwargs ): super(RelativePositionEmbeddingT5, self).__init__(input_dim, output_dim, **kwargs) 
self.max_distance = max_distance self.bidirectional = bidirectional def compute_position_ids(self, inputs): """T5的相对位置分桶(直接翻译自官方T5源码) """ q, v = inputs # 计算位置差 q_idxs = K.arange(0, K.shape(q)[1], dtype='int32') q_idxs = K.expand_dims(q_idxs, 1) v_idxs = K.arange(0, K.shape(v)[1], dtype='int32') v_idxs = K.expand_dims(v_idxs, 0) pos_ids = v_idxs - q_idxs # 后处理操作 num_buckets, max_distance = self.input_dim, self.max_distance ret = 0 n = -pos_ids if self.bidirectional: num_buckets //= 2 ret += K.cast(K.less(n, 0), 'int32') * num_buckets n = K.abs(n) else: n = K.maximum(n, 0) # now n is in the range [0, inf) max_exact = num_buckets // 2 is_small = K.less(n, max_exact) val_if_large = max_exact + K.cast( K.log(K.cast(n, K.floatx()) / max_exact) / np.log(max_distance / max_exact) * (num_buckets - max_exact), 'int32', ) val_if_large = K.minimum(val_if_large, num_buckets - 1) ret += K.switch(is_small, n, val_if_large) return ret def get_config(self): config = { 'max_distance': self.max_distance, 'bidirectional': self.bidirectional, } base_config = super(RelativePositionEmbeddingT5, self).get_config() return dict(list(base_config.items()) + list(config.items())) class FeedForward(Layer): """FeedForward层,其实就是两个Dense层的叠加 """ def __init__( self, units, activation='relu', use_bias=True, kernel_initializer='glorot_uniform', **kwargs ): super(FeedForward, self).__init__(**kwargs) self.units = units self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) @integerize_shape def build(self, input_shape): super(FeedForward, self).build(input_shape) output_dim = input_shape[-1] self.dense_1 = Dense( units=self.units, activation=self.activation, use_bias=self.use_bias, kernel_initializer=self.kernel_initializer ) self.dense_2 = Dense( units=output_dim, use_bias=self.use_bias, kernel_initializer=self.kernel_initializer ) def call(self, inputs): x = inputs x = self.dense_1(x) x = self.dense_2(x) return x def 
get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), } base_config = super(FeedForward, self).get_config() return dict(list(base_config.items()) + list(config.items())) class EmbeddingDense(Layer): """运算跟Dense一致,但kernel用Embedding层的embeddings矩阵。 根据Embedding层的名字来搜索定位Embedding层。 """ def __init__( self, embedding_name, activation='softmax', use_bias=True, **kwargs ): super(EmbeddingDense, self).__init__(**kwargs) self.embedding_name = embedding_name self.activation = activations.get(activation) self.use_bias = use_bias def call(self, inputs): if not hasattr(self, 'kernel'): embedding_layer = search_layer(inputs, self.embedding_name) if embedding_layer is None: raise Exception('Embedding layer not found') self.kernel = K.transpose(embedding_layer.embeddings) self.units = K.int_shape(self.kernel)[1] if self.use_bias: self.bias = self.add_weight( name='bias', shape=(self.units,), initializer='zeros' ) outputs = K.dot(inputs, self.kernel) if self.use_bias: outputs = K.bias_add(outputs, self.bias) outputs = self.activation(outputs) return outputs def compute_output_shape(self, input_shape): return input_shape[:-1] + (self.units,) def get_config(self): config = { 'embedding_name': self.embedding_name, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, } base_config = super(EmbeddingDense, self).get_config() return dict(list(base_config.items()) + list(config.items())) class ConditionalRandomField(Layer): """纯Keras实现CRF层 CRF层本质上是一个带训练参数的loss计算层。 """ def __init__(self, lr_multiplier=1, **kwargs): super(ConditionalRandomField, self).__init__(**kwargs) self.lr_multiplier = lr_multiplier # 当前层学习率的放大倍数 @integerize_shape def build(self, input_shape): super(ConditionalRandomField, self).build(input_shape) output_dim = input_shape[-1] self.trans = self.add_weight( name='trans', shape=(output_dim, 
output_dim), initializer='glorot_uniform', trainable=True ) if self.lr_multiplier != 1: K.set_value(self.trans, K.eval(self.trans) / self.lr_multiplier) self.trans = self.lr_multiplier * self.trans def compute_mask(self, inputs, mask=None): return None def call(self, inputs, mask=None): if mask is not None: mask = K.cast(mask, K.floatx()) return sequence_masking(inputs, mask, 1, 1) def target_score(self, y_true, y_pred): """计算目标路径的相对概率(还没有归一化) 要点:逐标签得分,加上转移概率得分。 """ point_score = tf.einsum('bni,bni->b', y_true, y_pred) # 逐标签得分 trans_score = tf.einsum( 'bni,ij,bnj->b', y_true[:, :-1], self.trans, y_true[:, 1:] ) # 标签转移得分 return point_score + trans_score def log_norm_step(self, inputs, states): """递归计算归一化因子 要点:1、递归计算;2、用logsumexp避免溢出。 """ inputs, mask = inputs[:, :-1], inputs[:, -1:] states = K.expand_dims(states[0], 2) # (batch_size, output_dim, 1) trans = K.expand_dims(self.trans, 0) # (1, output_dim, output_dim) outputs = tf.reduce_logsumexp( states + trans, 1 ) # (batch_size, output_dim) outputs = outputs + inputs outputs = mask * outputs + (1 - mask) * states[:, :, 0] return outputs, [outputs] def dense_loss(self, y_true, y_pred): """y_true需要是one hot形式 """ # 导出mask并转换数据类型 mask = K.all(K.greater(y_pred, -1e6), axis=2, keepdims=True) mask = K.cast(mask, K.floatx()) # 计算目标分数 y_true, y_pred = y_true * mask, y_pred * mask target_score = self.target_score(y_true, y_pred) # 递归计算log Z init_states = [y_pred[:, 0]] y_pred = K.concatenate([y_pred, mask], axis=2) input_length = K.int_shape(y_pred[:, 1:])[1] log_norm, _, _ = K.rnn( self.log_norm_step, y_pred[:, 1:], init_states, input_length=input_length ) # 最后一步的log Z向量 log_norm = tf.reduce_logsumexp(log_norm, 1) # logsumexp得标量 # 计算损失 -log p return log_norm - target_score def sparse_loss(self, y_true, y_pred): """y_true需要是整数形式(非one hot) """ # y_true需要重新明确一下shape和dtype y_true = K.reshape(y_true, K.shape(y_pred)[:-1]) y_true = K.cast(y_true, 'int32') # 转为one hot y_true = K.one_hot(y_true, K.shape(self.trans)[0]) return 
self.dense_loss(y_true, y_pred) def dense_accuracy(self, y_true, y_pred): """训练过程中显示逐帧准确率的函数,排除了mask的影响 此处y_true需要是one hot形式 """ y_true = K.argmax(y_true, 2) return self.sparse_accuracy(y_true, y_pred) def sparse_accuracy(self, y_true, y_pred): """训练过程中显示逐帧准确率的函数,排除了mask的影响 此处y_true需要是整数形式(非one hot) """ # 导出mask并转换数据类型 mask = K.all(K.greater(y_pred, -1e6), axis=2) mask = K.cast(mask, K.floatx()) # y_true需要重新明确一下shape和dtype y_true = K.reshape(y_true, K.shape(y_pred)[:-1]) y_true = K.cast(y_true, 'int32') # 逐标签取最大来粗略评测训练效果 y_pred = K.cast(K.argmax(y_pred, 2), 'int32') isequal = K.cast(K.equal(y_true, y_pred), K.floatx()) return K.sum(isequal * mask) / K.sum(mask) def get_config(self): config = { 'lr_multiplier': self.lr_multiplier, } base_config = super(ConditionalRandomField, self).get_config() return dict(list(base_config.items()) + list(config.items())) class MaximumEntropyMarkovModel(Layer): """(双向)最大熵隐马尔可夫模型 作用和用法都类似CRF,但是比CRF更快更简单。 """ def __init__(self, lr_multiplier=1, hidden_dim=None, **kwargs): super(MaximumEntropyMarkovModel, self).__init__(**kwargs) self.lr_multiplier = lr_multiplier # 当前层学习率的放大倍数 self.hidden_dim = hidden_dim # 如果非None,则将转移矩阵低秩分解 @integerize_shape def build(self, input_shape): super(MaximumEntropyMarkovModel, self).build(input_shape) output_dim = input_shape[-1] if self.hidden_dim is None: self.trans = self.add_weight( name='trans', shape=(output_dim, output_dim), initializer='glorot_uniform', trainable=True ) if self.lr_multiplier != 1: K.set_value(self.trans, K.eval(self.trans) / self.lr_multiplier) self.trans = self.lr_multiplier * self.trans else: self.l_trans = self.add_weight( name='l_trans', shape=(output_dim, self.hidden_dim), initializer='glorot_uniform', trainable=True ) self.r_trans = self.add_weight( name='r_trans', shape=(output_dim, self.hidden_dim), initializer='glorot_uniform', trainable=True ) if self.lr_multiplier != 1: K.set_value( self.l_trans, K.eval(self.l_trans) / self.lr_multiplier ) self.l_trans = 
self.lr_multiplier * self.l_trans K.set_value( self.r_trans, K.eval(self.r_trans) / self.lr_multiplier ) self.r_trans = self.lr_multiplier * self.r_trans def compute_mask(self, inputs, mask=None): return None def call(self, inputs, mask=None): if mask is not None: mask = K.cast(mask, K.floatx()) return sequence_masking(inputs, mask, 1, 1) def reverse_sequence(self, inputs, mask=None): if mask is None: return [x[:, ::-1] for x in inputs] else: length = K.cast(K.sum(mask, 1), 'int32') return [tf.reverse_sequence(x, length, seq_axis=1) for x in inputs] def basic_loss(self, y_true, y_pred, go_backwards=False): """y_true需要是整数形式(非one hot) """ # 导出mask并转换数据类型 mask = K.all(K.greater(y_pred, -1e6), axis=2) mask = K.cast(mask, K.floatx()) # y_true需要重新明确一下shape和dtype y_true = K.reshape(y_true, K.shape(y_pred)[:-1]) y_true = K.cast(y_true, 'int32') # 反转相关 if self.hidden_dim is None: if go_backwards: # 是否反转序列 y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask) trans = K.transpose(self.trans) else: trans = self.trans histoty = K.gather(trans, y_true) else: if go_backwards: # 是否反转序列 y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask) r_trans, l_trans = self.l_trans, self.r_trans else: l_trans, r_trans = self.l_trans, self.r_trans histoty = K.gather(l_trans, y_true) histoty = tf.einsum('bnd,kd->bnk', histoty, r_trans) # 计算loss histoty = K.concatenate([y_pred[:, :1], histoty[:, :-1]], 1) y_pred = (y_pred + histoty) / 2 loss = K.sparse_categorical_crossentropy( y_true, y_pred, from_logits=True ) return K.sum(loss * mask) / K.sum(mask) def sparse_loss(self, y_true, y_pred): """y_true需要是整数形式(非one hot) """ loss = self.basic_loss(y_true, y_pred, False) loss = loss + self.basic_loss(y_true, y_pred, True) return loss / 2 def dense_loss(self, y_true, y_pred): """y_true需要是one hot形式 """ y_true = K.argmax(y_true, 2) return self.sparse_loss(y_true, y_pred) def basic_accuracy(self, y_true, y_pred, go_backwards=False): """训练过程中显示逐帧准确率的函数,排除了mask的影响 此处y_true需要是整数形式(非one hot) 
""" # 导出mask并转换数据类型 mask = K.all(K.greater(y_pred, -1e6), axis=2) mask = K.cast(mask, K.floatx()) # y_true需要重新明确一下shape和dtype y_true = K.reshape(y_true, K.shape(y_pred)[:-1]) y_true = K.cast(y_true, 'int32') # 反转相关 if self.hidden_dim is None: if go_backwards: # 是否反转序列 y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask) trans = K.transpose(self.trans) else: trans = self.trans histoty = K.gather(trans, y_true) else: if go_backwards: # 是否反转序列 y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask) r_trans, l_trans = self.l_trans, self.r_trans else: l_trans, r_trans = self.l_trans, self.r_trans histoty = K.gather(l_trans, y_true) histoty = tf.einsum('bnd,kd->bnk', histoty, r_trans) # 计算逐标签accuracy histoty = K.concatenate([y_pred[:, :1], histoty[:, :-1]], 1) y_pred = (y_pred + histoty) / 2 y_pred = K.cast(K.argmax(y_pred, 2), 'int32') isequal = K.cast(K.equal(y_true, y_pred), K.floatx()) return K.sum(isequal * mask) / K.sum(mask) def sparse_accuracy(self, y_true, y_pred): """训练过程中显示逐帧准确率的函数,排除了mask的影响 此处y_true需要是整数形式(非one hot) """ accuracy = self.basic_accuracy(y_true, y_pred, False) accuracy = accuracy + self.basic_accuracy(y_true, y_pred, True) return accuracy / 2 def dense_accuracy(self, y_true, y_pred): """训练过程中显示逐帧准确率的函数,排除了mask的影响 此处y_true需要是one hot形式 """ y_true = K.argmax(y_true, 2) return self.sparse_accuracy(y_true, y_pred) def get_config(self): config = { 'lr_multiplier': self.lr_multiplier, 'hidden_dim': self.hidden_dim, } base_config = super(MaximumEntropyMarkovModel, self).get_config() return dict(list(base_config.items()) + list(config.items())) custom_objects = { 'Embedding': Embedding, 'MultiHeadAttention': MultiHeadAttention, 'LayerNormalization': LayerNormalization, 'PositionEmbedding': PositionEmbedding, 'RelativePositionEmbedding': RelativePositionEmbedding, 'RelativePositionEmbeddingT5': RelativePositionEmbeddingT5, 'FeedForward': FeedForward, 'EmbeddingDense': EmbeddingDense, 'ConditionalRandomField': ConditionalRandomField, 
'MaximumEntropyMarkovModel': MaximumEntropyMarkovModel, } keras.utils.get_custom_objects().update(custom_objects)
31,362
33.464835
80
py
BiRTE
BiRTE-main/bert4keras/snippets.py
#! -*- coding: utf-8 -*- # 代码合集 import six import logging import numpy as np import re import sys _open_ = open is_py2 = six.PY2 if not is_py2: basestring = str def is_string(s): """判断是否是字符串 """ return isinstance(s, basestring) def strQ2B(ustring): """全角符号转对应的半角符号 """ rstring = '' for uchar in ustring: inside_code = ord(uchar) # 全角空格直接转换 if inside_code == 12288: inside_code = 32 # 全角字符(除空格)根据关系转化 elif (inside_code >= 65281 and inside_code <= 65374): inside_code -= 65248 rstring += unichr(inside_code) return rstring def string_matching(s, keywords): """判断s是否至少包含keywords中的至少一个字符串 """ for k in keywords: if re.search(k, s): return True return False def convert_to_unicode(text, encoding='utf-8'): """字符串转换为unicode格式(假设输入为utf-8格式) """ if is_py2: if isinstance(text, str): text = text.decode(encoding, 'ignore') else: if isinstance(text, bytes): text = text.decode(encoding, 'ignore') return text def convert_to_str(text, encoding='utf-8'): """字符串转换为str格式(假设输入为utf-8格式) """ if is_py2: if isinstance(text, unicode): text = text.encode(encoding, 'ignore') else: if isinstance(text, bytes): text = text.decode(encoding, 'ignore') return text class open: """模仿python自带的open函数,主要是为了同时兼容py2和py3 """ def __init__(self, name, mode='r', encoding=None): if is_py2: self.file = _open_(name, mode) else: self.file = _open_(name, mode, encoding=encoding) self.encoding = encoding def __iter__(self): for l in self.file: if self.encoding: l = convert_to_unicode(l, self.encoding) yield l def read(self): text = self.file.read() if self.encoding: text = convert_to_unicode(text, self.encoding) return text def write(self, text): if self.encoding: text = convert_to_str(text, self.encoding) self.file.write(text) def flush(self): self.file.flush() def close(self): self.file.close() def __enter__(self): return self def __exit__(self, type, value, tb): self.close() class Progress: """显示进度,自己简单封装,比tqdm更可控一些 iterable: 可迭代的对象; period: 显示进度的周期; steps: iterable可迭代的总步数,相当于len(iterable) """ def __init__(self, 
iterable, period=1, steps=None, desc=None): self.iterable = iterable self.period = period if hasattr(iterable, '__len__'): self.steps = len(iterable) else: self.steps = steps self.desc = desc if self.steps: self._format_ = u'%s/%s passed' % ('%s', self.steps) else: self._format_ = u'%s passed' if self.desc: self._format_ = self.desc + ' - ' + self._format_ self.logger = logging.getLogger() def __iter__(self): for i, j in enumerate(self.iterable): if (i + 1) % self.period == 0: self.logger.info(self._format_ % (i + 1)) yield j def parallel_apply( func, iterable, workers, max_queue_size, callback=None, dummy=False ): """多进程或多线程地将func应用到iterable的每个元素中。 注意这个apply是异步且无序的,也就是说依次输入a,b,c,但是 输出可能是func(c), func(a), func(b)。 参数: dummy: False是多进程/线性,True则是多线程/线性; callback: 处理单个输出的回调函数; """ if dummy: from multiprocessing.dummy import Pool, Queue else: from multiprocessing import Pool, Queue in_queue, out_queue = Queue(max_queue_size), Queue() def worker_step(in_queue, out_queue): # 单步函数包装成循环执行 while True: d = in_queue.get() r = func(d) out_queue.put(r) # 启动多进程/线程 pool = Pool(workers, worker_step, (in_queue, out_queue)) if callback is None: results = [] # 后处理函数 def process_out_queue(): out_count = 0 for _ in range(out_queue.qsize()): d = out_queue.get() out_count += 1 if callback is None: results.append(d) else: callback(d) return out_count # 存入数据,取出结果 in_count, out_count = 0, 0 for d in iterable: in_count += 1 while True: try: in_queue.put(d, block=False) break except six.moves.queue.Full: out_count += process_out_queue() if in_count % max_queue_size == 0: out_count += process_out_queue() while out_count != in_count: out_count += process_out_queue() pool.terminate() if callback is None: return results def sequence_padding(inputs, length=None, padding=0): """Numpy函数,将序列padding到同一长度 """ if length is None: length = max([len(x) for x in inputs]) pad_width = [(0, 0) for _ in np.shape(inputs[0])] outputs = [] for x in inputs: x = x[:length] pad_width[0] = (0, length - len(x)) x = 
np.pad(x, pad_width, 'constant', constant_values=padding) outputs.append(x) return np.array(outputs) def is_one_of(x, ys): """判断x是否在ys之中 等价于x in ys,但有些情况下x in ys会报错 """ for y in ys: if x is y: return True return False class DataGenerator(object): """数据生成器模版 """ def __init__(self, data, batch_size=32, buffer_size=None): self.data = data self.batch_size = batch_size if hasattr(self.data, '__len__'): self.steps = len(self.data) // self.batch_size if len(self.data) % self.batch_size != 0: self.steps += 1 else: self.steps = None self.buffer_size = buffer_size or batch_size * 1000 def __len__(self): return self.steps def sample(self, random=False): """采样函数,每个样本同时返回一个is_end标记 """ if random: if self.steps is None: def generator(): caches, isfull = [], False for d in self.data: caches.append(d) if isfull: i = np.random.randint(len(caches)) yield caches.pop(i) elif len(caches) == self.buffer_size: isfull = True while caches: i = np.random.randint(len(caches)) yield caches.pop(i) else: def generator(): indices = list(range(len(self.data))) np.random.shuffle(indices) for i in indices: yield self.data[i] data = generator() else: data = iter(self.data) d_current = next(data) for d_next in data: yield False, d_current d_current = d_next yield True, d_current def __iter__(self, random=False): raise NotImplementedError def forfit(self): while True: for d in self.__iter__(True): yield d def softmax(x, axis=-1): """numpy版softmax """ x = x - x.max(axis=axis, keepdims=True) x = np.exp(x) return x / x.sum(axis=axis, keepdims=True) class AutoRegressiveDecoder(object): """通用自回归生成模型解码基类 包含beam search和random sample两种策略 """ def __init__(self, start_id, end_id, maxlen, minlen=None): self.start_id = start_id self.end_id = end_id self.maxlen = maxlen self.minlen = minlen or 1 if start_id is None: self.first_output_ids = np.empty((1, 0), dtype=int) else: self.first_output_ids = np.array([[self.start_id]]) @staticmethod def set_rtype(default='probas'): """用来给predict方法加上rtype参数,并作相应的处理 """ def 
actual_decorator(predict): def new_predict(self, inputs, output_ids, step, rtype=default): assert rtype in ['probas', 'logits'] result = predict(self, inputs, output_ids, step) if default == 'probas': if rtype == 'probas': return result else: return np.log(result + 1e-12) else: if rtype == 'probas': return softmax(result, -1) else: return result return new_predict return actual_decorator def predict(self, inputs, output_ids, step, rtype='logits'): """用户需自定义递归预测函数 rtype为字符串logits或probas,用户定义的时候,应当根据rtype来 返回不同的结果,rtype=probas时返回归一化的概率,rtype=logits时 则返回softmax前的结果或者概率对数。 """ raise NotImplementedError def beam_search(self, inputs, topk): """beam search解码 说明:这里的topk即beam size; 返回:最优解码序列。 """ inputs = [np.array([i]) for i in inputs] output_ids, output_scores = self.first_output_ids, np.zeros(1) for step in range(self.maxlen): scores = self.predict(inputs, output_ids, step, 'logits') # 计算当前得分 if step == 0: # 第1步预测后将输入重复topk次 inputs = [np.repeat(i, topk, axis=0) for i in inputs] scores = output_scores.reshape((-1, 1)) + scores # 综合累积得分 indices = scores.argpartition(-topk, axis=None)[-topk:] # 仅保留topk indices_1 = indices // scores.shape[1] # 行索引 indices_2 = (indices % scores.shape[1]).reshape((-1, 1)) # 列索引 output_ids = np.concatenate([output_ids[indices_1], indices_2], 1) # 更新输出 output_scores = np.take_along_axis( scores, indices, axis=None ) # 更新得分 if output_ids.shape[1] >= self.minlen: # 最短长度判断 best_one = output_scores.argmax() # 得分最大的那个 if indices_2[best_one, 0] == self.end_id: # 如果已经终止 return output_ids[best_one] # 直接输出 else: # 否则,只保留未完成部分 flag = (indices_2[:, 0] != self.end_id) # 标记未完成序列 if not flag.all(): # 如果有已完成的 inputs = [i[flag] for i in inputs] # 扔掉已完成序列 output_ids = output_ids[flag] # 扔掉已完成序列 output_scores = output_scores[flag] # 扔掉已完成序列 topk = flag.sum() # topk相应变化 # 达到长度直接输出 return output_ids[output_scores.argmax()] def random_sample(self, inputs, n, topk=None, topp=None): """随机采样n个结果 说明:非None的topk表示每一步只从概率最高的topk个中采样;而非None的topp 
表示每一步只从概率最高的且概率之和刚好达到topp的若干个token中采样。 返回:n个解码序列组成的list。 """ inputs = [np.array([i]) for i in inputs] output_ids = self.first_output_ids results = [] for step in range(self.maxlen): probas = self.predict(inputs, output_ids, step, 'probas') # 计算当前概率 probas /= probas.sum(axis=1, keepdims=True) # 确保归一化 if step == 0: # 第1步预测后将结果重复n次 probas = np.repeat(probas, n, axis=0) inputs = [np.repeat(i, n, axis=0) for i in inputs] output_ids = np.repeat(output_ids, n, axis=0) if topk is not None: k_indices = probas.argpartition(-topk, axis=1)[:, -topk:] # 仅保留topk probas = np.take_along_axis(probas, k_indices, axis=1) # topk概率 probas /= probas.sum(axis=1, keepdims=True) # 重新归一化 if topp is not None: p_indices = probas.argsort(axis=1)[:, ::-1] # 从高到低排序 probas = np.take_along_axis(probas, p_indices, axis=1) # 排序概率 cumsum_probas = np.cumsum(probas, axis=1) # 累积概率 flag = np.roll(cumsum_probas >= topp, 1, axis=1) # 标记超过topp的部分 flag[:, 0] = False # 结合上面的np.roll,实现平移一位的效果 probas[flag] = 0 # 后面的全部置零 probas /= probas.sum(axis=1, keepdims=True) # 重新归一化 sample_func = lambda p: np.random.choice(len(p), p=p) # 按概率采样函数 sample_ids = np.apply_along_axis(sample_func, 1, probas) # 执行采样 sample_ids = sample_ids.reshape((-1, 1)) # 对齐形状 if topp is not None: sample_ids = np.take_along_axis( p_indices, sample_ids, axis=1 ) # 对齐原id if topk is not None: sample_ids = np.take_along_axis( k_indices, sample_ids, axis=1 ) # 对齐原id output_ids = np.concatenate([output_ids, sample_ids], 1) # 更新输出 if output_ids.shape[1] >= self.minlen: # 最短长度判断 flag = (sample_ids[:, 0] == self.end_id) # 标记已完成序列 if flag.any(): # 如果有已完成的 for ids in output_ids[flag]: # 存好已完成序列 results.append(ids) flag = (flag == False) # 标记未完成序列 inputs = [i[flag] for i in inputs] # 只保留未完成部分输入 output_ids = output_ids[flag] # 只保留未完成部分候选集 if len(output_ids) == 0: break # 如果还有未完成序列,直接放入结果 for ids in output_ids: results.append(ids) # 返回结果 return results def insert_arguments(**arguments): """装饰器,为类方法增加参数 (主要用于类的__init__方法) """ def actual_decorator(func): def 
new_func(self, *args, **kwargs): for k, v in arguments.items(): if k in kwargs: v = kwargs.pop(k) setattr(self, k, v) return func(self, *args, **kwargs) return new_func return actual_decorator def delete_arguments(*arguments): """装饰器,为类方法删除参数 (主要用于类的__init__方法) """ def actual_decorator(func): def new_func(self, *args, **kwargs): for k in arguments: if k in kwargs: raise TypeError( '%s got an unexpected keyword argument \'%s\'' % (self.__class__.__name__, k) ) return func(self, *args, **kwargs) return new_func return actual_decorator def groupby(iterable, key=None): """类似itertools.groupby,但这里的key是iterable对象 """ if key is None: key = iterable result = [] for i, (k, v) in enumerate(zip(key, iterable)): if i == 0: result.append((k, [v])) last_k = k else: if k == last_k: result[-1][1].append(v) else: result.append((k, [v])) last_k = k return result class Hook: """注入uniout模块,实现import时才触发 """ def __init__(self, module): self.module = module def __getattr__(self, attr): """使得 from bert4keras.backend import uniout 等效于 import uniout (自动识别Python版本,Python3 下则无操作。) """ if attr == 'uniout': if is_py2: import uniout else: return getattr(self.module, attr) Hook.__name__ = __name__ sys.modules[__name__] = Hook(sys.modules[__name__]) del Hook
15,499
28.807692
80
py
BiRTE
BiRTE-main/bert4keras/backend.py
# -*- coding: utf-8 -*- # 分离后端函数,主要是为了同时兼容原生keras和tf.keras # 通过设置环境变量TF_KERAS=1来切换tf.keras import os, sys from distutils.util import strtobool import numpy as np import tensorflow as tf # 判断是tf.keras还是纯keras的标记 is_tf_keras = strtobool(os.environ.get('TF_KERAS', '0')) if is_tf_keras: import tensorflow.keras as keras import tensorflow.keras.backend as K sys.modules['keras'] = keras else: import keras import keras.backend as K def gelu_erf(x): """基于Erf直接计算的gelu函数 """ return 0.5 * x * (1.0 + tf.math.erf(x / np.sqrt(2.0))) def gelu_tanh(x): """基于Tanh近似计算的gelu函数 """ cdf = 0.5 * ( 1.0 + K.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * K.pow(x, 3)))) ) return x * cdf def set_gelu(version): """设置gelu版本 """ version = version.lower() assert version in ['erf', 'tanh'], 'gelu version must be erf or tanh' if version == 'erf': keras.utils.get_custom_objects()['gelu'] = gelu_erf else: keras.utils.get_custom_objects()['gelu'] = gelu_tanh def piecewise_linear(t, schedule): """分段线性函数 其中schedule是形如{1000: 1, 2000: 0.1}的字典, 表示 t ∈ [0, 1000]时,输出从0均匀增加至1,而 t ∈ [1000, 2000]时,输出从1均匀降低到0.1,最后 t > 2000时,保持0.1不变。 """ schedule = sorted(schedule.items()) if schedule[0][0] != 0: schedule = [(0, 0.0)] + schedule x = K.constant(schedule[0][1], dtype=K.floatx()) t = K.cast(t, K.floatx()) for i in range(len(schedule)): t_begin = schedule[i][0] x_begin = x if i != len(schedule) - 1: dx = schedule[i + 1][1] - schedule[i][1] dt = schedule[i + 1][0] - schedule[i][0] slope = 1.0 * dx / dt x = schedule[i][1] + slope * (t - t_begin) else: x = K.constant(schedule[i][1], dtype=K.floatx()) x = K.switch(t >= t_begin, x, x_begin) return x def search_layer(inputs, name, exclude_from=None): """根据inputs和name来搜索层 说明:inputs为某个层或某个层的输出;name为目标层的名字。 实现:根据inputs一直往上递归搜索,直到发现名字为name的层为止; 如果找不到,那就返回None。 """ if exclude_from is None: exclude_from = set() if isinstance(inputs, keras.layers.Layer): layer = inputs else: layer = inputs._keras_history[0] if layer.name == name: return layer elif layer in exclude_from: return None 
else: exclude_from.add(layer) if isinstance(layer, keras.models.Model): model = layer for layer in model.layers: if layer.name == name: return layer inbound_layers = layer._inbound_nodes[0].inbound_layers if not isinstance(inbound_layers, list): inbound_layers = [inbound_layers] if len(inbound_layers) > 0: for layer in inbound_layers: layer = search_layer(layer, name, exclude_from) if layer is not None: return layer def sequence_masking(x, mask, mode=0, axis=None): """为序列条件mask的函数 mask: 形如(batch_size, seq_len)的0-1矩阵; mode: 如果是0,则直接乘以mask; 如果是1,则在padding部分减去一个大正数。 axis: 序列所在轴,默认为1; """ if mask is None or mode not in [0, 1]: return x else: if axis is None: axis = 1 if axis == -1: axis = K.ndim(x) - 1 assert axis > 0, 'axis muse be greater than 0' for _ in range(axis - 1): mask = K.expand_dims(mask, 1) for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1): mask = K.expand_dims(mask, K.ndim(mask)) if mode == 0: return x * mask else: return x - (1 - mask) * 1e12 def batch_gather(params, indices): """同tf旧版本的batch_gather """ try: return tf.gather(params, indices, batch_dims=-1) except Exception as e1: try: return tf.batch_gather(params, indices) except Exception as e2: raise ValueError('%s\n%s\n' % (e1.message, e2.message)) def pool1d( x, pool_size, strides=1, padding='valid', data_format=None, pool_mode='max' ): """向量序列的pool函数 """ x = K.expand_dims(x, 1) x = K.pool2d( x, pool_size=(1, pool_size), strides=(1, strides), padding=padding, data_format=data_format, pool_mode=pool_mode ) return x[:, 0] def divisible_temporal_padding(x, n): """将一维向量序列右padding到长度能被n整除 """ r_len = K.shape(x)[1] % n p_len = K.switch(r_len > 0, n - r_len, 0) return K.temporal_padding(x, (0, p_len)) def swish(x): """swish函数(这样封装过后才有 __name__ 属性) """ return tf.nn.swish(x) def leaky_relu(x, alpha=0.2): """leaky relu函数(这样封装过后才有 __name__ 属性) """ return tf.nn.leaky_relu(x, alpha=alpha) def symbolic(f): """恒等装饰器(兼容旧版本keras用) """ return f # 给旧版本keras新增symbolic方法(装饰器), # 以便兼容optimizers.py中的代码 K.symbolic = 
getattr(K, 'symbolic', None) or symbolic custom_objects = { 'gelu_erf': gelu_erf, 'gelu_tanh': gelu_tanh, 'gelu': gelu_erf, 'swish': swish, 'leaky_relu': leaky_relu, } keras.utils.get_custom_objects().update(custom_objects)
5,182
23.799043
73
py
BiRTE
BiRTE-main/bert4keras/models.py
#! -*- coding: utf-8 -*- # 主要模型 import numpy as np from bert4keras.layers import * from bert4keras.snippets import delete_arguments from keras.models import Model import json class Transformer(object): """模型基类 """ def __init__( self, vocab_size, # 词表大小 hidden_size, # 编码维度 num_hidden_layers, # Transformer总层数 num_attention_heads, # Attention的头数 intermediate_size, # FeedForward的隐层维度 hidden_act, # FeedForward隐层的激活函数 dropout_rate=None, # Dropout比例 embedding_size=None, # 是否指定embedding_size attention_key_size=None, # Attention中Q,K的head_size sequence_length=None, # 是否固定序列长度 keep_tokens=None, # 要保留的词ID列表 layers=None, # 外部传入的Keras层 name=None, # 模型名称 **kwargs ): if keep_tokens is None: self.vocab_size = vocab_size else: self.vocab_size = len(keep_tokens) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_head_size = hidden_size // num_attention_heads self.attention_key_size = attention_key_size or self.attention_head_size self.intermediate_size = intermediate_size self.dropout_rate = dropout_rate or 0 self.hidden_act = hidden_act self.embedding_size = embedding_size or hidden_size self.sequence_length = sequence_length self.keep_tokens = keep_tokens self.attention_mask = None self.position_bias = None self.layers = {} if layers is None else layers self.name = name self.built = False def build( self, layer_norm_cond=None, layer_norm_cond_hidden_size=None, layer_norm_cond_hidden_act=None, additional_input_layers=None, **kwargs ): """模型构建函数 layer_norm_*系列参数为实现Conditional Layer Normalization时使用, 用来实现以“固定长度向量”为条件的条件Bert。 """ if self.built: return None # Input inputs = self.get_inputs() self.set_inputs(inputs, additional_input_layers) # Other self.layer_norm_conds = [ layer_norm_cond, layer_norm_cond_hidden_size, layer_norm_cond_hidden_act or 'linear', ] # Call outputs = self.call(inputs) self.set_outputs(outputs) # Model self.model = Model(self.inputs, self.outputs, name=self.name) self.built = 
True def call(self, inputs): """定义模型的执行流程 """ # Embedding outputs = self.apply_embeddings(inputs) # Main for i in range(self.num_hidden_layers): outputs = self.apply_main_layers(outputs, i) # Final outputs = self.apply_final_layers(outputs) return outputs def apply(self, inputs, layer=None, arguments=None, **kwargs): """通过apply调用层会自动重用同名层 inputs: 上一层的输出; layer: 要调用的层类名; arguments: 传递给layer.call的参数; kwargs: 传递给层初始化的参数。 """ if layer is Dropout and self.dropout_rate == 0: return inputs arguments = arguments or {} name = kwargs.get('name') if name not in self.layers: layer = layer(**kwargs) name = layer.name self.layers[name] = layer return self.layers[name](inputs, **arguments) def get_inputs(self): raise NotImplementedError def apply_embeddings(self, inputs): raise NotImplementedError def apply_main_layers(self, inputs, index): raise NotImplementedError def apply_final_layers(self, inputs): raise NotImplementedError def compute_attention_mask(self, inputs=None): """定义每一层的Attention Mask """ return self.attention_mask def compute_position_bias(self, inputs=None): """定义每一层的Position Bias(一般相对位置编码用) """ return self.position_bias def set_inputs(self, inputs, additional_input_layers=None): """设置input和inputs属性 """ if inputs is None: inputs = [] elif not isinstance(inputs, list): inputs = [inputs] inputs = inputs[:] if additional_input_layers is not None: if not isinstance(additional_input_layers, list): additional_input_layers = [additional_input_layers] inputs.extend(additional_input_layers) self.inputs = inputs if len(inputs) > 1: self.input = inputs else: self.input = inputs[0] def set_outputs(self, outputs): """设置output和oututs属性 """ if not isinstance(outputs, list): outputs = [outputs] outputs = outputs[:] self.outputs = outputs if len(outputs) > 1: self.output = outputs else: self.output = outputs[0] @property def initializer(self): """默认使用截断正态分布初始化 """ return keras.initializers.TruncatedNormal(stddev=0.02) def simplify(self, inputs): """将list中的None过滤掉 """ inputs = [i 
for i in inputs if i is not None] if len(inputs) == 1: inputs = inputs[0] return inputs def load_variable(self, checkpoint, name): """加载单个变量的函数 """ return tf.train.load_variable(checkpoint, name) def create_variable(self, name, value): """在tensorflow中创建一个变量 """ return tf.Variable(value, name=name) def variable_mapping(self): """构建keras层与checkpoint的变量名之间的映射表 """ return {} def load_weights_from_checkpoint(self, checkpoint, mapping=None): """根据mapping从checkpoint加载权重 """ mapping = mapping or self.variable_mapping() mapping = {k: v for k, v in mapping.items() if k in self.layers} weight_value_pairs = [] for layer, variables in mapping.items(): layer = self.layers[layer] weights = layer.trainable_weights values = [self.load_variable(checkpoint, v) for v in variables] if isinstance(layer, MultiHeadAttention): """如果key_size不等于head_size,则可以通过 正交矩阵将相应的权重投影到合适的shape。 """ count = 2 if layer.use_bias: count += 2 heads = self.num_attention_heads head_size = self.attention_head_size key_size = self.attention_key_size W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T if layer.scaled_dot_product: W = W * key_size**0.25 / head_size**0.25 for i in range(count): w, v = weights[i], values[i] w_shape, v_shape = K.int_shape(w), v.shape if w_shape[-1] != v_shape[-1]: pre_shape = w_shape[:-1] v = v.reshape(pre_shape + (heads, head_size)) v = np.dot(v, W) v = v.reshape(pre_shape + (heads * key_size,)) values[i] = v weight_value_pairs.extend(zip(weights, values)) K.batch_set_value(weight_value_pairs) def save_weights_as_checkpoint(self, filename, mapping=None): """根据mapping将权重保存为checkpoint格式 """ mapping = mapping or self.variable_mapping() mapping = {k: v for k, v in mapping.items() if k in self.layers} with tf.Graph().as_default(): for layer, variables in mapping.items(): layer = self.layers[layer] values = K.batch_get_value(layer.trainable_weights) for name, value in zip(variables, values): self.create_variable(name, value) with tf.Session() as sess: 
sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() saver.save(sess, filename, write_meta_graph=False) class BERT(Transformer): """构建BERT模型 """ def __init__( self, max_position, # 序列最大长度 with_pool=False, # 是否包含Pool部分 with_nsp=False, # 是否包含NSP部分 with_mlm=False, # 是否包含MLM部分 custom_position_ids=False, # 是否自行传入位置id **kwargs # 其余参数 ): super(BERT, self).__init__(**kwargs) self.max_position = max_position self.with_pool = with_pool self.with_nsp = with_nsp self.with_mlm = with_mlm self.custom_position_ids = custom_position_ids def get_inputs(self): """BERT的输入是token_ids和segment_ids (但允许自行传入位置id,以实现一些特殊需求) """ x_in = Input(shape=(self.sequence_length,), name='Input-Token') s_in = Input(shape=(self.sequence_length,), name='Input-Segment') if self.custom_position_ids: p_in = Input(shape=(self.sequence_length,), name='Input-Position') return [x_in, s_in, p_in] else: return [x_in, s_in] def apply_embeddings(self, inputs): """BERT的embedding是token、position、segment三者embedding之和 """ x, s = inputs[:2] z = self.layer_norm_conds[0] if self.custom_position_ids: p = inputs[2] else: p = None x = self.apply( inputs=x, layer=Embedding, input_dim=self.vocab_size, output_dim=self.embedding_size, embeddings_initializer=self.initializer, mask_zero=True, name='Embedding-Token' ) s = self.apply( inputs=s, layer=Embedding, input_dim=2, output_dim=self.embedding_size, embeddings_initializer=self.initializer, name='Embedding-Segment' ) x = self.apply(inputs=[x, s], layer=Add, name='Embedding-Token-Segment') x = self.apply( inputs=self.simplify([x, p]), layer=PositionEmbedding, input_dim=self.max_position, output_dim=self.embedding_size, merge_mode='add', embeddings_initializer=self.initializer, custom_position_ids=self.custom_position_ids, name='Embedding-Position' ) x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], 
hidden_initializer=self.initializer, name='Embedding-Norm' ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='Embedding-Dropout' ) if self.embedding_size != self.hidden_size: x = self.apply( inputs=x, layer=Dense, units=self.hidden_size, kernel_initializer=self.initializer, name='Embedding-Mapping' ) return x def apply_main_layers(self, inputs, index): """BERT的主体是基于Self-Attention的模块 顺序:Att --> Add --> LN --> FFN --> Add --> LN """ x = inputs z = self.layer_norm_conds[0] attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index feed_forward_name = 'Transformer-%d-FeedForward' % index attention_mask = self.compute_attention_mask() # Self Attention xi, x, arguments = x, [x, x, x], {'a_mask': None} if attention_mask is not None: arguments['a_mask'] = True x.append(attention_mask) x = self.apply( inputs=x, layer=MultiHeadAttention, arguments=arguments, heads=self.num_attention_heads, head_size=self.attention_head_size, key_size=self.attention_key_size, kernel_initializer=self.initializer, name=attention_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % attention_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % attention_name ) x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % attention_name ) # Feed Forward xi = x x = self.apply( inputs=x, layer=FeedForward, units=self.intermediate_size, activation=self.hidden_act, kernel_initializer=self.initializer, name=feed_forward_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % feed_forward_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name ) x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], 
hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % feed_forward_name ) return x def apply_final_layers(self, inputs): """根据剩余参数决定输出 """ x = inputs z = self.layer_norm_conds[0] outputs = [x] if self.with_pool or self.with_nsp: # Pooler部分(提取CLS向量) x = outputs[0] x = self.apply( inputs=x, layer=Lambda, function=lambda x: x[:, 0], name='Pooler' ) pool_activation = 'tanh' if self.with_pool is True else self.with_pool x = self.apply( inputs=x, layer=Dense, units=self.hidden_size, activation=pool_activation, kernel_initializer=self.initializer, name='Pooler-Dense' ) if self.with_nsp: # Next Sentence Prediction部分 x = self.apply( inputs=x, layer=Dense, units=2, activation='softmax', kernel_initializer=self.initializer, name='NSP-Proba' ) outputs.append(x) if self.with_mlm: # Masked Language Model部分 x = outputs[0] x = self.apply( inputs=x, layer=Dense, units=self.embedding_size, activation=self.hidden_act, kernel_initializer=self.initializer, name='MLM-Dense' ) x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='MLM-Norm' ) mlm_activation = 'softmax' if self.with_mlm is True else self.with_mlm x = self.apply( inputs=x, layer=EmbeddingDense, embedding_name='Embedding-Token', activation=mlm_activation, name='MLM-Proba' ) outputs.append(x) if len(outputs) == 1: outputs = outputs[0] elif len(outputs) == 2: outputs = outputs[1] else: outputs = outputs[1:] return outputs def load_variable(self, checkpoint, name): """加载单个变量的函数 """ variable = super(BERT, self).load_variable(checkpoint, name) if name in [ 'bert/embeddings/word_embeddings', 'cls/predictions/output_bias', ]: if self.keep_tokens is None: return variable else: return variable[self.keep_tokens] elif name == 'cls/seq_relationship/output_weights': return variable.T else: return variable def 
create_variable(self, name, value): """在tensorflow中创建一个变量 """ if name == 'cls/seq_relationship/output_weights': value = value.T return super(BERT, self).create_variable(name, value) def variable_mapping(self): """映射到官方BERT权重格式 """ mapping = { 'Embedding-Token': ['bert/embeddings/word_embeddings'], 'Embedding-Segment': ['bert/embeddings/token_type_embeddings'], 'Embedding-Position': ['bert/embeddings/position_embeddings'], 'Embedding-Norm': [ 'bert/embeddings/LayerNorm/beta', 'bert/embeddings/LayerNorm/gamma', ], 'Embedding-Mapping': [ 'bert/encoder/embedding_hidden_mapping_in/kernel', 'bert/encoder/embedding_hidden_mapping_in/bias', ], 'Pooler-Dense': [ 'bert/pooler/dense/kernel', 'bert/pooler/dense/bias', ], 'NSP-Proba': [ 'cls/seq_relationship/output_weights', 'cls/seq_relationship/output_bias', ], 'MLM-Dense': [ 'cls/predictions/transform/dense/kernel', 'cls/predictions/transform/dense/bias', ], 'MLM-Norm': [ 'cls/predictions/transform/LayerNorm/beta', 'cls/predictions/transform/LayerNorm/gamma', ], 'MLM-Proba': ['cls/predictions/output_bias'], } for i in range(self.num_hidden_layers): prefix = 'bert/encoder/layer_%d/' % i mapping.update({ 'Transformer-%d-MultiHeadSelfAttention' % i: [ prefix + 'attention/self/query/kernel', prefix + 'attention/self/query/bias', prefix + 'attention/self/key/kernel', prefix + 'attention/self/key/bias', prefix + 'attention/self/value/kernel', prefix + 'attention/self/value/bias', prefix + 'attention/output/dense/kernel', prefix + 'attention/output/dense/bias', ], 'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [ prefix + 'attention/output/LayerNorm/beta', prefix + 'attention/output/LayerNorm/gamma', ], 'Transformer-%d-FeedForward' % i: [ prefix + 'intermediate/dense/kernel', prefix + 'intermediate/dense/bias', prefix + 'output/dense/kernel', prefix + 'output/dense/bias', ], 'Transformer-%d-FeedForward-Norm' % i: [ prefix + 'output/LayerNorm/beta', prefix + 'output/LayerNorm/gamma', ], }) return mapping class ALBERT(BERT): 
"""构建ALBERT模型 """ def apply_main_layers(self, inputs, index): """ALBERT的主体是基于Self-Attention的模块 顺序:Att --> Add --> LN --> FFN --> Add --> LN """ x = inputs z = self.layer_norm_conds[0] attention_name = 'Transformer-MultiHeadSelfAttention' feed_forward_name = 'Transformer-FeedForward' attention_mask = self.compute_attention_mask(0) # Self Attention xi, x, arguments = x, [x, x, x], {'a_mask': None} if attention_mask is not None: arguments['a_mask'] = True x.append(attention_mask) x = self.apply( inputs=x, layer=MultiHeadAttention, arguments=arguments, heads=self.num_attention_heads, head_size=self.attention_head_size, key_size=self.attention_key_size, kernel_initializer=self.initializer, name=attention_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % attention_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % attention_name ) x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % attention_name ) # Feed Forward xi = x x = self.apply( inputs=x, layer=FeedForward, units=self.intermediate_size, activation=self.hidden_act, kernel_initializer=self.initializer, name=feed_forward_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % feed_forward_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name ) x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % feed_forward_name ) return x def variable_mapping(self): """映射到官方ALBERT权重格式 """ mapping = super(ALBERT, self).variable_mapping() prefix = 'bert/encoder/transformer/group_0/inner_group_0/' mapping.update({ 
'Transformer-MultiHeadSelfAttention': [ prefix + 'attention_1/self/query/kernel', prefix + 'attention_1/self/query/bias', prefix + 'attention_1/self/key/kernel', prefix + 'attention_1/self/key/bias', prefix + 'attention_1/self/value/kernel', prefix + 'attention_1/self/value/bias', prefix + 'attention_1/output/dense/kernel', prefix + 'attention_1/output/dense/bias', ], 'Transformer-MultiHeadSelfAttention-Norm': [ prefix + 'LayerNorm/beta', prefix + 'LayerNorm/gamma', ], 'Transformer-FeedForward': [ prefix + 'ffn_1/intermediate/dense/kernel', prefix + 'ffn_1/intermediate/dense/bias', prefix + 'ffn_1/intermediate/output/dense/kernel', prefix + 'ffn_1/intermediate/output/dense/bias', ], 'Transformer-FeedForward-Norm': [ prefix + 'LayerNorm_1/beta', prefix + 'LayerNorm_1/gamma', ], }) return mapping class ALBERT_Unshared(BERT): """解开ALBERT共享约束,当成BERT用 """ def variable_mapping(self): """映射到官方ALBERT权重格式 """ mapping = super(ALBERT_Unshared, self).variable_mapping() prefix = 'bert/encoder/transformer/group_0/inner_group_0/' for i in range(self.num_hidden_layers): mapping.update({ 'Transformer-%d-MultiHeadSelfAttention' % i: [ prefix + 'attention_1/self/query/kernel', prefix + 'attention_1/self/query/bias', prefix + 'attention_1/self/key/kernel', prefix + 'attention_1/self/key/bias', prefix + 'attention_1/self/value/kernel', prefix + 'attention_1/self/value/bias', prefix + 'attention_1/output/dense/kernel', prefix + 'attention_1/output/dense/bias', ], 'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [ prefix + 'LayerNorm/beta', prefix + 'LayerNorm/gamma', ], 'Transformer-%d-FeedForward' % i: [ prefix + 'ffn_1/intermediate/dense/kernel', prefix + 'ffn_1/intermediate/dense/bias', prefix + 'ffn_1/intermediate/output/dense/kernel', prefix + 'ffn_1/intermediate/output/dense/bias', ], 'Transformer-%d-FeedForward-Norm' % i: [ prefix + 'LayerNorm_1/beta', prefix + 'LayerNorm_1/gamma', ], }) return mapping class NEZHA(BERT): """华为推出的NAZHA模型 链接:https://arxiv.org/abs/1909.00204 """ 
def apply_embeddings(self, inputs): """NEZHA的embedding是token、segment两者embedding之和 """ x, s = inputs z = self.layer_norm_conds[0] x = self.apply( inputs=x, layer=Embedding, input_dim=self.vocab_size, output_dim=self.embedding_size, embeddings_initializer=self.initializer, mask_zero=True, name='Embedding-Token' ) s = self.apply( inputs=s, layer=Embedding, input_dim=2, output_dim=self.embedding_size, embeddings_initializer=self.initializer, name='Embedding-Segment' ) x = self.apply(inputs=[x, s], layer=Add, name='Embedding-Token-Segment') x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='Embedding-Norm' ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='Embedding-Dropout' ) if self.embedding_size != self.hidden_size: x = self.apply( inputs=x, layer=Dense, units=self.hidden_size, kernel_initializer=self.initializer, name='Embedding-Mapping' ) return x def apply_main_layers(self, inputs, index): """NEZHA的主体是基于Self-Attention的模块 顺序:Att --> Add --> LN --> FFN --> Add --> LN """ x = inputs z = self.layer_norm_conds[0] attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index feed_forward_name = 'Transformer-%d-FeedForward' % index attention_mask = self.compute_attention_mask() position_bias = self.compute_position_bias(x) # Self Attention xi, x = x, [x, x, x, position_bias] arguments = {'a_mask': None, 'p_bias': 'typical_relative'} if attention_mask is not None: arguments['a_mask'] = True x.insert(3, attention_mask) x = self.apply( inputs=x, layer=MultiHeadAttention, arguments=arguments, heads=self.num_attention_heads, head_size=self.attention_head_size, key_size=self.attention_key_size, kernel_initializer=self.initializer, name=attention_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % attention_name ) x = self.apply( 
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name ) x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % attention_name ) # Feed Forward xi = x x = self.apply( inputs=x, layer=FeedForward, units=self.intermediate_size, activation=self.hidden_act, kernel_initializer=self.initializer, name=feed_forward_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % feed_forward_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name ) x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % feed_forward_name ) return x def compute_position_bias(self, inputs=None): """经典相对位置编码 """ if self.position_bias is None: def sinusoidal(shape, dtype=None): """NEZHA直接使用Sin-Cos形式的位置向量 """ vocab_size, depth = shape embeddings = np.zeros(shape) for pos in range(vocab_size): for i in range(depth // 2): theta = pos / np.power(10000, 2. 
* i / depth) embeddings[pos, 2 * i] = np.sin(theta) embeddings[pos, 2 * i + 1] = np.cos(theta) return embeddings x = inputs self.position_bias = self.apply( inputs=[x, x], layer=RelativePositionEmbedding, input_dim=2 * 64 + 1, output_dim=self.attention_head_size, embeddings_initializer=sinusoidal, name='Embedding-Relative-Position', trainable=False ) return self.position_bias class ELECTRA(BERT): """Google推出的ELECTRA模型 链接:https://arxiv.org/abs/2003.10555 """ @delete_arguments('with_pool', 'with_mlm') def __init__( self, max_position, # 序列最大长度 **kwargs # 其余参数 ): if 'keep_tokens' in kwargs: del kwargs['keep_tokens'] super(ELECTRA, self).__init__(max_position, **kwargs) def apply_final_layers(self, inputs): x = inputs z = self.layer_norm_conds[0] return x def variable_mapping(self): mapping = super(ELECTRA, self).variable_mapping() mapping['Embedding-Mapping'] = [ 'electra/embeddings_project/kernel', 'electra/embeddings_project/bias', ] mapping = { k: [i.replace('bert/', 'electra/') for i in v] for k, v in mapping.items() } return mapping class GPT2_ML(Transformer): """构建GPT2_ML模型 链接: https://github.com/imcaspar/gpt2-ml """ def __init__( self, max_position, # 序列最大长度 final_activation='softmax', # 预测分布的激活函数 **kwargs # 其余参数 ): super(GPT2_ML, self).__init__(**kwargs) self.max_position = max_position self.final_activation = final_activation def get_inputs(self): """GPT2_ML的输入是token_ids和segment_ids """ x_in = Input(shape=(self.sequence_length,), name='Input-Token') return x_in def apply_embeddings(self, inputs): """GPT2_ML的embedding是token、position两者embedding之和 """ x = inputs z = self.layer_norm_conds[0] x = self.apply( inputs=x, layer=Embedding, input_dim=self.vocab_size, output_dim=self.embedding_size, embeddings_initializer=self.initializer, mask_zero=True, name='Embedding-Token' ) x = self.apply( inputs=x, layer=PositionEmbedding, input_dim=self.max_position, output_dim=self.embedding_size, merge_mode='add', embeddings_initializer=self.initializer, 
name='Embedding-Position' ) x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, epsilon=1e-5, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='Embedding-Norm' ) if self.embedding_size != self.hidden_size: x = self.apply( inputs=x, layer=Dense, units=self.hidden_size, kernel_initializer=self.initializer, name='Embedding-Mapping' ) return x def apply_main_layers(self, inputs, index): """GPT2_ML的主体是基于Self-Attention的模块 顺序:Att --> LN --> FFN --> Add --> LN """ x = inputs z = self.layer_norm_conds[0] attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index feed_forward_name = 'Transformer-%d-FeedForward' % index attention_mask = self.compute_attention_mask() # Self Attention xi, x, arguments = x, [x, x, x, attention_mask], {'a_mask': True} x = self.apply( inputs=x, layer=MultiHeadAttention, arguments=arguments, heads=self.num_attention_heads, head_size=self.attention_head_size, key_size=self.attention_key_size, kernel_initializer=self.initializer, name=attention_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % attention_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % attention_name ) # Feed Forward xi = x x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, epsilon=1e-5, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm-0' % feed_forward_name ) x = self.apply( inputs=x, layer=FeedForward, units=self.intermediate_size, activation=self.hidden_act, kernel_initializer=self.initializer, name=feed_forward_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % feed_forward_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name ) x = self.apply( inputs=self.simplify([x, z]), 
layer=LayerNormalization, epsilon=1e-5, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm-1' % feed_forward_name ) return x def apply_final_layers(self, inputs): """剩余部分 """ x = inputs z = self.layer_norm_conds[0] # Language Model部分 x = self.apply( inputs=x, layer=EmbeddingDense, embedding_name='Embedding-Token', activation=self.final_activation, name='LM-Proba' ) return x def load_variable(self, checkpoint, name): """加载单个变量的函数 """ variable = super(GPT2_ML, self).load_variable(checkpoint, name) if name == 'newslm/embeddings/word_embed': if self.keep_tokens is None: return variable else: return variable[self.keep_tokens] else: return variable def compute_attention_mask(self, inputs=None): """添加下三角形式的attention mask """ if self.attention_mask is None: def lm_mask(s): import tensorflow as tf seq_len = K.shape(s)[1] with K.name_scope('attention_mask'): # 用K.ones可能会有问题 # 参考 https://github.com/tensorflow/tensorflow/issues/24938 ones = tf.ones((1, 1, seq_len, seq_len)) a_mask = tf.linalg.band_part(ones, -1, 0) return a_mask self.attention_mask = self.apply( inputs=self.inputs[0], layer=Lambda, function=lm_mask, name='Attention-LM-Mask' ) return self.attention_mask def variable_mapping(self): """映射到官方GPT2_ML权重格式 """ mapping = { 'Embedding-Token': ['newslm/embeddings/word_embed'], 'Embedding-Position': ['newslm/embeddings/pos_embed'], 'Embedding-Norm': [ 'newslm/embeddings/LayerNorm_embed_norm/beta', 'newslm/embeddings/LayerNorm_embed_norm/gamma', ], } for i in range(self.num_hidden_layers): prefix = 'newslm/layer%02d/' % i mapping.update({ 'Transformer-%d-MultiHeadSelfAttention' % i: [ prefix + 'query_layer/kernel', prefix + 'query_layer/bias', prefix + 'key_layer/kernel', prefix + 'key_layer/bias', prefix + 'value_layer/kernel', prefix + 'value_layer/bias', prefix + 'context_projection_layer/kernel', prefix + 'context_projection_layer/bias', ], 
'Transformer-%d-FeedForward-Norm-0' % i: [ prefix + 'LayerNorm_mlp_ln0/beta', prefix + 'LayerNorm_mlp_ln0/gamma', ], 'Transformer-%d-FeedForward' % i: [ prefix + 'intermediate/kernel', prefix + 'intermediate/bias', prefix + 'output/kernel', prefix + 'output/bias', ], 'Transformer-%d-FeedForward-Norm-1' % i: [ prefix + 'LayerNorm_mlp_ln1/beta', prefix + 'LayerNorm_mlp_ln1/gamma', ], }) return mapping class T5_Base(Transformer): """Google的T5模型(基类) """ def load_variable(self, checkpoint, name): """加载单个变量的函数 """ variable = super(T5_Base, self).load_variable(checkpoint, name) if name == 'shared/embedding': if self.keep_tokens is None: return variable else: return variable[self.keep_tokens] elif 'relative_attention_bias' in name: return variable.T else: return variable def create_variable(self, name, value): """在tensorflow中创建一个变量 """ if 'relative_attention_bias' in name: value = value.T return super(T5_Base, self).create_variable(name, value) def variable_mapping(self): """映射到官方T5权重格式 """ mapping = { 'Embedding-Token': ['shared/embedding'], 'Encoder-Embedding-Relative-Position': [ 'encoder/block_000/layer_000/SelfAttention/relative_attention_bias' ], 'Encoder-Output-Norm': ['encoder/final_layer_norm/scale'], 'Decoder-Embedding-Relative-Position': [ 'decoder/block_000/layer_000/SelfAttention/relative_attention_bias', ], 'Decoder-Output-Norm': ['decoder/final_layer_norm/scale'], } for i in range(self.num_hidden_layers): # Encoder主体 prefix = 'encoder/block_%03d/' % i mapping.update({ 'Encoder-Transformer-%d-MultiHeadSelfAttention' % i: [ prefix + 'layer_000/SelfAttention/q', prefix + 'layer_000/SelfAttention/k', prefix + 'layer_000/SelfAttention/v', prefix + 'layer_000/SelfAttention/o', ], 'Encoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [ prefix + 'layer_000/layer_norm/scale', ], 'Encoder-Transformer-%d-FeedForward' % i: [ prefix + 'layer_001/DenseReluDense/wi/kernel', prefix + 'layer_001/DenseReluDense/wo/kernel', ], 'Encoder-Transformer-%d-FeedForward-Norm' % i: 
[ prefix + 'layer_001/layer_norm/scale', ], }) # Decoder主体 prefix = 'decoder/block_%03d/' % i mapping.update({ 'Decoder-Transformer-%d-MultiHeadSelfAttention' % i: [ prefix + 'layer_000/SelfAttention/q', prefix + 'layer_000/SelfAttention/k', prefix + 'layer_000/SelfAttention/v', prefix + 'layer_000/SelfAttention/o', ], 'Decoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [ prefix + 'layer_000/layer_norm/scale', ], 'Decoder-Transformer-%d-MultiHeadCrossAttention' % i: [ prefix + 'layer_001/EncDecAttention/q', prefix + 'layer_001/EncDecAttention/k', prefix + 'layer_001/EncDecAttention/v', prefix + 'layer_001/EncDecAttention/o', ], 'Decoder-Transformer-%d-MultiHeadCrossAttention-Norm' % i: [ prefix + 'layer_001/layer_norm/scale', ], 'Decoder-Transformer-%d-FeedForward' % i: [ prefix + 'layer_002/DenseReluDense/wi/kernel', prefix + 'layer_002/DenseReluDense/wo/kernel', ], 'Decoder-Transformer-%d-FeedForward-Norm' % i: [ prefix + 'layer_002/layer_norm/scale', ], }) return mapping class T5_Encoder(T5_Base): """Google的T5模型(Encoder) """ def get_inputs(self): """T5的Encoder的输入只有token_ids """ x_in = Input(shape=(self.sequence_length,), name='Encoder-Input-Token') return x_in def apply_embeddings(self, inputs): """T5的embedding只有token embedding, 并把relative position embedding准备好,待attention使用。 """ x = inputs x = self.apply( inputs=x, layer=Embedding, input_dim=self.vocab_size, output_dim=self.embedding_size, embeddings_initializer=self.initializer, mask_zero=True, name='Embedding-Token' ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='Encoder-Embedding-Dropout' ) if self.embedding_size != self.hidden_size: x = self.apply( inputs=x, layer=Dense, units=self.hidden_size, kernel_initializer=self.initializer, name='Encoder-Embedding-Mapping' ) return x def apply_main_layers(self, inputs, index): """T5的Encoder的主体是基于Self-Attention的模块 顺序:LN --> Att --> Add --> LN --> FFN --> Add """ x = inputs z = self.layer_norm_conds[0] attention_name = 
'Encoder-Transformer-%d-MultiHeadSelfAttention' % index feed_forward_name = 'Encoder-Transformer-%d-FeedForward' % index attention_mask = self.compute_attention_mask() position_bias = self.compute_position_bias(x) # Self Attention xi = x x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, center=False, epsilon=1e-6, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % attention_name ) x = self.apply( inputs=[x, x, x, position_bias], layer=MultiHeadAttention, arguments={'p_bias': 't5_relative'}, heads=self.num_attention_heads, head_size=self.attention_head_size, key_size=self.attention_key_size, use_bias=False, scaled_dot_product=False, kernel_initializer=self.initializer, name=attention_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % attention_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % attention_name ) # Feed Forward xi = x x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, center=False, epsilon=1e-6, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % feed_forward_name ) x = self.apply( inputs=x, layer=FeedForward, units=self.intermediate_size, activation=self.hidden_act, use_bias=False, kernel_initializer=self.initializer, name=feed_forward_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % feed_forward_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name ) return x def apply_final_layers(self, inputs): """剩余部分 """ x = inputs z = self.layer_norm_conds[0] x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, center=False, epsilon=1e-6, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], 
hidden_initializer=self.initializer, name='Encoder-Output-Norm' ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='Encoder-Output-Dropout' ) return x def compute_position_bias(self, inputs=None): """T5相对位置编码 """ if self.position_bias is None: x = inputs p = self.apply( inputs=[x, x], layer=RelativePositionEmbeddingT5, input_dim=32, output_dim=self.num_attention_heads, bidirectional=True, embeddings_initializer=self.initializer, name='Encoder-Embedding-Relative-Position' ) self.position_bias = p return self.position_bias class T5_Decoder(Transformer): """Google的T5模型(Decoder) """ def __init__(self, with_lm=True, **kwargs): super(T5_Decoder, self).__init__(**kwargs) if with_lm is True: self.with_lm = 'softmax' else: self.with_lm = with_lm def get_inputs(self): """T5的Decoder的输入为context序列和token_ids """ c_in = Input( shape=(self.sequence_length, self.hidden_size), name='Input-Context' ) x_in = Input(shape=(self.sequence_length,), name='Decoder-Input-Token') return [c_in, x_in] def apply_embeddings(self, inputs): """T5的embedding只有token embedding, 并把relative position embedding准备好,待attention使用。 """ c, x = inputs x = self.apply( inputs=x, layer=Embedding, input_dim=self.vocab_size, output_dim=self.embedding_size, embeddings_initializer=self.initializer, mask_zero=True, name='Embedding-Token' ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='Decoder-Embedding-Dropout' ) if self.embedding_size != self.hidden_size: x = self.apply( inputs=x, layer=Dense, units=self.hidden_size, kernel_initializer=self.initializer, name='Decoder-Embedding-Mapping' ) return [c, x] def apply_main_layers(self, inputs, index): """T5的Dencoder主体是基于Self-Attention、Cross-Attention的模块 顺序:LN --> Att1 --> Add --> LN --> Att2 --> Add --> LN --> FFN --> Add """ c, x = inputs z = self.layer_norm_conds[0] self_attention_name = 'Decoder-Transformer-%d-MultiHeadSelfAttention' % index cross_attention_name = 'Decoder-Transformer-%d-MultiHeadCrossAttention' % index 
feed_forward_name = 'Decoder-Transformer-%d-FeedForward' % index attention_mask = self.compute_attention_mask() position_bias = self.compute_position_bias([x, c]) # Self Attention xi = x x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, center=False, epsilon=1e-6, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % self_attention_name ) x = self.apply( inputs=[x, x, x, attention_mask, position_bias[0]], layer=MultiHeadAttention, arguments={ 'a_mask': True, 'p_bias': 't5_relative' }, heads=self.num_attention_heads, head_size=self.attention_head_size, key_size=self.attention_key_size, use_bias=False, scaled_dot_product=False, kernel_initializer=self.initializer, name=self_attention_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % self_attention_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % self_attention_name ) # Cross Attention xi = x x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, center=False, epsilon=1e-6, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % cross_attention_name ) x = self.apply( inputs=[x, c, c, position_bias[1]], layer=MultiHeadAttention, arguments={ 'a_mask': None, 'p_bias': 't5_relative' }, heads=self.num_attention_heads, head_size=self.attention_head_size, key_size=self.attention_key_size, use_bias=False, scaled_dot_product=False, kernel_initializer=self.initializer, name=cross_attention_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % cross_attention_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % cross_attention_name ) # Feed Forward xi = x x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, center=False, epsilon=1e-6, 
conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='%s-Norm' % feed_forward_name ) x = self.apply( inputs=x, layer=FeedForward, units=self.intermediate_size, activation=self.hidden_act, use_bias=False, kernel_initializer=self.initializer, name=feed_forward_name ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='%s-Dropout' % feed_forward_name ) x = self.apply( inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name ) return [c, x] def apply_final_layers(self, inputs): """剩余部分 """ c, x = inputs z = self.layer_norm_conds[0] x = self.apply( inputs=self.simplify([x, z]), layer=LayerNormalization, center=False, epsilon=1e-6, conditional=(z is not None), hidden_units=self.layer_norm_conds[1], hidden_activation=self.layer_norm_conds[2], hidden_initializer=self.initializer, name='Decoder-Output-Norm' ) x = self.apply( inputs=x, layer=Dropout, rate=self.dropout_rate, name='Decoder-Output-Dropout' ) x = self.apply( inputs=x, layer=Lambda, function=lambda x: x / np.sqrt(self.hidden_size), name='Decoder-Output-Scale' ) if self.with_lm: # 预测token概率部分 if self.embedding_size != self.hidden_size: x = self.apply( inputs=x, layer=Dense, units=self.embedding_size, kernel_initializer=self.initializer, name='Decoder-Output-Mapping' ) x = self.apply( inputs=x, layer=EmbeddingDense, embedding_name='Embedding-Token', activation=self.with_lm, use_bias=False, name='Dencoder-Output-LM-Proba' ) return x def compute_attention_mask(self, inputs=None): """添加下三角形式的attention mask """ if self.attention_mask is None: def lm_mask(s): import tensorflow as tf seq_len = K.shape(s)[1] with K.name_scope('attention_mask'): # 用K.ones可能会有问题 # 参考 https://github.com/tensorflow/tensorflow/issues/24938 ones = tf.ones((1, 1, seq_len, seq_len)) a_mask = tf.linalg.band_part(ones, -1, 0) return a_mask self.attention_mask = self.apply( inputs=self.inputs[1], layer=Lambda, function=lm_mask, 
name='Attention-LM-Mask' ) return self.attention_mask def compute_position_bias(self, inputs=None): """T5相对位置编码 """ if self.position_bias is None: x, c = inputs p1 = self.apply( inputs=[x, x], layer=RelativePositionEmbeddingT5, input_dim=32, output_dim=self.num_attention_heads, bidirectional=False, embeddings_initializer=self.initializer, name='Decoder-Embedding-Relative-Position' ) p2 = self.apply( inputs=[x, c], layer=RelativePositionEmbeddingT5, input_dim=32, output_dim=self.num_attention_heads, bidirectional=False, embeddings_initializer=self.initializer, name='Decoder-Embedding-Relative-Position' ) self.position_bias = (p1, p2) return self.position_bias class T5(T5_Base): """Google的T5模型(Encoder-Decoder) """ def __init__(self, **kwargs): super(T5, self).__init__(**kwargs) kwargs['layers'] = self.layers e_name, d_name = 'Encoder', 'Decoder' if 'name' in kwargs: e_name = '%s_%s' % (kwargs['name'], e_name) d_name = '%s_%s' % (kwargs['name'], d_name) del kwargs['name'] # 防止重复传参 self._encoder = T5_Encoder(name=e_name, **kwargs) self._decoder = T5_Decoder(name=d_name, **kwargs) def build(self, **kwargs): """同时构建Encoder和Decoder """ self._encoder.build(**kwargs) self._decoder.build(**kwargs) self.encoder = self._encoder.model self.decoder = self._decoder.model self.inputs = self.encoder.inputs + self.decoder.inputs[1:] self.outputs = self.decoder( self.encoder.outputs + self.decoder.inputs[1:] ) self.model = Model(self.inputs, self.outputs) def extend_with_language_model(BaseModel): """添加下三角的Attention Mask(语言模型用) """ class LanguageModel(BaseModel): """带下三角Attention Mask的派生模型 """ def __init__(self, *args, **kwargs): super(LanguageModel, self).__init__(*args, **kwargs) self.with_mlm = self.with_mlm or True def compute_attention_mask(self, inputs=None): """重载此函数即可 """ if self.attention_mask is None: def lm_mask(s): import tensorflow as tf seq_len = K.shape(s)[1] with K.name_scope('attention_mask'): # 用K.ones可能会有问题 # 参考 https://github.com/tensorflow/tensorflow/issues/24938 
ones = tf.ones((1, 1, seq_len, seq_len)) a_mask = tf.linalg.band_part(ones, -1, 0) return a_mask self.attention_mask = self.apply( inputs=self.inputs[1], layer=Lambda, function=lm_mask, name='Attention-LM-Mask' ) return self.attention_mask return LanguageModel def extend_with_unified_language_model(BaseModel): """添加UniLM的Attention Mask(UnifiedLanguageModel用) """ class UnifiedLanguageModel(BaseModel): """带UniLM的Attention Mask的派生模型 UniLM: https://arxiv.org/abs/1905.03197 """ def __init__(self, *args, **kwargs): super(UnifiedLanguageModel, self).__init__(*args, **kwargs) self.with_mlm = self.with_mlm or True def compute_attention_mask(self, inputs=None): """重载此函数即可 """ if self.attention_mask is None: def unilm_mask(s): import tensorflow as tf s = K.cast(s, K.floatx()) seq_len = K.shape(s)[1] with K.name_scope('attention_mask'): # 用K.ones可能会有问题 # 参考 https://github.com/tensorflow/tensorflow/issues/24938 ones = tf.ones((1, 1, seq_len, seq_len)) a_mask = tf.linalg.band_part(ones, -1, 0) s_ex12 = K.expand_dims(K.expand_dims(s, 1), 2) s_ex13 = K.expand_dims(K.expand_dims(s, 1), 3) a_mask = (1 - s_ex13) * (1 - s_ex12) + s_ex13 * a_mask return a_mask self.attention_mask = self.apply( inputs=self.inputs[1], layer=Lambda, function=unilm_mask, name='Attention-UniLM-Mask' ) return self.attention_mask return UnifiedLanguageModel def build_transformer_model( config_path=None, checkpoint_path=None, model='bert', application='encoder', return_keras_model=True, **kwargs ): """根据配置文件构建模型,可选加载checkpoint权重 """ configs = {} if config_path is not None: configs.update(json.load(open(config_path))) configs.update(kwargs) if 'max_position' not in configs: configs['max_position'] = configs.get('max_position_embeddings') if 'dropout_rate' not in configs: configs['dropout_rate'] = configs.get('hidden_dropout_prob') model, application = model.lower(), application.lower() models = { 'bert': BERT, 'albert': ALBERT, 'albert_unshared': ALBERT_Unshared, 'nezha': NEZHA, 'electra': ELECTRA, 'gpt2_ml': 
GPT2_ML, 't5': T5, } MODEL = models[model] if model != 't5': if application == 'lm': MODEL = extend_with_language_model(MODEL) elif application == 'unilm': MODEL = extend_with_unified_language_model(MODEL) transformer = MODEL(**configs) transformer.build(**configs) if checkpoint_path is not None: transformer.load_weights_from_checkpoint(checkpoint_path) if return_keras_model: return transformer.model else: return transformer
62,335
32.157447
87
py
BiRTE
BiRTE-main/bert4keras/__init__.py
#! -*- coding: utf-8 -*-
"""bert4keras package marker: exposes the vendored library's version string."""

__version__ = '0.7.2'
48
11.25
24
py
rnn-seq2seq-learning
rnn-seq2seq-learning-main/scripts/visualization.py
'''
Author: Zhengxiang (Jack) Wang
GitHub: https://github.com/jaaack-wang
Website: https://jaaack-wang.eu.org
About: visualization function.
'''
import matplotlib.pyplot as plt


def plot_training_log(log, show_plot=True, saved_plot_fp=None):
    """Plot per-epoch loss and the three accuracy metrics for train/dev.

    Args:
        log (dict): training log keyed by "Epoch#<n>"; each value holds
            "Train" and "Eval" dicts with 'loss', 'full sequence accuracy',
            'first n-symbol accuracy' and 'overlap rate'. A top-level
            "Best eval accu" entry, if present, is dropped before plotting.
        show_plot (bool): whether to display the figure.
        saved_plot_fp (str/None): filepath to save the figure to, or None.
    """
    log = log.copy()
    if "Best eval accu" in log:
        log.pop("Best eval accu")

    metrics = ("loss", "full sequence accuracy",
               "first n-symbol accuracy", "overlap rate")
    # curves[split][metric] -> list of per-epoch values, in epoch order
    curves = {split: {m: [] for m in metrics} for split in ("Train", "Eval")}
    epoch_nums = [int(e_n.split("#")[-1]) for e_n in log.keys()]

    for epoch in epoch_nums:
        for split in ("Train", "Eval"):
            entry = log[f"Epoch#{epoch}"][split]
            for m in metrics:
                curves[split][m].append(entry[m])

    fig = plt.figure(figsize=(16, 8))
    fig.subplots_adjust(hspace=0.3, wspace=0.2)

    for idx, m in enumerate(metrics):
        ax = plt.subplot(2, 2, idx + 1)
        ax.plot(epoch_nums, curves["Train"][m], label="train")
        ax.plot(epoch_nums, curves["Eval"][m], label="dev")
        ax.set_xlabel("Epoch Number")
        # the loss panel is labeled "Loss"; accuracy panels use title case
        ax.set_ylabel("Loss" if m == "loss" else m.title())
        ax.grid(True, alpha=0.1)
        ax.legend()

    if saved_plot_fp is not None:
        plt.savefig(saved_plot_fp, dpi=600, bbox_inches='tight')
        print(f"{saved_plot_fp} saved!")
    if show_plot:
        plt.show()
    else:
        plt.close()


def plot_performances_per_seq_len(perfs, show_plot=True, saved_plot_fp=None):
    """Plot the three evaluation metrics as a function of sequence length.

    Args:
        perfs (dict): keyed by "Len-<l>" plus an "Aggregated" summary entry
            (removed before plotting); each length entry holds the three
            metric values.
        show_plot (bool): whether to display the figure.
        saved_plot_fp (str/None): filepath to save the figure to, or None.
    """
    perfs = perfs.copy()
    perfs.pop("Aggregated")

    lengths = sorted(int(l.split("-")[-1]) for l in perfs.keys())
    full_seq = [perfs[f"Len-{l}"]["full sequence accuracy"] for l in lengths]
    first_n = [perfs[f"Len-{l}"]["first n-symbol accuracy"] for l in lengths]
    overlap = [perfs[f"Len-{l}"]["overlap rate"] for l in lengths]

    # scale the figure with the number of distinct lengths so ticks stay legible
    if len(lengths) < 20:
        plt.figure(figsize=(len(lengths) // 1, len(lengths) // 2))
    else:
        plt.figure(figsize=(len(lengths) // 4, len(lengths) // 10))

    plt.plot(lengths, full_seq, ".-", label="full sequence accuracy")
    plt.plot(lengths, first_n, ".-", label="first n-symbol accuracy")
    plt.plot(lengths, overlap, ".-", label="overlap rate")
    plt.xticks(lengths)
    plt.xlabel("Sequence Length")
    plt.ylabel("Performance")
    plt.grid(True, alpha=0.1)
    plt.legend()

    if saved_plot_fp is not None:
        plt.savefig(saved_plot_fp, dpi=600, bbox_inches='tight')
        print(f"{saved_plot_fp} saved!")
    if show_plot:
        plt.show()
    else:
        plt.close()
    # BUG FIX: removed a stray trailing plt.show() that executed even when
    # show_plot=False, rendering a window right after plt.close().
3,737
31.789474
77
py
rnn-seq2seq-learning
rnn-seq2seq-learning-main/scripts/dataloader.py
'''
Author: Zhengxiang (Jack) Wang
GitHub: https://github.com/jaaack-wang
Website: https://jaaack-wang.eu.org
About: Code for creating dataloader in PyTorch
'''
import torch
from functools import partial
from torch.utils.data import Dataset, DataLoader

import sys
import pathlib

# import from local script
sys.path.insert(0, str(pathlib.Path(__file__).parent))
from utils import read_data


class Transform(Dataset):
    '''Dataset that encodes [input, output] pairs into integer sequences.

    Args:
        data (list): contains a list of [input, output].
        in_seq_encoder (method): a function to encode input.
        out_seq_encoder (method): a function to encode output.
    '''

    def __init__(self, data, in_seq_encoder, out_seq_encoder):
        self.data = data
        self.in_seq_encoder = in_seq_encoder
        self.out_seq_encoder = out_seq_encoder

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        in_seq, out_seq = self.data[index]
        return self.in_seq_encoder(in_seq), self.out_seq_encoder(out_seq)


def make_map(vocab):
    '''Builds a vocab-to-idx map and its inverse.

    Indices 0 and 1 are reserved for the start ("<s>") and end ("</s>")
    symbols; remaining tokens are numbered in iteration order.

    Args:
        vocab (iterable): vocabulary to be used.
    '''
    v2idx = {"<s>": 0, "</s>": 1}
    for v in vocab:
        v2idx.update({v: len(v2idx)})
    idx2v = {idx: v for v, idx in v2idx.items()}
    return v2idx, idx2v


def get_text_encoder_decoder(vocab, tokenizer=None):
    '''Returns a text encoder and a text decoder.

    The encoder converts text into a list of indices wrapped with the
    start/end indices 0 and 1; the decoder converts indices back into
    tokens. The index equal to the vocab-map size stands for any unknown
    token, which decodes to "<unk>". If a pad index is needed, add the pad
    token into vocab and pass its index to create_dataloader; otherwise
    the padding index also defaults to the vocab size.

    Args:
        vocab (iterable): vocabulary.
        tokenizer (method/None): converts a text into a list of tokens.
            Defaults to None (texts are iterated element by element).
    '''
    v2idx, idx2v = make_map(vocab)
    if not tokenizer:
        tokenizer = lambda t: t

    encoder = lambda text: [0] + [v2idx.get(t, len(v2idx))
                                  for t in tokenizer(text)] + [1]
    decoder = lambda idx: [idx2v.get(i, "<unk>") for i in idx]
    return encoder, decoder


def make_tensors(size, fill_idx):
    '''Makes an int64 tensor of the given size filled with fill_idx.'''
    # torch.full is the idiomatic one-step replacement for the original
    # torch.fill_(torch.empty(size), fill_idx) followed by .long().
    return torch.full(size, fill_idx, dtype=torch.long)


def collate_fn(batch, padding_idx, in_max_seq_len=None, out_max_seq_len=None):
    '''Collates a batch into (seq_len, batch)-shaped input/output tensors.

    Args:
        batch: transformed batched data.
        padding_idx (integer): integer used to pad a batch.
        in/out_max_seq_len (None/integer): max in/output sequence length.
            If the given length is shorter than the actual batch maximum,
            the latter is used. Defaults to None (use the actual max).
    '''
    N = len(batch)
    X, Y = zip(*batch)

    in_max_len = max(len(x) for x in X)
    out_max_len = max(len(y) for y in Y)
    if in_max_seq_len and in_max_seq_len > in_max_len:
        in_max_len = in_max_seq_len
    if out_max_seq_len and out_max_seq_len > out_max_len:
        out_max_len = out_max_seq_len

    inputs = make_tensors((in_max_len, N), padding_idx)
    outputs = make_tensors((out_max_len, N), padding_idx)
    # copy each (shorter) sequence into its column; the tail stays padding
    for idx, (x, y) in enumerate(batch):
        inputs[:len(x), idx] = torch.Tensor(x).long()
        outputs[:len(y), idx] = torch.Tensor(y).long()
    return inputs, outputs


def create_dataloader(data, in_seq_encoder, out_seq_encoder, padding_idx,
                      shuffle=False, batch_size=256,
                      in_max_seq_len=None, out_max_seq_len=None):
    '''Creates a DataLoader over (encoded) [input, output] pairs.

    Args:
        data (list/str): a list of [input, output], or a filepath to the
            data (see read_data).
        in_seq_encoder (method): a function to encode input.
        out_seq_encoder (method): a function to encode output.
        padding_idx (integer): integer used to pad a batch.
        shuffle (bool): whether to shuffle the data. Defaults to False.
        batch_size (integer): batch size. Defaults to 256.
        in/out_max_seq_len (None/integer): see collate_fn.
    '''
    if isinstance(data, str):
        data = read_data(data)

    collate = lambda batch: collate_fn(batch, padding_idx,
                                       in_max_seq_len, out_max_seq_len)
    dataset = Transform(data, in_seq_encoder, out_seq_encoder)
    return DataLoader(dataset, batch_size, shuffle, collate_fn=collate)


def customize_dataloader_func(in_seq_encoder, out_seq_encoder, padding_idx,
                              shuffle=False, batch_size=256,
                              in_max_seq_len=None, out_max_seq_len=None):
    '''Returns create_dataloader with everything but `data` pre-bound.'''
    return partial(create_dataloader, shuffle=shuffle, batch_size=batch_size,
                   padding_idx=padding_idx, in_seq_encoder=in_seq_encoder,
                   out_seq_encoder=out_seq_encoder,
                   in_max_seq_len=in_max_seq_len,
                   out_max_seq_len=out_max_seq_len)
6,202
32.711957
75
py
rnn-seq2seq-learning
rnn-seq2seq-learning-main/scripts/utils.py
''' - Author: Zhengxiang (Jack) Wang - GitHub: https://github.com/jaaack-wang - Website: https://jaaack-wang.eu.org - About: General utility functions for handling files. ''' import os from os import listdir, walk from os.path import isfile, join, exists import random import json def read_data(filepath, skip=0, sep="\t"): data = [] file = open(filepath, "r") for _ in range(skip): next(file) for line in file: line = line.strip("\n").split(sep) assert len(line) >= 2, "each line" \ "must have two items separated by" \ f"{sep} in {filepath}" data.append([line[0], line[-1]]) return data def read_datasets(data_folder): train = read_data(join(data_folder, "train.txt")) dev = read_data(join(data_folder, "dev.txt")) test = read_data(join(data_folder, "test.txt")) gen = read_data(join(data_folder, "gen.txt")) return train, dev, test, gen def save_ds_in_txt(ds, fp): tmp = "{}\t{}" with open(fp, "w") as f: f.write(tmp.format(ds[0][0], ds[0][1])) for d in ds[1:]: f.write("\n" + tmp.format(d[0], d[1])) f.close() print(fp + " saved!") def read_json(fp): return json.load(open(fp, "r")) def save_dict_as_json(dic, fp, indent=4): with open(fp, "w") as f: json.dump(dic, f, indent=indent) print(fp + " saved!") def get_filepathes_from_dir(file_dir, include_sub_dir=False, file_format=None, shuffle=False): if include_sub_dir: filepathes = [] for root, _, files in walk(file_dir, topdown=False): for f in files: filepathes.append(join(root, f)) else: filepathes = [join(file_dir, f) for f in listdir(file_dir) if isfile(join(file_dir, f))] if file_format: if not isinstance(file_format, (str, list, tuple)): raise TypeError("file_format must be str, list or tuple.") file_format = tuple(file_format) if isinstance(file_format, list) else file_format format_checker = lambda f: f.endswith(file_format) filepathes = list(filter(format_checker, filepathes)) if shuffle: random.shuffle(filepathes) return filepathes
2,324
25.724138
90
py
rnn-seq2seq-learning
rnn-seq2seq-learning-main/scripts/model.py
''' Author: Zhengxiang (Jack) Wang GitHub: https://github.com/jaaack-wang Website: https://jaaack-wang.eu.org About: RNN Seq2Seq models (Simple RNN, GRU, LSTM) in PyTorch. Allows: attention, bidirectional RNN, as well as multilayered RNN etc. ''' import random import torch import torch.nn as nn import torch.nn.functional as F class Encoder(nn.Module): def __init__(self, in_vocab_size, hidden_size, embd_dim, num_layers=1, rnn_type="SRNN", dropout_rate=0.0, bidirectional=False, reduction_method=torch.sum): super(Encoder, self).__init__() self.embedding = nn.Embedding(in_vocab_size, embd_dim) self.num_layers = num_layers self.rnn_type = rnn_type.upper() self.bidirectional = bidirectional if self.rnn_type == "GRU": rnn_ = nn.GRU elif self.rnn_type == "LSTM": rnn_ = nn.LSTM elif self.rnn_type == "SRNN": rnn_ = nn.RNN else: raise ValueError("Only supports SRNN, GRU, LSTM," \ " but {self.rnn_type} was given.") self.rnn = rnn_(embd_dim, hidden_size, num_layers, bidirectional=bidirectional) self.reduce = reduction_method self.dropout = nn.Dropout(dropout_rate) def forward(self, X): # X: (max input seq len, batch size) # embd: (max input seq len, batch size, embd dim) embd = self.dropout(self.embedding(X)) # outputs: (max input seq len, batch size, # hidden size * num directions) # hidden: (num directions * num layers, batch size, hidden size) # cell: (num directions * num layers, batch size, hidden size) if self.rnn_type == "LSTM": outputs, (hidden, cell) = self.rnn(embd) else: outputs, hidden = self.rnn(embd) cell = None # placeholder if self.bidirectional: seq_len, batch_size = X.shape hidden = hidden.view(2, self.num_layers, batch_size, -1) hidden = self.reduce(hidden, dim=0) if self.rnn_type == "LSTM": cell = cell.view(2, self.num_layers, batch_size, -1) cell = self.reduce(cell, dim=0) outputs = outputs.view(seq_len, batch_size, 2, -1) outputs = self.reduce(outputs, dim=2) # outputs: (max input seq len, batch size, hidden size) # hidden: (num layers, batch size, hidden 
size) # cell: (num layers, batch size, hidden size) return outputs, hidden, cell class Attention(nn.Module): def __init__(self, hidden_size): super(Attention, self).__init__() self.attn = nn.Linear(2 * hidden_size, hidden_size) self.v = nn.Linear(hidden_size, 1, bias=False) def forward(self, hidden, encoder_outputs): # hidden: (batch size, hidden size) # encoder_outputs: (max input seq len, # batch size, hidden size) seq_len = encoder_outputs.shape[0] batch_size = encoder_outputs.shape[1] # hidden: (batch size, max input seq len, hidden size) # encoder_outputs: same as hidden above hidden = hidden.unsqueeze(1).repeat(1, seq_len, 1) encoder_outputs = encoder_outputs.permute(1, 0, 2) # cat: (batch size, max input seq len, 2 * hidden size) # energy: (batch size, max input seq len, hidden size) # attention: (batch size, max input seq len) cat = torch.cat((hidden, encoder_outputs), dim = 2) energy = torch.tanh(self.attn(cat)) attention = self.v(energy).squeeze(2) return F.softmax(attention, dim=1) class Decoder(nn.Module): def __init__(self, out_vocab_size, hidden_size, embd_dim, num_layers=1, rnn_type="RNN", attention=None, use_attention=True, dropout_rate=0.0, reduction_method=torch.sum): super(Decoder, self).__init__() self.embedding = nn.Embedding(out_vocab_size, embd_dim) self.rnn_type = rnn_type.upper() self.use_attention = use_attention if self.rnn_type == "GRU": rnn_ = nn.GRU elif self.rnn_type == "LSTM": rnn_ = nn.LSTM elif self.rnn_type == "SRNN": rnn_ = nn.RNN else: raise ValueError("Only supports SRNN, GRU, LSTM," \ " but {self.rnn_type} was given.") if use_attention: self.rnn = rnn_(embd_dim + hidden_size, hidden_size, num_layers) else: self.rnn = rnn_(embd_dim, hidden_size, num_layers) self.attention = attention self.reduce = reduction_method self.dropout = nn.Dropout(dropout_rate) self.fc_out = nn.Linear(hidden_size, out_vocab_size) def forward(self, y, hidden, cell, encoder_outputs): # y: (1, batch size) # hidden: (num layers, batch size, hidden size) # 
cell: (num layers, batch size, hidden size) or a 3-D placeholder # encoder_outputs: (max input seq len, batch size, hidden size) # embd: (num layers, batch size, embd dim) embd = self.dropout(self.embedding(y)) if self.use_attention and self.attention: # reduced_hidden: (batch size, hidden size) # attn_weights: (batch size, 1, max input seq len) reduced_hidden = self.reduce(hidden, dim=0) attn_weights = self.attention(reduced_hidden, encoder_outputs).unsqueeze(1) # encoder_outputs: (batch size, max input seq len, hidden size) encoder_outputs = encoder_outputs.permute(1, 0, 2) # weighted: (1, batch size, hidden size) weighted = torch.bmm(attn_weights, encoder_outputs).permute(1, 0, 2) # cat: (1, batch size, embd dim + hidden size) rnn_input = torch.cat((embd, weighted), dim = 2) else: rnn_input = embd attn_weights = None # placeholder # hidden/cell: (num layers, batch size, hidden size) # output: (1, batch size, hidden size) if self.rnn_type == "LSTM": output, (hidden, cell) = self.rnn(rnn_input, (hidden, cell)) else: output, hidden = self.rnn(rnn_input, hidden) # output: (batch size, out vocab size) output = self.fc_out(output.squeeze(0)) return output, hidden, cell, attn_weights class Seq2Seq(nn.Module): def __init__(self, encoder, decoder, device): super(Seq2Seq, self).__init__() self.encoder = encoder self.decoder = decoder self.device = device def forward(self, X, Y, teacher_forcing_ratio=0.0): # X: (max input seq len, batch size) # Y: (max output seq len, batch size) y = Y[0:1] # y: (1, batch size) outputs, attn_weights = [], [] encoder_outputs, hidden, cell = self.encoder(X) for t in range(1, Y.shape[0]): output, hidden, cell, attn_w = \ self.decoder(y, hidden, cell, encoder_outputs) outputs.append(output); attn_weights.append(attn_w) teacher_force = random.random() < teacher_forcing_ratio if teacher_force: y = Y[t:t+1] else: y = output.argmax(1).unsqueeze(0) # outputs: ((max output seq len-1) * batch size, out vocab size) outputs = 
torch.cat(outputs).to(self.device) return outputs, attn_weights
7,864
38.522613
80
py
rnn-seq2seq-learning
rnn-seq2seq-learning-main/scripts/data.py
''' - Author: Zhengxiang (Jack) Wang - GitHub: https://github.com/jaaack-wang - Website: https://jaaack-wang.eu.org - About: Code for generating train, dev, test, and gen sets containing string pairs applying the following string transducation functions: identity (w --> w), reveral (w --> w^R), total reduplication (w --> ww), and quadratic copying (w --> w^|w|). - Note that for all the datasets generated by the three string functions, the corresponding input strings are identical. ''' import random import itertools from string import ascii_lowercase from os import makedirs from os.path import join import sys import pathlib # import from local script sys.path.insert(0, str(pathlib.Path(__file__).parent)) from utils import save_ds_in_txt # define the alphbet. in this case, = 26 English letters alphabet = ascii_lowercase # where to store the generated data data_folder = "./data" # define the in- and out-of- distribution ranges in_distribution_ranges = [(6, 15)] out_distribution_ranges = [(1, 5), (16, 30)] # define the sizes for the train, dev, test, and gen sets train_size, dev_size, test_size, gen_size = 1000, 1000, 5000, 5000 # random seed (may not help reproducibility though) random.seed(8741983) def get_identity_pair(w): '''Returns a pair of identical strings.''' return [w, w] def get_rev_pair(w): '''Returns a pair of an input string and its reverse.''' return [w, w[::-1]] def get_total_red_pair(w): '''Returns a pair of an input string and its total reduplication.''' return [w, w+w] def get_quad_copy_pair(w): '''Returns a pair of an input string and its total reduplication.''' return [w, w * len(w)] def all_words_of_length(length, alphabet): '''Returns all possible strings given the string length and an alphabet.''' return [''.join(list(b)) for b in itertools.product(alphabet, repeat=length)] def n_words_of_length(max_num_per_seq, length, alphabet, always_unique=False): '''Returns ::max_num_per_seq:: strings given the string length and an alphabet. 
When the possible number of strings is less than ::always_unique::, the number of returned strings equals ::max_num_per_seq:: if ::always_unique:: is set False (by default) and thus the strings may contain duplicates; otherwise, simple return all the possible unique strings given the string length and the alphabet.''' if max_num_per_seq >= pow(len(alphabet), length): out = all_words_of_length(length, alphabet) if max_num_per_seq == len(out) or always_unique: return out multiplier = max_num_per_seq // len(out) if max_num_per_seq % len(out) != 0: multiplier = multiplier + 1 out = out * multiplier random.shuffle(out) return out[:max_num_per_seq] out = set() while len(out) < max_num_per_seq: word = "".join(random.choices(alphabet, k=length)) out.add(word) return list(out) def generate_datasets(train_X, dev_X, test_X, gen_X, str_func, str_func_name, data_folder): '''Generates train/dev/test/gen sets for a given str_func and saves them in ::data_folder/str_func_name::. ''' folder = join(data_folder, str_func_name) makedirs(folder, exist_ok=True) for X, fname in zip([train_X, dev_X, test_X, gen_X], ["train.txt", "dev.txt", "test.txt", "gen.txt"]): data = list(map(str_func, X)) fp = join(folder, fname) save_ds_in_txt(data, fp) def main(): '''The main function generates all the needed datasets for the three given string transduction functions according to the configurations specified at the top of the current script.''' train_X, dev_X, test_X, gen_X = [], [], [], [] total = sum((train_size, dev_size, test_size)) split_1, split_2 = train_size, train_size + dev_size for (l, h) in in_distribution_ranges: for n in range(l, h+1): data = n_words_of_length(total, n, alphabet) train_X.extend(data[:split_1]) dev_X.extend(data[split_1:split_2]) test_X.extend(data[split_2:]) for (l, h) in out_distribution_ranges: for n in range(l, h+1): gen_X.extend(n_words_of_length(gen_size, n, alphabet)) str_funcs = [get_identity_pair, get_rev_pair, get_total_red_pair, get_quad_copy_pair] 
str_func_names = ["identity", "rev", "total_red", "quad_copy"] for str_func, str_func_name in zip(str_funcs, str_func_names): generate_datasets(train_X, dev_X, test_X, gen_X, str_func, str_func_name, data_folder=data_folder) if __name__ == "__main__": main()
4,819
32.013699
86
py
rnn-seq2seq-learning
rnn-seq2seq-learning-main/scripts/pytorch_utils.py
''' Author: Zhengxiang (Jack) Wang GitHub: https://github.com/jaaack-wang Website: https://jaaack-wang.eu.org About: Utility functions for training, evaluation, and deployment (i.e., prediction). ''' import torch import torch.nn as nn import torch.nn.init as init from functools import partial import matplotlib.pyplot as plt import sys import pathlib # import from local script sys.path.insert(0, str(pathlib.Path(__file__).parent)) from model import * def init_weights(model, init_method=init.xavier_uniform_): '''Initialize model's weights by a given method. Defaults to Xavier initialization.''' for name, param in model.named_parameters(): if 'weight' in name: init_method(param.data) else: init.constant_(param.data, 0) def count_parameters(model): '''Count the number of trainable parameters.''' return sum(p.numel() for p in model.parameters() if p.requires_grad) def get_model(ModelConfig, init_method=init.xavier_uniform_): '''Customized function to initialze a model given ModelConfig.''' rnn_type = ModelConfig['rnn_type'] hidden_size = ModelConfig['hidden_size'] embd_dim = ModelConfig['embd_dim'] num_layers = ModelConfig['num_layers'] dropout_rate = ModelConfig['dropout_rate'] use_attention = ModelConfig['use_attention'] bidirectional = ModelConfig['bidirectional'] in_vocab_size = ModelConfig['in_vocab_size'] out_vocab_size = ModelConfig['out_vocab_size'] device = torch.device(ModelConfig["device"]) reduction_method = ModelConfig['reduction_method'] if reduction_method == "sum": reduction_method = torch.sum elif reduction_method == "mean": reduction_method = torch.mean else: raise TypeError(f"unknown reduction method: {reduction_method}") encoder = Encoder(in_vocab_size, hidden_size, embd_dim, num_layers, rnn_type, dropout_rate, bidirectional, reduction_method) attention = Attention(hidden_size) decoder = Decoder(out_vocab_size, hidden_size, embd_dim, num_layers, rnn_type, attention, use_attention, dropout_rate) model = Seq2Seq(encoder, decoder, device).to(device) if 
init_method != None: init_weights(model, init_method) n = count_parameters(model) print(f'The model has {n:,} trainable parameters') return model def metrics(Y, Ypred): '''Computer the following three metrics: - full sequence accuracy: % of sequences correctly generated from end to end - first n-symbol accuracy: % of first n symbols correctly generated - overlap rate: % of pairwise overlapping symbols ''' # pairwise overlap pairwise_overlap = (Y == Ypred).to(torch.float64) # pairwise overlap over across sequences (within the given batch) per_seq_overlap = pairwise_overlap.mean(dim=0) # overlap rate overlap_rate = per_seq_overlap.mean().item() # full sequence accuracy abs_correct = per_seq_overlap.isclose(torch.tensor(1.0, dtype=torch.float64)) full_seq_accu = abs_correct.to(torch.float64).mean().item() # if the n-th symbol does not match, set the following overlapping values to 0 if pairwise_overlap.dim() <= 1: min_idx = pairwise_overlap.argmin(0) if pairwise_overlap[min_idx] == 0: pairwise_overlap[min_idx:] = 0 else: for col_idx, min_idx in enumerate(pairwise_overlap.argmin(0)): if pairwise_overlap[min_idx, col_idx] == 0: pairwise_overlap[min_idx:, col_idx] = 0 # first n-symbol accuracy first_n_accu = pairwise_overlap.mean().item() return full_seq_accu, first_n_accu, overlap_rate def _get_results(dic): loss = dic["loss"] overlap_rate = dic["overlap rate"] full_seq_acc = dic["full sequence accuracy"] first_n_acc = dic["first n-symbol accuracy"] return [loss, full_seq_acc, first_n_acc, overlap_rate] def get_results(log, train_log=True): '''Return the results of the four metrics: loss, full sequence accuracy, first n-symbol accuracy, overlap rate, given a result dictionary.''' if train_log: best = log["Best eval accu"] best_train = best["Train"] best_dev = best["Eval"] train_res = _get_results(best_train) dev_res = _get_results(best_dev) return train_res, dev_res return _get_results(log) def evaluate(model, dataloader, criterion, per_seq_len_performance=False): 
'''Evaluate model performance on a given dataloader. "per_seq_len_performance" can be reported if each batch in the dataloader only consists of a specific length. ''' model.eval() if per_seq_len_performance: seq_len = set(X.shape[0] for X, _ in dataloader) assert len(seq_len) == len(dataloader), "Each batch" \ " must contain sequences of a specific length. " perf_log = dict() # aggragate performance aggr_perf = {"loss": 0.0, "full sequence accuracy": 0.0, "first n-symbol accuracy": 0.0, "overlap rate": 0.0} with torch.no_grad(): for X, Y in dataloader: x_seq_len = X.shape[0] - 2 # not counting <s> and </s> seq_len, batch_size = Y.shape seq_len -= 1 # logits does not have <s> X = X.to(model.device) Y = Y.to(model.device) logits, _ = model(X, Y, teacher_forcing_ratio=0.0) Ypred = logits.view(seq_len, batch_size, -1).argmax(2) full_seq_accu, first_n_accu, overlap_rate = metrics(Y[1:], Ypred) loss = criterion(logits, Y[1:].view(-1)) aggr_perf["loss"] += loss.item() aggr_perf["full sequence accuracy"] += full_seq_accu aggr_perf["first n-symbol accuracy"] += first_n_accu aggr_perf["overlap rate"] += overlap_rate if per_seq_len_performance: perf_log[f"Len-{x_seq_len}"] = {"loss": loss.item(), "full sequence accuracy": full_seq_accu, "first n-symbol accuracy": first_n_accu, "overlap rate": overlap_rate} aggr_perf = {k:v/len(dataloader) for k,v in aggr_perf.items()} if per_seq_len_performance: perf_log[f"Aggregated"] = aggr_perf return aggr_perf, perf_log return aggr_perf def train_loop(model, dataloader, optimizer, criterion, teacher_forcing_ratio): '''A single training loop (for am epoch). 
''' model.train() for X, Y in dataloader: seq_len, batch_size = Y.shape seq_len -= 1 # logits does not have <s> X = X.to(model.device) Y = Y.to(model.device) optimizer.zero_grad() logits, _ = model(X, Y, teacher_forcing_ratio) Ypred = logits.view(seq_len, batch_size, -1).argmax(2) loss = criterion(logits, Y[1:].view(-1)) loss.backward() nn.utils.clip_grad_norm_(model.parameters(), 1) optimizer.step() def train_and_evaluate(model, train_dl, eval_dl, criterion, optimizer, saved_model_fp="model.pt", acc_threshold=0.0, print_eval_freq=5, max_epoch_num=10, train_exit_acc=1.0, eval_exit_acc=1.0, teacher_forcing_ratio=1.0): '''Trains and evaluates model while training and returns the training log. The best model with highest full sequence accuracy is saved and returned. Args: - model (nn.Module): a neural network model in PyTorch. - train_dl (Dataset): train set dataloader. - eval_dl (Dataset): dataloader for evaluation. - criterion (method): loss function for computing loss. - optimizer (method): Optimization method. - saved_model_fp (str): filepath for the saved model (.pt). - acc_threshold (float): the min accuracy to save model. Defaults to 0.0. If set greater than 1, no model will be saved. - print_eval_freq (int): print and evaluation frequency. - max_epoch_num (int): max epoch number. Defaults to 10. Training is stopped if the max epoch number is run out. - train_exit_acc (float): the min train accuracy to exit training. Defaults to 1.0. Only takes effect if eval_exit_acc is also met. - eval_exit_acc (float): the min eval accu to exit training. Defaults to 1.0. Training is stopped if the eval accuracy if 1.0 or both train_exit_acc and eval_exit_acc are met. - teacher_forcing_ratio (float): the probability of using the real next symbols from the output sequences at decoding time during training. 
''' log = dict() best_acc, best_epoch = acc_threshold, 0 epoch, train_acc, eval_acc = 0, 0, 0 while (epoch < max_epoch_num) and (eval_acc != 1.0) and ( train_acc < train_exit_acc or eval_acc < eval_exit_acc): epoch += 1 train_loop(model, train_dl, optimizer, criterion, teacher_forcing_ratio) if epoch % print_eval_freq == 0: train_perf = evaluate(model, train_dl, criterion) train_acc = train_perf['full sequence accuracy'] eval_perf = evaluate(model, eval_dl, criterion) eval_acc = eval_perf['full sequence accuracy'] print(f"Current epoch: {epoch}, \ntraining performance: " \ f"{train_perf}\nevaluation performance: {eval_perf}\n") log[f"Epoch#{epoch}"] = {"Train": train_perf, "Eval": eval_perf} if eval_acc > best_acc: best_acc = eval_acc best_epoch = epoch torch.save(model.state_dict(), saved_model_fp) if best_acc > acc_threshold: log["Best eval accu"] = {"Epoch Number": epoch} log["Best eval accu"].update(log[f"Epoch#{best_epoch}"]) print(saved_model_fp + " saved!\n") model.load_state_dict(torch.load(saved_model_fp)) return log def predict(text, model, in_seq_encoder, out_seq_decoder, in_seq_decoder=None, max_output_len=None, visualize=True, show_plot=True, saved_plot_fp=None): if isinstance(text, str): pass elif isinstance(text, (list, tuple,)): assert all(isinstance(t, str) for t in text), "must be a list of strs" output_seqs, attn_weights = [], [] for t in text: o, w = predict(t, model, in_seq_encoder, out_seq_decoder, visualize=False, max_output_len=max_output_len) output_seqs.append(o) attn_weights.append(w) return output_seqs, attn_weights else: raise TypeError("texts must be a str or a list of strs," \ f" {type(text)} was given.") device = model.device in_seq = in_seq_encoder(text) in_seq_tensor = torch.Tensor(in_seq).long().unsqueeze(1).to(device) model.eval() y = torch.Tensor([[0]]).long().to(device) outputs, attn_ws = [], [] encoder_outputs, hidden, cell = model.encoder(in_seq_tensor) if max_output_len == None: max_output_len = len(in_seq) + 3 while 
y.item() != 1 and len(outputs) < max_output_len: output, hidden, cell, attn_w = \ model.decoder(y, hidden, cell, encoder_outputs) y = output.argmax(1).unsqueeze(0) outputs.append(y.item()); attn_ws.append(attn_w) if attn_ws[0] != None: attn_ws = torch.cat(attn_ws).squeeze(1) if device.type != "cpu": attn_ws = attn_ws.cpu().detach().numpy() else: attn_ws = attn_ws.detach().numpy() else: visualize = False output_seq = out_seq_decoder(outputs) if visualize: if in_seq_decoder == None: in_seq_decoder = out_seq_decoder in_seq_len, out_seq_len = len(in_seq)-1, len(outputs) width = max(int(in_seq_len * 0.3), 1) height = max(int(out_seq_len * 0.3), 1) plt.figure(figsize=(width, height)) plt.imshow(attn_ws[:, 1:], cmap='BuGn') plt.xticks(range(in_seq_len), in_seq_decoder(in_seq)[1:], rotation=45) plt.yticks(range(out_seq_len), output_seq) plt.xlabel("Input") plt.ylabel("Output") plt.grid(True, alpha=0.05) if saved_plot_fp != None: plt.savefig(saved_plot_fp, dpi=600, bbox_inches='tight') if show_plot: plt.show() else: plt.close() return output_seq, attn_ws def customize_predictor(model, in_seq_encoder, out_seq_decoder, in_seq_decoder=None, max_output_len=None, visualize=True, show_plot=True, saved_plot_fp=None): '''Customize a predictor function so that the func can be used more easily.''' return partial(predict, model=model, in_seq_encoder=in_seq_encoder, out_seq_decoder=out_seq_decoder, in_seq_decoder=in_seq_decoder, max_output_len=max_output_len, visualize=visualize, show_plot=show_plot, saved_plot_fp=saved_plot_fp)
13,983
35.511749
89
py
libai
libai-main/setup.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import os import shutil import subprocess import sys from os import path from typing import List from setuptools import Extension, find_packages, setup version = "0.2.0" package_name = "LiBai" cwd = os.path.dirname(os.path.abspath(__file__)) sha = "Unknown" try: sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip() except Exception: pass def write_version_file(): version_path = os.path.join(cwd, "libai", "version.py") with open(version_path, "w") as f: f.write(f"__version__ = '{version}'\n") f.write(f"git_version = {repr(sha)}\n") if sys.version_info < (3,): sys.exit("Sorry, Python3 is required for LiBai.") def get_pybind11(): import pybind11 as pb return pb extensions = [ Extension( "libai.data.data_utils.helpers", sources=["libai/data/data_utils/helpers.cpp"], extra_compile_args=[ "-O3", "-Wall", "-shared", "-std=c++11", "-fPIC", "-fdiagnostics-color", ], include_dirs=[get_pybind11().get_include()], ), ] def get_libai_configs() -> List[str]: """ Return a list of configs to include in package for model zoo. """ source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs") destination = path.join(path.dirname(path.realpath(__file__)), "libai", "config", "configs") # Symlink the config directory inside package to have a cleaner pip install. 
# Remove stale symlink/directory from a previous build. if path.exists(source_configs_dir): if path.islink(destination): os.unlink(destination) elif path.isdir(destination): shutil.rmtree(destination) if not path.exists(destination): try: os.symlink(source_configs_dir, destination) except OSError: # Fall back to copying if symlink fails: ex. on Windows. shutil.copytree(source_configs_dir, destination) config_paths = glob.glob("configs/**/*.py", recursive=True) return config_paths if __name__ == "__main__": print(f"Building wheel {package_name}-{version}") with open("LICENSE", "r", encoding="utf-8") as f: license = f.read() write_version_file() setup( name=package_name, version=version, description="Toolkit for Pretraining Models with OneFlow", license=license, install_requires=[ "boto3", "botocore", "cloudpickle", "flowvision==0.1.0", "wget", "hydra-core", "nltk", "numpy", "omegaconf==2.1.0", "Pygments", "PyYAML", "jieba", "regex", "requests", "scipy", "sentencepiece>=0.1", "tabulate", "termcolor", "tqdm", "pybind11", "portalocker", "dill", "flake8==3.8.1 ", "isort==5.10.1", "black==21.4b ", "autoflake", "tensorboardX<=2.5.1", "pytest", ], packages=find_packages(), package_data={"libai.config": get_libai_configs()}, ext_modules=extensions, test_suite="tests", )
3,998
26.770833
96
py
libai
libai-main/libai/__init__.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This line will be programatically read/write by setup.py. # Leave them at the bottom of this file and don't touch them. from libai import data from libai import evaluation from libai import layers from libai import models from libai import optim from libai import scheduler from libai import tokenizer from libai import engine from libai import utils try: from .version import __version__ # noqa: F401 except ImportError: pass
1,062
30.264706
74
py
libai
libai-main/libai/scheduler/lr_scheduler.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import oneflow as flow logger = logging.getLogger(__name__) def WarmupCosineLR( optimizer: flow.optim.Optimizer, max_iter: int, warmup_factor: float, warmup_iter: int, alpha: float = 0.0, warmup_method: str = "linear", ): """Create a schedule with a learning rate that decreases following the values of the Cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (flow.optim.Optimizer): Wrapped optimizer. max_iter (int): Total training iters. warmup_factor (float): The warmup factor. warmup_iter (int): The number of warmup steps. alpha (float, optional): The learning rate scale factor (:math:`\\alpha`). Defaults to 0.0. warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant". In linear mode, the multiplication factor starts with warmup_factor in the first epoch and then inreases linearly to reach 1. Defaults to "linear". 
""" cosine_decay_lr = flow.optim.lr_scheduler.CosineDecayLR( optimizer, decay_steps=max_iter, alpha=alpha ) if warmup_iter == 0: logger.warning("warmup iters equals to zero, return CosineLR") return cosine_decay_lr elif warmup_iter > max_iter: logger.warning("warmup iters is larger than the total training iters") warmup_cosine_lr = flow.optim.lr_scheduler.WarmUpLR( cosine_decay_lr, warmup_factor=warmup_factor, warmup_iters=warmup_iter, warmup_method=warmup_method, ) return warmup_cosine_lr def WarmupCosineAnnealingLR( optimizer: flow.optim.Optimizer, max_iter: int, warmup_factor: float, warmup_iter: int, eta_min: float = 0.0, warmup_method: str = "linear", ): """Create a schedule with a learning rate that decreases following the values of the Cosine Annealing function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (flow.optim.Optimizer): Wrapped optimizer. max_iter (int): Total training iters. warmup_factor (float): The warmup factor. warmup_iter (int): The number of warmup steps. eta_min (float, optional): Minimum learning rate. Defaults to 0.0. warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant". In linear mode, the multiplication factor starts with warmup_factor in the first epoch and then inreases linearly to reach 1. Defaults to "linear". 
""" cosine_annealing_lr = flow.optim.lr_scheduler.CosineAnnealingLR( optimizer, T_max=max_iter, eta_min=eta_min ) if warmup_iter == 0: logger.warning("warmup iters equals to zero, return CosineAnnealingLR") return cosine_annealing_lr warmup_cosine_annealing_lr = flow.optim.lr_scheduler.WarmUpLR( cosine_annealing_lr, warmup_factor=warmup_factor, warmup_iters=warmup_iter, warmup_method=warmup_method, ) return warmup_cosine_annealing_lr def WarmupStepLR( optimizer: flow.optim.Optimizer, max_iter: int, warmup_factor: float, warmup_iter: int, step_size: int, gamma: float = 0.1, warmup_method: str = "linear", ): """Create a schedule with a learning rate that decreases following the values of the Step function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (flow.optim.Optimizer): Wrapped optimizer. max_iter (int): Total training iters. warmup_factor (float): The warmup factor. warmup_iter (int): The number of warmup steps. step_size (int): Period of learning rate decay. gamma (float, optional): Multiplicative factor of learning rate decay. Defaults to 0.1. warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant". In linear mode, the multiplication factor starts with warmup_factor in the first epoch and then inreases linearly to reach 1. Defaults to "linear". 
""" step_lr = flow.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma) if warmup_iter == 0: logger.warning("warmup iters equals to zero, return StepLR") return step_lr warmup_step_lr = flow.optim.lr_scheduler.WarmUpLR( step_lr, warmup_factor=warmup_factor, warmup_iters=warmup_iter, warmup_method=warmup_method, ) return warmup_step_lr def WarmupMultiStepLR( optimizer: flow.optim.Optimizer, max_iter: int, warmup_factor: float, warmup_iter: int, milestones: list, gamma: float = 0.1, warmup_method: str = "linear", ): """Create a schedule with a learning rate that decreases following the values of the MultiStep function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (flow.optim.Optimizer): Wrapped optimizer. max_iter (int): Total training iters. warmup_factor (float): The warmup factor. warmup_iter (int): The number of warmup steps. milestones (list): List of step indices. Must be increasing. gamma (float, optional): Multiplicative factor of learning rate decay. Defaults to 0.1. warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant". In linear mode, the multiplication factor starts with warmup_factor in the first epoch and then inreases linearly to reach 1. Defaults to "linear". 
""" multistep_lr = flow.optim.lr_scheduler.MultiStepLR( optimizer, milestones=milestones, gamma=gamma ) if warmup_iter == 0: logger.warning("warmup iters equals to zero, return MultiStepLR") return multistep_lr warmup_multistep_lr = flow.optim.lr_scheduler.WarmUpLR( multistep_lr, warmup_factor=warmup_factor, warmup_iters=warmup_iter, warmup_method=warmup_method, ) return warmup_multistep_lr def WarmupExponentialLR( optimizer: flow.optim.Optimizer, max_iter: int, gamma: float, warmup_factor: float, warmup_iter: int, warmup_method: str = "linear", ): """Create a schedule with a learning rate that decreases following the values of the Exponential function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (flow.optim.Optimizer): Wrapped optimizer. max_iter (int): Total training iters. gamma (float): Multiplicative factor of learning rate decay. warmup_factor (float): The warmup factor. warmup_iter (int): The number of warmup steps. warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant". In linear mode, the multiplication factor starts with warmup_factor in the first epoch and then inreases linearly to reach 1. Defaults to "linear". 
""" exponential_lr = flow.optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma) if warmup_iter == 0: logger.warning("warmup iters equals to zero, return ExponentialLR") return exponential_lr warmup_exponential_lr = flow.optim.lr_scheduler.WarmUpLR( exponential_lr, warmup_factor=warmup_factor, warmup_iters=warmup_iter, warmup_method=warmup_method, ) return warmup_exponential_lr def WarmupPolynomialLR( optimizer: flow.optim.Optimizer, max_iter: int, warmup_factor: float, warmup_iter: int, end_learning_rate: float = 0.0001, power: float = 1.0, cycle: bool = False, warmup_method: str = "linear", ): """Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by `lr_end`, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer (flow.optim.Optimizer): Wrapped optimizer. max_iter (int): Total training iters. warmup_factor (float): The warmup factor. warmup_iter (int): The number of warmup steps. end_learning_rate (float, optional): The final learning rate. Defaults to 0.0001. power (float, optional): The power of polynomial. Defaults to 1.0. cycle (bool, optional): If cycle is True, the scheduler will decay the learning rate every decay steps. Defaults to False. warmup_method (str, optional): The method of warmup, you can choose "linear" or "constant". In linear mode, the multiplication factor starts with warmup_factor in the first epoch and then inreases linearly to reach 1. Defaults to "linear". 
""" polynomial_lr = flow.optim.lr_scheduler.PolynomialLR( optimizer, decay_batch=max_iter, end_learning_rate=end_learning_rate, power=power, cycle=cycle, ) if warmup_iter == 0: logger.warning("warmup iters equals to zero, return PolynomialLR") return polynomial_lr warmup_polynomial_lr = flow.optim.lr_scheduler.WarmUpLR( polynomial_lr, warmup_factor=warmup_factor, warmup_iters=warmup_iter, warmup_method=warmup_method, ) return warmup_polynomial_lr
10,480
39.624031
99
py
libai
libai-main/libai/scheduler/__init__.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .build import build_lr_scheduler from .lr_scheduler import ( WarmupCosineAnnealingLR, WarmupCosineLR, WarmupExponentialLR, WarmupMultiStepLR, WarmupPolynomialLR, WarmupStepLR, )
828
32.16
74
py
libai
libai-main/libai/scheduler/build.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libai.config import instantiate def build_lr_scheduler(cfg, optimizer): """Build learning rate scheduler, defined by ``cfg``.""" cfg.optimizer = optimizer scheduler = instantiate(cfg) return scheduler
845
34.25
74
py
libai
libai-main/libai/evaluation/reg_evaluator.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import logging from collections import OrderedDict import numpy as np from scipy.stats import pearsonr, spearmanr from libai.utils import distributed as dist from .evaluator import DatasetEvaluator logger = logging.getLogger(__name__) class RegEvaluator(DatasetEvaluator): def __init__(self): self._predictions = [] def reset(self): self._predictions = [] def process(self, inputs, outputs): pred_logits = outputs["prediction_scores"] labels = inputs["labels"] # measure accuracy preds = pred_logits.cpu().topk(1)[1].squeeze(1).numpy() labels = labels.cpu().numpy() self._predictions.append({"preds": preds, "labels": labels}) def evaluate(self): if not dist.is_main_process(): return {} else: predictions = self._predictions preds = np.array([]) labels = np.array([]) for prediction in predictions: preds = np.concatenate((preds, prediction["preds"])) labels = np.concatenate((labels, prediction["labels"])) pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] corr = (pearson_corr + spearman_corr) / 2 self._results = OrderedDict() self._results["pearson"] = pearson_corr self._results["spearman"] = spearman_corr self._results["corr"] = corr return copy.deepcopy(self._results)
2,094
29.362319
74
py
libai
libai-main/libai/evaluation/utils.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from collections.abc import Mapping import oneflow as flow from libai.utils import distributed as dist def pad_batch(x_dict, batch_size, last_batch_lack, is_last_batch): x = list(x_dict.values())[0] tensor_batch = x.shape[0] assert tensor_batch <= batch_size if tensor_batch == batch_size and not is_last_batch: return x_dict, batch_size valid_sample = tensor_batch - last_batch_lack data_parallel_size = dist.get_data_parallel_size() assert tensor_batch % data_parallel_size == 0 tensor_micro_batch_size = tensor_batch // data_parallel_size padded_dict = {} for key, xi in x_dict.items(): pad_shape = (batch_size, *xi.shape[1:]) local_xi = xi.to_global( sbp=flow.sbp.broadcast, placement=flow.env.all_device_placement("cuda") ).to_local() padded_xi = flow.zeros(pad_shape, dtype=xi.dtype, device="cuda") padded_xi[:tensor_batch, ...] = padded_xi[:tensor_batch, ...] 
+ local_xi for i in range(last_batch_lack - 1): start_idx = tensor_micro_batch_size * (data_parallel_size - i - 1) - 1 padded_xi[start_idx:-1] = padded_xi[start_idx + 1 :] padded_xi = padded_xi.to_global( sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), placement=xi.placement ).to_global(sbp=xi.sbp) padded_dict[key] = padded_xi return padded_dict, valid_sample def print_csv_format(results): """ Print main metrics in a particular format so that they are easy to copypaste into a spreadsheet. Args: results (OrderedDict[dict]): task_name -> {metric -> score} unordered dict can also be printed, but in arbitrary order """ assert isinstance(results, Mapping) or not len(results), results logger = logging.getLogger(__name__) for task, res in results.items(): if isinstance(res, Mapping): # Don't print "AP-category" metrics since they are usually not tracked. important_res = [(k, v) for k, v in res.items() if "-" not in k] logger.info("copypaste: Task: {}".format(task)) logger.info("copypaste: " + ",".join([k[0] for k in important_res])) logger.info("copypaste: " + ",".join(["{0:.4f}".format(k[1]) for k in important_res])) else: logger.info(f"copypaste: {task}={res}") def flatten_results_dict(results): """ Expand a hierarchical dict of scalars into a flat dict of scalars. If results[k1][k2][k3] = v, the returned dict will have the entry {"k1/k2/k3": v}. Args: results (dict): """ r = {} for k, v in results.items(): if isinstance(v, Mapping): v = flatten_results_dict(v) for kk, vv in v.items(): r[k + "/" + kk] = vv else: r[k] = v return r
3,505
36.297872
98
py
libai
libai-main/libai/evaluation/evaluator.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import logging import time from collections import OrderedDict, abc from contextlib import ExitStack, contextmanager from typing import Callable, List, Union import oneflow as flow from libai.utils import distributed as dist from libai.utils.logger import log_every_n_seconds from .utils import pad_batch # -------------------------------------------------------- # References: # https://github.com/facebookresearch/detectron2/blob/main/detectron2/evaluation/evaluator.py # -------------------------------------------------------- class DatasetEvaluator: """ Base class for a dataset evaluator. The function :func:`inference_on_dataset` runs the model over all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs. This class will accumulate information of the inputs/outputs (by :meth:`process`), and produce evaluation results in the end (by :meth:`evaluate`). """ def reset(self): """ Preparation for a new round of evaluation. Should be called before starting a round of evaluation. """ def process(self, inputs, outputs): """ Process the pair of inputs and outputs. .. code-block:: python pred_logits = outputs["prediction_scores"] labels = inputs["labels"] # do evaluation on pred_logits/labels pair ... Args: inputs (dict): the inputs that's used to call the model. 
outputs (dict): the return dict of `model(**inputs)` """ def evaluate(self): """ Evaluate/summarize the performance after processing all input/output pairs. Returns: dict: A new evaluator class can return a dict of arbitrary format as long as the user can process the results. In our train_net.py, we expect the following format: * key: the name of the task (e.g., Classification) * value: a dict of {metric name: score}, e.g.: {"Acc@1": 75.0} """ class DatasetEvaluators(DatasetEvaluator): """ Wrapper class to combine multiple :class:`DatasetEvaluator` instances. This class dispatches every evaluation call to all of its :class:`DatasetEvaluator`. """ def __init__(self, evaluators): """ Args: evaluators (list): the evaluators to combine. """ super().__init__() self._evaluators = evaluators def reset(self): for evaluator in self._evaluators: evaluator.reset() def process(self, inputs, outputs): for evaluator in self._evaluators: evaluator.process(inputs, outputs) def evaluate(self): results = OrderedDict() for evaluator in self._evaluators: result = evaluator.evaluate() if dist.is_main_process() and result is not None: for k, v in result.items(): assert ( k not in results ), "Different evaluators produce results with the same key {}".format(k) results[k] = v return results def inference_on_dataset( model, data_loader, batch_size, eval_iter, get_batch: Callable, input_placement_device: str, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None], ): """ Run model on the data_loader and evaluate the metrics with evaluator. Also benchmark the inference speed of `model.__call__` accurately. The model will be used in eval mode. Args: model (callable): a callable which takes an object from `data_loader` and returns some outputs. If it's an nn.Module, it will be temporarily set to `eval` mode. If you wish to evaluate a model in `training` mode instead, you can wrap the given model and override its behavior of `.eval()` and `.train()`. 
batch_size: batch size for inference data_loader: an iterable object with a length. The elements it generates will be the inputs to the model. eval_iter: running steps for evaluation get_batch: a Callable function for getting data from dataloader input_placement_device: used in get_batch, set it to `cuda` or `cpu`. see input_placement_device in `libai.configs.common.train.py` for more details. evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, but don't want to do any evaluation. Returns: The return value of `evaluator.evaluate()` """ num_devices = dist.get_world_size() logger = logging.getLogger(__name__) total_samples = len(data_loader.dataset) # inference data loader must have a fixed length if evaluator is None: # create a no-op evaluator evaluator = DatasetEvaluators([]) if isinstance(evaluator, abc.MutableSequence): evaluator = DatasetEvaluators(evaluator) evaluator.reset() num_warmup = min(5, len(data_loader) - 1) start_time = time.perf_counter() total_data_time = 0 total_compute_time = 0 total_eval_time = 0 consumed_samples = 0 dps = dist.get_data_parallel_size() last_batch_lack = (dps - (total_samples % dps)) % dps # reset total samples real_eval_iter = min(eval_iter, len(data_loader)) total_samples = min(real_eval_iter * batch_size, len(data_loader.dataset)) logger.info( f"with eval_iter {eval_iter}, " f"reset total samples {len(data_loader.dataset)} to {total_samples}" ) logger.info(f"Start inference on {total_samples} samples") with ExitStack() as stack: if isinstance(model, (flow.nn.Module, flow.nn.Graph)): stack.enter_context(inference_context(model)) stack.enter_context(flow.no_grad()) start_data_time = time.perf_counter() for idx, inputs in enumerate(data_loader): if idx >= real_eval_iter: break total_data_time += time.perf_counter() - start_data_time if idx == num_warmup: start_time = time.perf_counter() total_data_time = 0 total_compute_time = 0 total_eval_time = 0 start_compute_time = time.perf_counter() # model forward 
data = get_batch(inputs, input_placement_device) is_last_batch = idx == len(data_loader) - 1 paded_data, valid_sample = pad_batch(data, batch_size, last_batch_lack, is_last_batch) outputs = model(**paded_data) # get valid sample valid_data = { key: dist.tensor_to_rank0(value, to_local=True)[:valid_sample] for key, value in data.items() } valid_outputs = {} for key, value in outputs.items(): value = dist.tensor_to_rank0(value, to_local=True) if value.ndim > 1: valid_outputs[key] = value[:valid_sample] # Slice if it's batched output else: valid_outputs[key] = value if flow.cuda.is_available(): dist.synchronize() total_compute_time += time.perf_counter() - start_compute_time start_eval_time = time.perf_counter() if dist.is_main_process(): evaluator.process(valid_data, valid_outputs) dist.synchronize() total_eval_time += time.perf_counter() - start_eval_time consumed_samples += valid_sample iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup) data_seconds_per_iter = total_data_time / iters_after_start compute_seconds_per_iter = total_compute_time / iters_after_start eval_seconds_per_iter = total_eval_time / iters_after_start total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start if idx >= num_warmup * 2 or compute_seconds_per_iter > 5: eta = datetime.timedelta( seconds=int(total_seconds_per_iter * (total_samples // batch_size - idx - 1)) ) log_every_n_seconds( logging.INFO, ( f"Inference done {consumed_samples}/{total_samples}. " f"Dataloading: {data_seconds_per_iter:.4f} s/iter. " f"Inference: {compute_seconds_per_iter:.4f} s/iter. " f"Eval: {eval_seconds_per_iter:.4f} s/iter. " f"Total: {total_seconds_per_iter:.4f} s/iter. 
" f"ETA={eta}" ), n=5, ) start_data_time = time.perf_counter() # Measure the time only for this worker (before the synchronization barrier) total_time = time.perf_counter() - start_time total_time_str = str(datetime.timedelta(seconds=total_time)) # NOTE this format is parsed by grep logger.info("Total valid samples: {}".format(consumed_samples)) logger.info( "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format( total_time_str, total_time / (total_samples - num_warmup), num_devices ) ) total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time))) logger.info( "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format( total_compute_time_str, total_compute_time / (total_samples - num_warmup), num_devices, ) ) results = evaluator.evaluate() # An evaluator may return None when not in main process. # Replace it by an empty dict instead to make it easier for downstream code to handle if results is None: results = {} return results @contextmanager def inference_context(model): """ A context where the model is temporarily changed to eval mode, and restored to previous mode afterwards. Args: model: eager or graph mode in oneflow """ training_mode = model.model.training if isinstance(model, flow.nn.Graph) else model.training if isinstance(model, flow.nn.Graph): model.model.eval() else: model.eval() yield if isinstance(model, flow.nn.Graph): model.model.train(training_mode) else: model.train(training_mode)
11,242
36.983108
99
py
libai
libai-main/libai/evaluation/bleu_evaluator.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from collections import OrderedDict from nltk.translate.bleu_score import corpus_bleu from libai.utils import distributed as dist from .evaluator import DatasetEvaluator class BLEUEvaluator(DatasetEvaluator): """ Evaluate BLEU(Bilingual Evaluation Understudy) score. BLEU is a score for comparing a candidate translation of text to one or more reference translations. """ def __init__(self): super().__init__() self._predictions = [] def reset(self): self._predictions = [] def process(self, inputs, outputs): candidate = outputs["candidate"] reference = inputs["reference"] self._predictions.append({"candidate": candidate, "reference": reference}) def evaluate(self): if not dist.is_main_process(): return {} else: predictions = self._predictions candidates = [] references = [] for pred in predictions: candidates.append(pred["candidate"]) references.append(pred["reference"]) bleu_score = corpus_bleu(references, candidates) self._results = OrderedDict() self._results["bleu_score"] = bleu_score return copy.deepcopy(self._results)
1,888
28.061538
82
py
libai
libai-main/libai/evaluation/ppl_evaluator.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import math from collections import OrderedDict from libai.utils import distributed as dist from .evaluator import DatasetEvaluator class PPLEvaluator(DatasetEvaluator): """ Evaluate perplexity for Language Model. Perplexity is a measurement of how well a probability distribution or probability model predicts a sample. """ def __init__(self): self._predictions = [] def reset(self): self._predictions = [] def process(self, inputs, outputs): for k, v in outputs.items(): ppl = math.exp(min(20, v.item())) self._predictions.append({f"{k}_PPL": ppl}) def evaluate(self): if not dist.is_main_process(): return {} else: predictions = self._predictions self._results = OrderedDict() for prediction in predictions: for k, v in prediction.items(): if k not in self._results: self._results[k] = 0 self._results[k] += v for k in self._results.keys(): self._results[k] /= len(predictions) return copy.deepcopy(self._results)
1,794
28.42623
74
py
libai
libai-main/libai/evaluation/__init__.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .evaluator import DatasetEvaluator, inference_on_dataset from .utils import print_csv_format, flatten_results_dict from .cls_evaluator import ClsEvaluator from .ppl_evaluator import PPLEvaluator from .reg_evaluator import RegEvaluator from .bleu_evaluator import BLEUEvaluator
903
40.090909
74
py
libai
libai-main/libai/evaluation/cls_evaluator.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from collections import OrderedDict from libai.utils import distributed as dist from .evaluator import DatasetEvaluator def accuracy(output, target, topk=(1,)): maxk = min(max(topk), output.size()[1]) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.reshape(1, -1).expand_as(pred)) return [ (correct[: min(k, maxk)].reshape(-1).float().sum(0) * 100.0 / batch_size).item() for k in topk ] class ClsEvaluator(DatasetEvaluator): """ Evaluate accuracy for classification. The metrics range from 0 to 100 (instead of 0 to 1). We support evaluate different topk accuracy. You can reset `cfg.train.topk=(1, 5, N)` according to your needs. 
""" def __init__(self, topk=(1, 5)): self.topk = topk self._predictions = [] def reset(self): self._predictions = [] def process(self, inputs, outputs): pred_logits = outputs["prediction_scores"] labels = inputs["labels"] # measure accuracy topk_acc = accuracy(pred_logits, labels, topk=self.topk) num_correct_acc_topk = [acc * labels.size(0) / 100 for acc in topk_acc] self._predictions.append( {"num_correct_topk": num_correct_acc_topk, "num_samples": labels.size(0)} ) def evaluate(self): if not dist.is_main_process(): return {} else: predictions = self._predictions total_correct_num = OrderedDict() for top_k in self.topk: total_correct_num["Acc@" + str(top_k)] = 0 total_samples = 0 for prediction in predictions: for top_k, num_correct_n in zip(self.topk, prediction["num_correct_topk"]): total_correct_num["Acc@" + str(top_k)] += int(num_correct_n) total_samples += int(prediction["num_samples"]) self._results = OrderedDict() for top_k, topk_correct_num in total_correct_num.items(): self._results[top_k] = topk_correct_num / total_samples * 100 return copy.deepcopy(self._results)
2,773
31.635294
88
py
libai
libai-main/libai/config/arguments.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import sys def default_argument_parser(epilog=None): """Create a parser with some common arguments used by libai users. Args: epilog (str): epilog passed to ArgumentParser describing the usage. Returns: argparse.ArgumentParser. """ parser = argparse.ArgumentParser( epilog=epilog or f""" Examples: Run on single machine: $ python3 -m oneflow.distributed.launch \ --nproc_per_node 8 --nnodes 1 --node_rank 0 --master_addr 127.0.0.1 --master_port 12345 \ {sys.argv[0]} --config-file cfg.yaml Change some config options: $ python3 -m oneflow.distributed.launch \ --nproc_per_node 8 --nnodes 1 --node_rank 0 --master_addr 127.0.0.1 --master_port 12345 \ {sys.argv[0]} --config-file cfg.yaml train.load_weight=/path/to/weight.pth optim.lr=0.001 Run on multiple machines: (machine0)$ python3 -m oneflow.distributed.launch \ --nproc_per_node 8 --nnodes 2 --node_rank 0 --master_addr <URL> --master_port 12345 \ {sys.argv[0]} --config-file cfg.yaml $ python3 -m oneflow.distributed.launch \ --nproc_per_node 8 --nnodes 2 --node_rank 1 --master_addr <URL> --master_port 12345 \ {sys.argv[0]} --config-file cfg.yaml """, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") parser.add_argument( "--resume", action="store_true", help="Whether to attempt to resume from the 
checkpoint directory. " "See documentation of `DefaultTrainer.resume_or_load()` for what it means.", ) parser.add_argument("--eval-only", action="store_true", help="Perform evaluation only") parser.add_argument( "--fast-dev-run", action="store_true", help="Run several batches of train, eval and test to find any bugs, " "(ie: a sort of unit test)", ) parser.add_argument( "opts", help=""" Modify config options at the end of the command. For Yacs configs, use space-separated "path.key value" pairs. For python-based LazyConfig, use "path.key=value". """.strip(), default=None, nargs=argparse.REMAINDER, ) return parser
2,877
34.530864
96
py
libai
libai-main/libai/config/config.py
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import inspect
import os

import pkg_resources
from omegaconf import OmegaConf

from .lazy import LazyConfig

# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/config/config.py
# --------------------------------------------------------


def configurable(init_func=None, *, from_config=None):
    """
    Decorate a function or a class's __init__ method so that it can be called
    with a :class:`CfgNode` object using a :func:`from_config` function that translates
    :class:`CfgNode` to arguments.

    Examples:

    .. code-block:: python

        # Usage 1: Decorator on __init__:
        class A:
            @configurable
            def __init__(self, a, b=2, c=3):
                pass

            @classmethod
            def from_config(cls, cfg):   # 'cfg' must be the first argument
                # Returns kwargs to be passed to __init__
                return {"a": cfg.A, "b": cfg.B}

        a1 = A(a=1, b=2)  # regular construction
        a2 = A(cfg)       # construct with a cfg
        a3 = A(cfg, b=3, c=4)  # construct with extra overwrite

        # Usage 2: Decorator on any function. Needs an extra from_config argument:
        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
        def a_func(a, b=2, c=3):
            pass

        a1 = a_func(a=1, b=2)  # regular call
        a2 = a_func(cfg)       # call with a cfg
        a3 = a_func(cfg, b=3, c=4)  # call with extra overwrite

    Args:
        init_func (callable): a class's ``__init__`` method in usage 1. The
            class must have a ``from_config`` classmethod which takes `cfg` as
            the first argument.
        from_config (callable): the from_config function in usage 2. It must take `cfg`
            as its first argument.
    """

    if init_func is not None:
        # Usage 1: bare decorator on a class's __init__.
        assert (
            inspect.isfunction(init_func)
            and from_config is None
            and init_func.__name__ == "__init__"
        ), "Incorrect use of @configurable. Check API documentation for examples."

        @functools.wraps(init_func)
        def wrapped(self, *args, **kwargs):
            try:
                from_config_func = type(self).from_config
            except AttributeError as e:
                raise AttributeError(
                    "Class with @configurable must have a 'from_config' classmethod."
                ) from e
            if not inspect.ismethod(from_config_func):
                raise TypeError("Class with @configurable must have a 'from_config' classmethod.")

            if _called_with_cfg(*args, **kwargs):
                # Translate the cfg (plus any extra overrides) into explicit kwargs.
                explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
                init_func(self, **explicit_args)
            else:
                init_func(self, *args, **kwargs)

        return wrapped

    else:
        # Usage 2: decorator factory with an explicit from_config callable.
        if from_config is None:
            return configurable  # @configurable() is made equivalent to @configurable
        assert inspect.isfunction(
            from_config
        ), "from_config argument of configurable must be a function!"

        def wrapper(orig_func):
            @functools.wraps(orig_func)
            def wrapped(*args, **kwargs):
                if _called_with_cfg(*args, **kwargs):
                    explicit_args = _get_args_from_config(from_config, *args, **kwargs)
                    return orig_func(**explicit_args)
                else:
                    return orig_func(*args, **kwargs)

            # Expose the translator so callers can reuse it programmatically.
            wrapped.from_config = from_config
            return wrapped

        return wrapper


def _get_args_from_config(from_config_func, *args, **kwargs):
    """
    Use `from_config` to obtain explicit arguments.

    Returns:
        dict: arguments to be used for cls.__init__
    """
    signature = inspect.signature(from_config_func)
    if list(signature.parameters.keys())[0] != "cfg":
        if inspect.isfunction(from_config_func):
            name = from_config_func.__name__
        else:
            name = f"{from_config_func.__self__}.from_config"
        raise TypeError(f"{name} must take 'cfg' as the first argument!")
    support_var_arg = any(
        param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
        for param in signature.parameters.values()
    )
    if support_var_arg:
        # forward all arguments to from_config, if from_config accepts them
        ret = from_config_func(*args, **kwargs)
    else:
        # forward supported arguments to from_config
        supported_arg_names = set(signature.parameters.keys())
        extra_kwargs = {}
        for name in list(kwargs.keys()):
            if name not in supported_arg_names:
                extra_kwargs[name] = kwargs.pop(name)
        ret = from_config_func(*args, **kwargs)
        # forward the other arguments to __init__
        ret.update(extra_kwargs)
    return ret


def _called_with_cfg(*args, **kwargs):
    """
    Returns:
        bool: whether the arguments contain CfgNode and should be considered
            forwarded to from_config.
    """
    from omegaconf import DictConfig

    if len(args) and isinstance(args[0], DictConfig):
        return True
    if isinstance(kwargs.pop("cfg", None), DictConfig):
        return True
    # `from_config`'s first argument is forced to be "cfg".
    # So the above check covers all cases.
    return False


def try_get_key(cfg, *keys, default=None):
    """
    Try select keys from cfg until the first key that exists. Otherwise return default.
    """
    # Sentinel distinguishes "key missing" from a stored None; create it once,
    # not per iteration.
    none = object()
    for k in keys:
        p = OmegaConf.select(cfg, k, default=none)
        if p is not none:
            return p
    return default


def get_config(config_path):
    """
    Returns a config object from a config_path.

    Args:
        config_path (str): config file name relative to libai's "configs/"
            directory, e.g., "common/models/bert.py"

    Returns:
        omegaconf.DictConfig: a config object
    """
    cfg_file = pkg_resources.resource_filename("libai.config", os.path.join("configs", config_path))
    if not os.path.exists(cfg_file):
        raise RuntimeError("{} not available in LiBai configs!".format(config_path))
    cfg = LazyConfig.load(cfg_file)
    return cfg
6,972
34.040201
100
py
libai
libai-main/libai/config/__init__.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .arguments import default_argument_parser from .config import configurable, try_get_key, get_config from .instantiate import instantiate from .lazy import LazyCall, LazyConfig __all__ = [ "LazyCall", "LazyConfig", "instantiate", "default_argument_parser", "configurable", "try_get_key", "get_config", ]
958
30.966667
74
py
libai
libai-main/libai/config/lazy.py
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import builtins
import importlib
import inspect
import logging
import os
import pydoc
import uuid
from collections import abc
from contextlib import contextmanager
from copy import deepcopy
from dataclasses import is_dataclass
from typing import Any, List, Tuple, Union

import cloudpickle
import yaml
from omegaconf import DictConfig, ListConfig, OmegaConf

__all__ = ["LazyCall", "LazyConfig"]

# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/config/lazy.py
# --------------------------------------------------------


def locate(name: str) -> Any:
    """
    Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,
    such as "module.submodule.class_name".

    Raise Exception if it cannot be found.
    """
    obj = pydoc.locate(name)

    # Some cases (e.g. flow.optim.sgd.SGD) not handled correctly
    # by pydoc.locate. Try a private function from hydra.
    if obj is None:
        try:
            # from hydra.utils import get_method - will print many errors
            from hydra.utils import _locate
        except ImportError as e:
            raise ImportError(f"Cannot dynamically locate object {name}!") from e
        else:
            obj = _locate(name)  # it raises if fails

    return obj


def _convert_target_to_string(t: Any) -> str:
    """
    Inverse of ``locate()``.

    Args:
        t: any object with ``__module__`` and ``__qualname__``
    """
    module, qualname = t.__module__, t.__qualname__

    # Compress the path to this object, e.g. ``module.submodule._impl.class``
    # may become ``module.submodule.class``, if the later also resolves to the same
    # object. This simplifies the string, and also is less affected by moving the
    # class implementation.
    module_parts = module.split(".")
    for k in range(1, len(module_parts)):
        prefix = ".".join(module_parts[:k])
        candidate = f"{prefix}.{qualname}"
        try:
            if locate(candidate) is t:
                return candidate
        except ImportError:
            pass
    return f"{module}.{qualname}"


class LazyCall:
    """
    Wrap a callable so that when it's called, the call will not be executed,
    but returns a dict that describes the call.

    LazyCall object has to be called with only keyword arguments. Positional
    arguments are not yet supported.

    Examples:

    .. code-block:: python

        from libai.config import instantiate, LazyCall

        layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32)
        layer_cfg.out_channels = 64    # can edit it afterwards
        layer = instantiate(layer_cfg)
    """

    def __init__(self, target):
        if not (callable(target) or isinstance(target, (str, abc.Mapping))):
            raise TypeError(
                f"target of LazyCall must be a callable or defines a callable! Got {target}"
            )
        self._target = target

    def __call__(self, **kwargs):
        if is_dataclass(self._target):
            # omegaconf object cannot hold dataclass type
            # https://github.com/omry/omegaconf/issues/784
            target = _convert_target_to_string(self._target)
        else:
            target = self._target
        kwargs["_target_"] = target

        return DictConfig(content=kwargs, flags={"allow_objects": True})


def _visit_dict_config(cfg, func):
    """
    Apply func recursively to all DictConfig in cfg.
    """
    if isinstance(cfg, DictConfig):
        func(cfg)
        for v in cfg.values():
            _visit_dict_config(v, func)
    elif isinstance(cfg, ListConfig):
        for v in cfg:
            _visit_dict_config(v, func)


def _validate_py_syntax(filename):
    # see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py
    with open(filename, "r", encoding="utf-8") as f:
        # Setting encoding explicitly to resolve coding issue on windows
        content = f.read()
    try:
        ast.parse(content)
    except SyntaxError as e:
        # Fix: include the actual filename so users know which config is broken
        # (the f-string previously contained no placeholder).
        raise SyntaxError(f"Config file {filename} has syntax error!") from e


def _cast_to_config(obj):
    # if given a dict, return DictConfig instead
    if isinstance(obj, dict):
        return DictConfig(obj, flags={"allow_objects": True})
    return obj


_CFG_PACKAGE_NAME = "libai._cfg_loader"
"""
A namespace to put all imported config into.
"""


def _random_package_name(filename):
    # generate a random package name when loading config files
    return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename)


@contextmanager
def _patch_import():
    """
    Enhance relative import statements in config files, so that they:
    1. locate files purely based on relative location, regardless of packages.
       e.g. you can import file without having __init__
    2. do not cache modules globally; modifications of module states has no side effect
    3. support other storage system through PathManager
    4. imported dict are turned into omegaconf.DictConfig automatically
    """
    old_import = builtins.__import__

    def find_relative_file(original_file, relative_import_path, level):
        cur_file = os.path.dirname(original_file)
        for _ in range(level - 1):
            cur_file = os.path.dirname(cur_file)
        cur_name = relative_import_path.lstrip(".")
        for part in cur_name.split("."):
            cur_file = os.path.join(cur_file, part)
        # NOTE: directory import is not handled. Because then it's unclear
        # if such import should produce python module or DictConfig. This can
        # be discussed further if needed.
        if not cur_file.endswith(".py"):
            cur_file += ".py"
        if not os.path.isfile(cur_file):
            raise ImportError(
                f"Cannot import name {relative_import_path} from "
                f"{original_file}: {cur_file} has to exist."
            )
        return cur_file

    def new_import(name, globals=None, locals=None, fromlist=(), level=0):
        if (
            # Only deal with relative imports inside config files
            level != 0
            and globals is not None
            and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME)
        ):
            cur_file = find_relative_file(globals["__file__"], name, level)
            _validate_py_syntax(cur_file)
            spec = importlib.machinery.ModuleSpec(
                _random_package_name(cur_file), None, origin=cur_file
            )
            module = importlib.util.module_from_spec(spec)
            module.__file__ = cur_file
            with open(cur_file, "r", encoding="utf-8") as f:
                content = f.read()
            exec(compile(content, cur_file, "exec"), module.__dict__)
            # Fix: use a distinct loop variable instead of shadowing the
            # ``name`` parameter of this function.
            for attr in fromlist:  # turn imported dict into DictConfig automatically
                val = _cast_to_config(module.__dict__[attr])
                module.__dict__[attr] = val
            return module
        return old_import(name, globals, locals, fromlist=fromlist, level=level)

    builtins.__import__ = new_import
    try:
        yield new_import
    finally:
        # Fix: always restore the builtin importer, even if the ``with`` body
        # raises; otherwise the process is left with a patched __import__.
        builtins.__import__ = old_import


class LazyConfig:
    """
    Provide methods to save, load, and overrides an omegaconf config object
    which may contain definition of lazily-constructed objects.
    """

    @staticmethod
    def load_rel(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
        """
        Similar to :meth:`load()`, but load path relative to the caller's source file.

        This has the same functionality as a relative import, except that this method
        accepts filename as a string, so more characters are allowed in the filename.
        """
        caller_frame = inspect.stack()[1]
        caller_fname = caller_frame[0].f_code.co_filename
        assert caller_fname != "<string>", "load_rel Unable to find caller"
        caller_dir = os.path.dirname(caller_fname)
        filename = os.path.join(caller_dir, filename)
        return LazyConfig.load(filename, keys)

    @staticmethod
    def load(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
        """
        Load a config file.

        Args:
            filename: absolute path or relative path w.r.t. the current working directory
            keys: keys to load and return. If not given, return all keys
                (whose values are config objects) in a dict.
        """
        has_keys = keys is not None
        filename = filename.replace("/./", "/")  # redundant
        if os.path.splitext(filename)[1] not in [".py", ".yaml", ".yml"]:
            # Fix: include the actual filename in the error message
            # (the f-string previously contained no placeholder).
            raise ValueError(f"Config file {filename} has to be a python or yaml file.")
        if filename.endswith(".py"):
            _validate_py_syntax(filename)

            with _patch_import():
                # Record the filename
                module_namespace = {
                    "__file__": filename,
                    "__package__": _random_package_name(filename),
                }
                with open(filename, "r", encoding="utf-8") as f:
                    content = f.read()
                # Compile first with filename to:
                # 1. make filename appears in stacktrace
                # 2. make load_rel able to find its parent's (possibly remote) location
                exec(compile(content, filename, "exec"), module_namespace)

            ret = module_namespace
        else:
            with open(filename, "r", encoding="utf-8") as f:
                obj = yaml.unsafe_load(f)
            ret = OmegaConf.create(obj, flags={"allow_objects": True})

        if has_keys:
            if isinstance(keys, str):
                return _cast_to_config(ret[keys])
            else:
                return tuple(_cast_to_config(ret[a]) for a in keys)
        else:
            if filename.endswith(".py"):
                # when not specified, only load those that are config objects
                ret = DictConfig(
                    {
                        name: _cast_to_config(value)
                        for name, value in ret.items()
                        if isinstance(value, (DictConfig, ListConfig, dict))
                        and not name.startswith("_")
                    },
                    flags={"allow_objects": True},
                )
            return ret

    @staticmethod
    def save(cfg, filename: str):
        """
        Save a config object to a yaml file.

        Note that when the config dictionary contains complex objects (e.g. lambda),
        it can't be saved to yaml. In that case we will print an error and
        attempt to save to a pkl file instead.

        Args:
            cfg: an omegaconf config object
            filename: yaml file name to save the config file
        """
        logger = logging.getLogger(__name__)
        try:
            cfg = deepcopy(cfg)
        except Exception:
            pass
        else:
            # if it's deep-copyable, then...
            def _replace_type_by_name(x):
                if "_target_" in x and callable(x._target_):
                    try:
                        x._target_ = _convert_target_to_string(x._target_)
                    except AttributeError:
                        pass

            # not necessary, but makes yaml looks nicer
            _visit_dict_config(cfg, _replace_type_by_name)

        save_pkl = False
        try:
            # Fix: do not shadow the builtin ``dict``.
            cfg_dict = OmegaConf.to_container(cfg, resolve=False)
            dumped = yaml.dump(cfg_dict, default_flow_style=None, allow_unicode=True, width=9999)
            with open(filename, "w") as f:
                f.write(dumped)
            try:
                _ = yaml.unsafe_load(dumped)  # test that it is loadable
            except Exception:
                # Fix: include the actual filename in the warning
                # (the f-string previously contained no placeholder).
                logger.warning(
                    "The config contains objects that cannot serialize to a valid yaml. "
                    f"{filename} is human-readable but cannot be loaded."
                )
                save_pkl = True
        except Exception:
            logger.exception("Unable to serialize the config to yaml. Error:")
            save_pkl = True

        if save_pkl:
            new_filename = filename + ".pkl"
            try:
                # retry by pickle
                with open(new_filename, "wb") as f:
                    cloudpickle.dump(cfg, f)
                logger.warning(f"Config is saved using cloudpickle at {new_filename}.")
            except Exception:
                pass

    @staticmethod
    def apply_overrides(cfg, overrides: List[str]):
        """
        In-place override contents of cfg.

        Args:
            cfg: an omegaconf config object
            overrides: list of strings in the format of "a=b" to override configs.
                See https://hydra.cc/docs/next/advanced/override_grammar/basic/
                for syntax.

        Returns:
            the cfg object
        """

        def safe_update(cfg, key, value):
            # Verify every prefix of ``key`` resolves to a config (or nothing)
            # before writing, so we never clobber a scalar with a subtree.
            parts = key.split(".")
            for idx in range(1, len(parts)):
                prefix = ".".join(parts[:idx])
                v = OmegaConf.select(cfg, prefix, default=None)
                if v is None:
                    break
                if not OmegaConf.is_config(v):
                    raise KeyError(
                        f"Trying to update key {key}, but {prefix} "
                        f"is not a config, but has type {type(v)}."
                    )
            OmegaConf.update(cfg, key, value, merge=True)

        from hydra.core.override_parser.overrides_parser import OverridesParser

        parser = OverridesParser.create()
        overrides = parser.parse_overrides(overrides)
        for o in overrides:
            key = o.key_or_group
            value = o.value()
            if o.is_delete():
                # TODO support this
                raise NotImplementedError("deletion is not yet a supported override")
            safe_update(cfg, key, value)
        return cfg

    @staticmethod
    def to_py(cfg, prefix: str = "cfg."):
        """
        Try to convert a config object into Python-like pseudo code.

        Note that perfect conversion is not always possible. So the returned
        results are mainly meant to be human-readable, and not meant to be executed.

        Args:
            cfg: an omegaconf config object
            prefix: root name for the resulting code (default: "cfg.")

        Returns:
            str of formatted Python code
        """
        import black

        cfg = OmegaConf.to_container(cfg, resolve=True)

        def _to_str(obj, prefix=None, inside_call=False):
            if prefix is None:
                prefix = []
            if isinstance(obj, abc.Mapping) and "_target_" in obj:
                # Dict representing a function call
                target = _convert_target_to_string(obj.pop("_target_"))
                args = []
                for k, v in sorted(obj.items()):
                    args.append(f"{k}={_to_str(v, inside_call=True)}")
                args = ", ".join(args)
                call = f"{target}({args})"
                return "".join(prefix) + call
            elif isinstance(obj, abc.Mapping) and not inside_call:
                # Dict that is not inside a call is a list of top-level config objects that we
                # render as one object per line with dot separated prefixes
                key_list = []
                for k, v in sorted(obj.items()):
                    if isinstance(v, abc.Mapping) and "_target_" not in v:
                        key_list.append(_to_str(v, prefix=prefix + [k + "."]))
                    else:
                        key = "".join(prefix) + k
                        key_list.append(f"{key}={_to_str(v)}")
                return "\n".join(key_list)
            elif isinstance(obj, abc.Mapping):
                # Dict that is inside a call is rendered as a regular dict
                return (
                    "{"
                    + ",".join(
                        f"{repr(k)}: {_to_str(v, inside_call=inside_call)}"
                        for k, v in sorted(obj.items())
                    )
                    + "}"
                )
            elif isinstance(obj, list):
                return "[" + ",".join(_to_str(x, inside_call=inside_call) for x in obj) + "]"
            else:
                return repr(obj)

        py_str = _to_str(cfg, prefix=[prefix])
        try:
            return black.format_str(py_str, mode=black.Mode())
        except black.InvalidInput:
            return py_str
17,312
36.3125
96
py
libai
libai-main/libai/config/instantiate.py
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
import logging
from collections import abc
from enum import Enum
from typing import Any, Callable, Dict, List, Union

from hydra.errors import InstantiationException
from omegaconf import OmegaConf

from libai.config.lazy import _convert_target_to_string, locate

logger = logging.getLogger(__name__)

__all__ = ["dump_dataclass", "instantiate"]

# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/config/instantiate.py
# --------------------------------------------------------


class _Keys(str, Enum):
    """Special keys in configs used by instantiate."""

    TARGET = "_target_"
    RECURSIVE = "_recursive_"


def _is_target(x: Any) -> bool:
    """Whether ``x`` is a dict-like node describing a call (has ``_target_``)."""
    if isinstance(x, dict):
        return _Keys.TARGET in x
    if OmegaConf.is_dict(x):
        return _Keys.TARGET in x
    return False


def _is_dict(cfg: Any) -> bool:
    """Whether ``cfg`` is a mapping (plain or OmegaConf)."""
    return OmegaConf.is_dict(cfg) or isinstance(cfg, abc.Mapping)


def _is_list(cfg: Any) -> bool:
    """Whether ``cfg`` is a list (plain or OmegaConf)."""
    return OmegaConf.is_list(cfg) or isinstance(cfg, list)


def dump_dataclass(obj: Any):
    """
    Dump a dataclass recursively into a dict that can be later instantiated.

    Args:
        obj: a dataclass object

    Returns:
        dict
    """
    assert dataclasses.is_dataclass(obj) and not isinstance(
        obj, type
    ), "dump_dataclass() requires an instance of a dataclass."
    ret = {"_target_": _convert_target_to_string(type(obj))}
    for f in dataclasses.fields(obj):
        v = getattr(obj, f.name)
        if dataclasses.is_dataclass(v):
            v = dump_dataclass(v)
        if isinstance(v, (list, tuple)):
            v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
        ret[f.name] = v
    return ret


def _prepare_input_dict_or_list(d: Union[Dict[Any, Any], List[Any]]) -> Any:
    """Recursively convert ``_target_`` values to strings inside a dict/list."""
    res: Any
    if isinstance(d, dict):
        res = {}
        for k, v in d.items():
            if k == "_target_":
                v = _convert_target_to_string(d["_target_"])
            elif isinstance(v, (dict, list)):
                v = _prepare_input_dict_or_list(v)
            res[k] = v
    elif isinstance(d, list):
        res = []
        for v in d:
            if isinstance(v, (list, dict)):
                v = _prepare_input_dict_or_list(v)
            res.append(v)
    else:
        assert False
    return res


def _resolve_target(target):
    """Resolve a ``_target_`` (string or callable) into a callable, or raise."""
    if isinstance(target, str):
        try:
            target = locate(target)
        except Exception as e:
            msg = f"Error locating target '{target}', see chained exception above."
            raise InstantiationException(msg) from e
    if not callable(target):
        msg = f"Expected a callable target, got '{target}' of type '{type(target).__name__}'"
        raise InstantiationException(msg)
    return target


def _call_target(_target_: Callable[..., Any], kwargs: Dict[str, Any]):
    """Call target (type) with kwargs"""
    try:
        return _target_(**kwargs)
    except Exception as e:
        msg = f"Error in call to target '{_convert_target_to_string(_target_)}':\n{repr(e)}"
        raise InstantiationException(msg) from e


def instantiate(cfg, **kwargs: Any) -> Any:
    """
    Recursively instantiate objects defined in dictionaries by "_target_" and arguments.

    Args:
        cfg: a dict-like object with "_target_" that defines the caller, and
            other keys that define the arguments

    Returns:
        object instantiated by cfg
    """
    if cfg is None:
        return None

    if isinstance(cfg, (dict, list)):
        cfg = _prepare_input_dict_or_list(cfg)

    kwargs = _prepare_input_dict_or_list(kwargs)

    if _is_dict(cfg):
        if kwargs:
            cfg = OmegaConf.merge(cfg, kwargs)
        _recursive_ = kwargs.pop(_Keys.RECURSIVE, True)
        return instantiate_cfg(cfg, recursive=_recursive_)
    elif _is_list(cfg):
        _recursive_ = kwargs.pop(_Keys.RECURSIVE, True)
        return instantiate_cfg(cfg, recursive=_recursive_)
    else:
        return cfg  # return as-is if don't know what to do


def instantiate_cfg(cfg: Any, recursive: bool = True):
    """
    Instantiate ``cfg``; a per-node ``_recursive_`` key overrides ``recursive``.
    """
    if cfg is None:
        return cfg

    if _is_dict(cfg):
        recursive = cfg[_Keys.RECURSIVE] if _Keys.RECURSIVE in cfg else recursive

    if not isinstance(recursive, bool):
        msg = f"Instantiation: _recursive_ flag must be a bool, got {type(recursive)}"
        raise TypeError(msg)

    # If OmegaConf list, create new list of instances if recursive
    if OmegaConf.is_list(cfg):
        items = [instantiate_cfg(item, recursive=recursive) for item in cfg._iter_ex(resolve=True)]

        lst = OmegaConf.create(items, flags={"allow_objects": True})
        return lst

    elif isinstance(cfg, list):
        # Specialize for list, because many classes take
        # list[objects] as arguments, such as ResNet, DatasetMapper
        # Fix: recurse via instantiate_cfg (mirroring the OmegaConf-list branch).
        # The old call ``instantiate(item, recursive=recursive)`` passed
        # ``recursive`` into **kwargs, which was then merged into every dict
        # item as a bogus literal "recursive" key (the special key is
        # "_recursive_") and forwarded to the target callable.
        return [instantiate_cfg(item, recursive=recursive) for item in cfg]

    elif _is_dict(cfg):
        exclude_keys = set({"_target_", "_recursive_"})
        if _is_target(cfg):
            _target_ = instantiate(cfg.get(_Keys.TARGET))  # instantiate lazy target
            _target_ = _resolve_target(_target_)

            kwargs = {}
            for key, value in cfg.items():
                if key not in exclude_keys:
                    if recursive:
                        value = instantiate_cfg(value, recursive=recursive)
                    kwargs[key] = value
            return _call_target(_target_, kwargs)
        else:
            return cfg
    else:
        return cfg  # return as-is if don't know what to do
6,348
30.430693
99
py
libai
libai-main/libai/models/gpt_model.py
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import oneflow as flow
from oneflow import nn
from oneflow.nn import init

from libai.config import configurable
from libai.layers import (
    Embedding,
    LayerNorm,
    LMLogits,
    ParallelCrossEntropyLoss,
    TransformerLayer,
    VocabEmbedding,
)
from libai.layers.attention import AttnMaskType
from libai.utils import distributed as dist

from .utils import init_method_normal, scaled_init_method_normal


# NOTE(review): the class name is presumably a typo for "CausalMask"; it is kept
# as-is because renaming would break external callers.
class CasualMask(nn.Module):
    """
    Create a casual mask and combine it with the padding mask.
    It will be used in gpt model and T5 decoder.

    When in T5 decoder, the argument `layer_idx` should be set to first decoder layer index.
    """

    def __init__(self, max_positions=1024, *, layer_idx=0):
        super().__init__()
        # Lower-triangular (causal) matrix of int8 ones, placed on the stage of
        # `layer_idx` and broadcast across both parallel dimensions.
        self.mask = flow.tril(
            flow.ones(
                (max_positions, max_positions),
                dtype=flow.int8,
                placement=dist.get_layer_placement(layer_idx),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            )
        )

    def forward(self, input_ids, past_length=0, attention_mask=None):
        bsz, tgt_len = input_ids.size()
        casual_mask = self.mask[:tgt_len, :tgt_len]
        if past_length > 0:
            # in case past_key_values are used, we need to add a prefix ones mask to casual mask
            # NOTE(review): this `flow.ones` has no placement/sbp, unlike the
            # buffer it is concatenated with — confirm global/local semantics.
            casual_mask = flow.cat(
                [flow.ones(tgt_len, past_length, dtype=flow.int8), casual_mask], dim=-1
            )
        # Expand to (batch, 1, tgt_len, tgt_len + past_length) for broadcasting
        # over attention heads.
        casual_mask = (
            casual_mask.unsqueeze(0).unsqueeze(1).expand(bsz, 1, tgt_len, tgt_len + past_length)
        )
        casual_mask = casual_mask.to_global(sbp=input_ids.sbp)
        if attention_mask is not None:
            assert attention_mask.dim() == 4, "please extend the attention mask first"
            # Combine the causal mask with the (already-extended) padding mask.
            casual_mask = casual_mask * attention_mask
        return casual_mask


class GPTModel(nn.Module):
    """GPT-2 language model. The output of the forward method is logits.

    Args:
        hidden_layers (int): The number of ``TransformerLayer`` in the gpt model.
        vocab_size (int): The size of vocabulary file.
        hidden_size (int): The size of hidden states.
        ffn_hidden_size (int):
            The size of intermediate layer in feed-forward network for each ``TransformerLayer``.
        num_attention_heads (int):
            The number of attention heads for each attention layer of ``TransformerLayer``.
        max_seq_length (int, optional):
            Max sequence length of input, defines the shape of Position Embeddings in
            GPTEmebedding. Defaults to 1024.
        embedding_dropout_prob (float, optional):
            The dropout ratio for the output of GPTEmbedding Layer. Defaults to 0.0.
        attention_dropout_prob (float, optional):
            The dropout ratio for the output of each attention layer in ``TransformerLayer``.
            Defaults to 0.0.
        output_dropout_prob (float, optional):
            The dropout ratio for the output for each TransformerLayer. Defaults to 0.0.
        layernorm_epsilon (float, optional):
            The epsilon of LayerNorm layer. Defaults to 1e-5.
        initializer_range (float, optional):
            Sigma of the normal distribution in the initialization method. Defaults to 0.02.
        use_scaled_init_for_output_weights (bool, optional): Defaults to ``True``.
        bias_gelu_fusion (bool, optional):
            Whether or not to fuse the computing of bias and gelu. Defaults to ``False``.
        bias_dropout_fusion (bool, optional):
            Whether or not to fuse the computing of dropout and bias. Defaults to ``False``.
        scale_mask_softmax_fusion (bool, optional):
            Whether to fuse the computing of mask and softmax in attention layers.
            Defaults to ``False``.
        apply_query_key_layer_scaling (bool, optional):
            Whether or not to use layer index related scaling in computing attention scores.
            If ``True``, the scaling factor equals to sqrt(d) * (layer_index + 1).
            Defaults to ``False``.
        apply_residual_post_layernorm (bool, optional):
            If set ``True``, use original BERT residual connection ordering otherwise use Megatron
            BERT residual connection which is more stable when scaling model size introduced in
            https://arxiv.org/pdf/1909.08053.pdf.
            Default: ``False``.
        amp_enabled (bool, optional):
            Whether or not to set fp16 for embedding weight in T5 model. Defaults to ``False``.
    """

    @configurable
    def __init__(
        self,
        hidden_layers,
        vocab_size,
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        max_seq_length=1024,
        embedding_dropout_prob=0.0,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        layernorm_epsilon=1e-5,
        initializer_range=0.02,
        use_scaled_init_for_output_weights=True,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=False,
        amp_enabled=False,
    ):
        super().__init__()
        init_method = init_method_normal(sigma=initializer_range)
        if use_scaled_init_for_output_weights:
            # Megatron-style: scale output-layer init by 1/sqrt(2 * num_layers).
            output_layer_init_method = scaled_init_method_normal(initializer_range, hidden_layers)
        else:
            output_layer_init_method = init_method

        self.embeddings = GPTEmbedding(
            vocab_size,
            hidden_size,
            max_seq_length,
            init_method=init_method,
            embedding_dropout_prob=embedding_dropout_prob,
            amp_enabled=amp_enabled,
        )

        self.transformer = Transformer(
            hidden_layers,
            hidden_size,
            ffn_hidden_size,
            num_attention_heads,
            attention_dropout_prob=attention_dropout_prob,
            output_dropout_prob=output_dropout_prob,
            layernorm_epsilon=layernorm_epsilon,
            init_method=init_method,
            output_layer_init_method=output_layer_init_method,
            bias_gelu_fusion=bias_gelu_fusion,
            bias_dropout_fusion=bias_dropout_fusion,
            scale_mask_softmax_fusion=scale_mask_softmax_fusion,
            apply_query_key_layer_scaling=apply_query_key_layer_scaling,
            apply_residual_post_layernorm=apply_residual_post_layernorm,
        )

        # Output head; shares weights with the token embedding (see forward).
        self.lm_head = LMLogits(vocab_size, bias=False)

    @classmethod
    def from_config(cls, cfg):
        """Translate an omegaconf cfg node into __init__ kwargs (for @configurable)."""
        return {
            "hidden_layers": cfg.hidden_layers,
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "ffn_hidden_size": cfg.ffn_hidden_size,
            "num_attention_heads": cfg.num_attention_heads,
            "max_seq_length": cfg.max_seq_length,
            "embedding_dropout_prob": cfg.embedding_dropout_prob,
            "attention_dropout_prob": cfg.attention_dropout_prob,
            "output_dropout_prob": cfg.output_dropout_prob,
            "layernorm_epsilon": cfg.layernorm_epsilon,
            "initializer_range": cfg.initializer_range,
            "use_scaled_init_for_output_weights": cfg.use_scaled_init_for_output_weights,
            "bias_gelu_fusion": cfg.bias_gelu_fusion,
            "bias_dropout_fusion": cfg.bias_dropout_fusion,
            "scale_mask_softmax_fusion": cfg.scale_mask_softmax_fusion,
            "apply_query_key_layer_scaling": cfg.apply_query_key_layer_scaling,
            "apply_residual_post_layernorm": cfg.apply_residual_post_layernorm,
            "amp_enabled": cfg.amp_enabled,
        }

    def forward(self, input_ids):
        """

        Args:
            input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary.

        Returns:
            flow.Tensor: logits
        """
        input_ids = input_ids.to_global(placement=dist.get_layer_placement(0))

        input_embeds = self.embeddings(input_ids, 0)

        transformer_output = self.transformer(input_embeds, attention_mask=None)
        # Weight tying: logits are computed against the token embedding matrix.
        output = self.lm_head(transformer_output, self.embeddings.token_embeddings.weight)

        return output


class GPTEmbedding(nn.Module):
    """Token + learned position embeddings with dropout, for the GPT model."""

    def __init__(
        self,
        vocab_size,
        hidden_size,
        max_seq_length,
        init_method=init.xavier_normal_,
        embedding_dropout_prob=0.0,
        amp_enabled=False,
    ):
        super().__init__()
        self.token_embeddings = VocabEmbedding(
            vocab_size, hidden_size, init_method=init_method, amp_enabled=amp_enabled
        )
        self.position_embeddings = Embedding(
            max_seq_length, hidden_size, init_method=init_method, amp_enabled=amp_enabled
        )
        self.dropout = nn.Dropout(embedding_dropout_prob)

        # Precomputed [0, max_seq_length) position ids, shape (1, max_seq_length),
        # broadcast-placed on the first pipeline stage.
        self.position_ids = flow.arange(
            max_seq_length,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        ).unsqueeze(0)

    def forward(self, input_ids, past_length=0):
        bsz, seq_length = input_ids.size()

        # Offset by past_length so cached-decoding positions line up.
        position_ids = self.position_ids[:, past_length : past_length + seq_length]
        position_ids = position_ids.expand_as(input_ids).to_global(sbp=input_ids.sbp)

        token_embeds = self.token_embeddings(input_ids)
        position_embeds = self.position_embeddings(position_ids)
        input_embeds = token_embeds + position_embeds
        input_embeds = self.dropout(input_embeds)
        return input_embeds


class Transformer(nn.Module):
    """Stack of ``TransformerLayer``s (causal attention) with a final LayerNorm."""

    def __init__(
        self,
        hidden_layers,
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        attention_dropout_prob=0.0,
        output_dropout_prob=0.0,
        layernorm_epsilon=1e-5,
        init_method=init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=False,
    ):
        super().__init__()
        self.hidden_layers = hidden_layers

        def build_layer(layer_number):
            # layer_idx drives pipeline-stage placement and (optionally)
            # layer-dependent attention-score scaling.
            return TransformerLayer(
                hidden_size,
                ffn_hidden_size,
                num_attention_heads,
                attention_dropout_prob=attention_dropout_prob,
                output_dropout_prob=output_dropout_prob,
                layernorm_epsilon=layernorm_epsilon,
                init_method=init_method,
                output_layer_init_method=output_layer_init_method,
                bias_gelu_fusion=bias_gelu_fusion,
                bias_dropout_fusion=bias_dropout_fusion,
                scale_mask_softmax_fusion=scale_mask_softmax_fusion,
                apply_query_key_layer_scaling=apply_query_key_layer_scaling,
                apply_residual_post_layernorm=apply_residual_post_layernorm,
                attn_mask_type=AttnMaskType.causal,
                layer_idx=layer_number,
            )

        self.layers = nn.ModuleList([build_layer(i) for i in range(self.hidden_layers)])
        # Final LayerNorm lives on the last pipeline stage (layer_idx=-1).
        self.layernorm_f = LayerNorm(hidden_size, eps=layernorm_epsilon, layer_idx=-1)

    def forward(self, hidden_states, attention_mask):
        # hidden_states shape: (batch_size, seq_length, hidden_size)
        # sbp: [S(0), B]
        for i, layer in enumerate(self.layers):
            hidden_states = layer(hidden_states, attention_mask)
        output = self.layernorm_f(hidden_states)

        return output


class GPTLoss(nn.Module):
    """Mean parallel cross-entropy loss over the LM logits."""

    def __init__(self) -> None:
        super().__init__()
        self.lm_loss = ParallelCrossEntropyLoss()

    def forward(self, logits, lm_labels):
        lm_loss = self.lm_loss(logits, lm_labels)
        lm_loss = lm_loss.mean()
        return {"lm_loss": lm_loss}


class GPTForPreTraining(nn.Module):
    """
    GPT Model with classification head on top.
    """

    def __init__(self, cfg) -> None:
        super().__init__()
        self.GPT_model = GPTModel(cfg)
        self.loss_func = GPTLoss()

    def forward(
        self,
        input_ids,
        labels=None,
    ):
        """

        Args:
            input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary.
            labels (flow.LongTensor, optional): Labels for computing language modeling loss.
                None for evaluating. Defaults to None.

        Returns:
            dict: A dict containing :code:`loss_value` or :code:`logits`
            depending on training or evaluation.
            :code:`{"lm_loss": loss_value}` when training,
            :code:`{"prediction_scores": logits}` when evaluating.
        """
        logits = self.GPT_model(input_ids)
        if labels is not None:
            lm_loss = self.loss_func(logits, labels)
            return lm_loss
        else:
            return {"prediction_scores": logits}

    @staticmethod
    def set_pipeline_stage_id(model: nn.Module):
        """Assign each submodule to its pipeline stage (embeddings/mask first,
        transformer layers by index, logits/loss/final-LN last)."""
        dist_utils = dist.get_dist_util()

        if hasattr(model.GPT_model.transformer.layernorm_f, "config"):
            # Old API in OneFlow 0.8
            for module_block in model.modules():
                if isinstance(module_block.origin, (GPTEmbedding, CasualMask)):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.origin, TransformerLayer):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.origin, (LMLogits, GPTLoss)):
                    module_block.config.set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            model.GPT_model.transformer.layernorm_f.config.set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
        else:
            # Newer OneFlow graph-module API.
            for module_block in model.modules():
                if isinstance(module_block.to(nn.Module), (GPTEmbedding, CasualMask)):
                    module_block.to(nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0)
                    )
                elif isinstance(module_block.to(nn.Module), TransformerLayer):
                    module_block.to(nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(module_block.layer_idx),
                        dist.get_layer_placement(module_block.layer_idx),
                    )
                elif isinstance(module_block.to(nn.Module), (LMLogits, GPTLoss)):
                    module_block.to(nn.graph.GraphModule).set_stage(
                        dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
                    )
            model.GPT_model.transformer.layernorm_f.to(nn.graph.GraphModule).set_stage(
                dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1)
            )
16,017
39.045
100
py
libai
libai-main/libai/models/t5_model.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import oneflow as flow import oneflow.nn as nn from libai.config import configurable from libai.layers import ( Embedding, LayerNorm, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, ) from libai.layers.attention import AttnMaskType from libai.models.utils import init_method_normal, scaled_init_method_normal from libai.utils import distributed as dist class ExtendedMask(flow.nn.Module): def forward(self, attention_mask): return attention_mask.unsqueeze(1) class T5Embedding(flow.nn.Module): def __init__( self, hidden_size, vocab_size, max_sequence_length, embedding_dropout_prob, init_method=flow.nn.init.xavier_normal_, amp_enabled=False, ) -> None: super().__init__() self.hidden_size = hidden_size self.vocab_size = vocab_size self.word_embeddings = VocabEmbedding( num_embeddings=vocab_size, embedding_dim=hidden_size, init_method=init_method, amp_enabled=amp_enabled, ) self.position_embeddings = Embedding( num_embeddings=max_sequence_length, embedding_dim=hidden_size, init_method=init_method, amp_enabled=amp_enabled, ) self.position_ids = flow.arange( max_sequence_length, dtype=flow.long, sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), placement=dist.get_layer_placement(0), ).unsqueeze(0) self.embedding_dropout = flow.nn.Dropout(embedding_dropout_prob) def forward(self, input_ids, past_length=0): seq_length = 
input_ids.size()[1] position_ids = self.position_ids[:, past_length : past_length + seq_length] position_ids = position_ids.expand_as(input_ids).to_global(sbp=input_ids.sbp) word_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) embeddings = word_embeddings + position_embeddings embeddings = self.embedding_dropout(embeddings) return embeddings class T5Model(flow.nn.Module): """T5 Model that outputs logits. Args: vocab_size (int): The size of vocabulary file. hidden_size (int): The size of hidden states. hidden_layers (int): The number of ``TransformerLayer`` in the encoder and decoder. num_attention_heads (int): The number of attention heads for each attention layer of ``TransformerLayer``. intermediate_size (int): The size of intermediate layer in feed-forward network for each ``TransformerLayer``. embedding_dropout_prob (float): The dropout ratio for the output of T5Embedding Layer. hidden_dropout_prob (float): The dropout ratio for the output for each ``TransformerLayer``. attention_probs_dropout_prob (float): The dropout ratio for the output of each attention layer in ``TransformerLayer``. max_position_embeddings (int): Max sequence length of input, defines the shape of Position Embeddings in ``T5Emebedding``. initializer_range (float, optional): Sigma of the normal distribution in the initialization method. Defaults to 0.02. layernorm_eps (float, optional): The epsilon of LayerNorm layer. Defaults to 1e-12. bias_gelu_fusion (bool, optional): Whether or not to fuse the computing of bias and gelu. Defaults to ``False``. bias_dropout_fusion (bool, optional): Whether or not to fuse the computing of dropout and bias. Defaults to ``False``. scale_mask_softmax_fusion (bool, optional): Whether to fuse the computing of mask and softmax in attention layers. Defaults to ``False``. apply_query_key_layer_scaling (bool, optional): Whether or not to use layer index related scaling in computing attention scores. 
If ``True``, the scaling factor equals to sqrt(d) * (layer_index + 1). Defaults to ``True``. apply_residual_post_layernorm (bool, optional): If set ``True``, use original BERT residual connection ordering otherwise use Megatron BERT residual connection which is more stable when scaling model size introduced in https://arxiv.org/pdf/1909.08053.pdf. Default: ``False``. amp_enabled (bool, optional): Whether or not to set fp16 for embedding weight in T5 model. Defaults to ``False``. """ @configurable def __init__( self, vocab_size, hidden_size, hidden_layers, num_attention_heads, intermediate_size, embedding_dropout_prob, hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, initializer_range=0.02, layernorm_eps=1e-12, bias_gelu_fusion=False, bias_dropout_fusion=False, scale_mask_softmax_fusion=False, apply_query_key_layer_scaling=True, apply_residual_post_layernorm=False, amp_enabled=False, ) -> None: super().__init__() init_method = init_method_normal(initializer_range) scaled_init_method = scaled_init_method_normal(initializer_range, hidden_layers) self.embedding = T5Embedding( hidden_size=hidden_size, vocab_size=vocab_size, max_sequence_length=max_position_embeddings, embedding_dropout_prob=embedding_dropout_prob, init_method=init_method, amp_enabled=amp_enabled, ) self.extended_attn_mask = ExtendedMask() encoder_layers = flow.nn.ModuleList( [ TransformerLayer( hidden_size=hidden_size, ffn_hidden_size=intermediate_size, num_attention_heads=num_attention_heads, is_decoder=False, attention_dropout_prob=attention_probs_dropout_prob, output_dropout_prob=hidden_dropout_prob, layernorm_epsilon=layernorm_eps, init_method=init_method, output_layer_init_method=scaled_init_method, bias_gelu_fusion=bias_gelu_fusion, bias_dropout_fusion=bias_dropout_fusion, scale_mask_softmax_fusion=scale_mask_softmax_fusion, apply_query_key_layer_scaling=apply_query_key_layer_scaling, apply_residual_post_layernorm=apply_residual_post_layernorm, 
attn_mask_type=AttnMaskType.padding, layer_idx=i, ) for i in range(hidden_layers) ] ) encoder_final_layernorm = LayerNorm( (hidden_size,), eps=layernorm_eps, layer_idx=hidden_layers - 1, ) self.encoder = flow.nn.Sequential() self.encoder.add_module("layers", encoder_layers) self.encoder.add_module("final_layernorm", encoder_final_layernorm) decoder_layers = flow.nn.ModuleList( [ TransformerLayer( hidden_size=hidden_size, ffn_hidden_size=intermediate_size, num_attention_heads=num_attention_heads, is_decoder=True, attention_dropout_prob=attention_probs_dropout_prob, output_dropout_prob=hidden_dropout_prob, layernorm_epsilon=layernorm_eps, init_method=init_method, output_layer_init_method=scaled_init_method, bias_gelu_fusion=bias_gelu_fusion, bias_dropout_fusion=bias_dropout_fusion, scale_mask_softmax_fusion=scale_mask_softmax_fusion, apply_query_key_layer_scaling=apply_query_key_layer_scaling, attn_mask_type=AttnMaskType.padding, layer_idx=i, ) for i in range(hidden_layers, 2 * hidden_layers) ] ) decoder_final_layernorm = LayerNorm( (hidden_size,), eps=layernorm_eps, layer_idx=2 * hidden_layers - 1, ) self.decoder = flow.nn.Sequential() self.decoder.add_module("layers", decoder_layers) self.decoder.add_module("final_layernorm", decoder_final_layernorm) self.past_key_values = [None] * len(self.decoder.layers) self.encoder_states = None self.past_length = 0 self.lm_head = LMLogits(vocab_size, bias=True) @classmethod def from_config(cls, cfg): return { "vocab_size": cfg.vocab_size, "hidden_size": cfg.hidden_size, "hidden_layers": cfg.hidden_layers, "num_attention_heads": cfg.num_attention_heads, "intermediate_size": cfg.intermediate_size, "embedding_dropout_prob": cfg.embedding_dropout_prob, "hidden_dropout_prob": cfg.hidden_dropout_prob, "attention_probs_dropout_prob": cfg.attention_probs_dropout_prob, "max_position_embeddings": cfg.max_position_embeddings, "initializer_range": cfg.initializer_range, "layernorm_eps": cfg.layernorm_eps, "bias_gelu_fusion": 
cfg.bias_gelu_fusion, "bias_dropout_fusion": cfg.bias_dropout_fusion, "scale_mask_softmax_fusion": cfg.scale_mask_softmax_fusion, "apply_query_key_layer_scaling": cfg.apply_query_key_layer_scaling, "apply_residual_post_layernorm": cfg.apply_residual_post_layernorm, "amp_enabled": cfg.amp_enabled, } def forward( self, encoder_input_ids, decoder_input_ids, encoder_attn_mask, decoder_attn_mask, encoder_decoder_attn_mask, use_cache=False, ): """ Args: encoder_input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary for encoder. decoder_input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary for decoder. encoder_attn_mask (flow.BoolTensor): Mask for encoder to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. decoder_attn_mask (flow.BoolTensor): Mask for decoder to avoid performing attention on subsequent token indices. Mask values have the same meaning as encoder_attn_mask. encoder_decoder_attn_mask (flow.BoolTensor): Mask for decoder to avoid performing attention on encoder padded token indices. Mask values have the same meaning as encoder_attn_mask. use_cache (bool, optional): It will be set to True, when the model is in the inference phase and used for incremental decoding. Defaults to False. 
Returns: flow.Tensor: logits """ encoder_input_ids = encoder_input_ids.to_global(placement=dist.get_layer_placement(0)) decoder_input_ids = decoder_input_ids.to_global(placement=dist.get_layer_placement(0)) encoder_attn_mask = encoder_attn_mask.to_global(placement=dist.get_layer_placement(0)) decoder_attn_mask = decoder_attn_mask.to_global(placement=dist.get_layer_placement(0)) encoder_decoder_attn_mask = encoder_decoder_attn_mask.to_global( placement=dist.get_layer_placement(0) ) if use_cache and self.encoder_states is not None: encoder_states = self.encoder_states else: self.set_cache(encoder_states=None, past_key_values=None) encoder_attn_mask = self.extended_attn_mask(encoder_attn_mask) enc_embedding_output = self.embedding(encoder_input_ids) enc_hidden_states = enc_embedding_output for layer in self.encoder.layers: enc_hidden_states = layer(enc_hidden_states, encoder_attn_mask) encoder_states = self.encoder.final_layernorm(enc_hidden_states) decoder_attn_mask = self.extended_attn_mask(decoder_attn_mask) encoder_decoder_attn_mask = self.extended_attn_mask(encoder_decoder_attn_mask) dec_embedding_output = self.embedding(decoder_input_ids, self.past_length) dec_hidden_states = dec_embedding_output if use_cache: presents = [] for layer, past_key_value in zip(self.decoder.layers, self.past_key_values): dec_hidden_states = layer( dec_hidden_states, decoder_attn_mask, encoder_states, encoder_decoder_attn_mask, past_key_value=past_key_value, use_cache=use_cache, ) if use_cache: dec_hidden_states, present = dec_hidden_states presents.append(present) if use_cache: self.set_cache(encoder_states, past_key_values=presents) decoder_states = self.decoder.final_layernorm(dec_hidden_states) logits = self.lm_head(decoder_states, self.embedding.word_embeddings.weight) return logits def set_cache(self, encoder_states, past_key_values): self.encoder_states = encoder_states self.past_length = 0 if past_key_values is None else past_key_values[0][0].shape[2] if past_key_values is 
None: past_key_values = [None] * len(self.decoder.layers) assert len(past_key_values) == len(self.decoder.layers), ( f"past_key_values's length {len(past_key_values)} doesn't match " f"decoder num_layers' length {self.decoder.layers}" ) self.past_key_values = past_key_values class T5Loss(flow.nn.Module): def __init__(self) -> None: super().__init__() self.lm_loss = ParallelCrossEntropyLoss() def forward(self, logits, lm_labels, loss_mask): lm_loss = self.lm_loss(logits, lm_labels) loss_mask = loss_mask.to_global(placement=lm_loss.placement) loss_mask = loss_mask.float() denominator = loss_mask.sum().to_global( sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]) ) lm_loss = flow._C.amp_white_identity(lm_loss) lm_loss = flow._C.amp_black_identity(lm_loss) masked_lm_loss = flow.sum(lm_loss.view(-1) * loss_mask.view(-1)) / denominator masked_lm_loss = masked_lm_loss.to_global( sbp=dist.get_nd_sbp([flow.sbp.partial_sum, flow.sbp.broadcast]) ) return {"masked_lm_loss": masked_lm_loss} class T5ForPreTraining(flow.nn.Module): """ T5 Model with classification head on top. """ def __init__(self, cfg) -> None: super().__init__() self.t5_model = T5Model(cfg) self.loss_func = T5Loss() def set_cache(self, encoder_states, past_key_values): self.t5_model.set_cache(encoder_states, past_key_values) def forward( self, encoder_input_ids, decoder_input_ids, encoder_attn_mask, decoder_attn_mask, encoder_decoder_attn_mask, lm_labels=None, loss_mask=None, use_cache=False, ): """ Args: encoder_input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary for encoder. decoder_input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary for decoder. encoder_attn_mask (flow.BoolTensor): Mask for encoder to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
decoder_attn_mask (flow.BoolTensor): Mask for decoder to avoid performing attention on subsequent token indices. Mask values have the same meaning as encoder_attn_mask. encoder_decoder_attn_mask (flow.BoolTensor): Mask for decoder to avoid performing attention on encoder padded token indices. Mask values have the same meaning as encoder_attn_mask. lm_labels (flow.LongTensor, optional): Labels for computing the masked language modeling loss. Indices should be in `[-1, 0, ..., config.vocab_size]`. None for evaluating. loss_mask (flow.BoolTensor, optional): Mask to avoid performing loss computing on ignored tokens. Tokens with indices set to `-1` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. None for evaluating. use_cache (bool, optional): It will be set to True, when the model is in the inference phase and used for incremental decoding. Defaults to False. Returns: dict: A dict containing :code:`loss_value` or :code:`logits` depending on training or evaluation mode. :code:`{"masked_lm_loss": loss_value}` when training, :code:`{"prediction_scores": logits}` when evaluating. 
""" logits = self.t5_model( encoder_input_ids, decoder_input_ids, encoder_attn_mask, decoder_attn_mask, encoder_decoder_attn_mask, use_cache=use_cache, ) if lm_labels is not None: lm_loss = self.loss_func(logits, lm_labels, loss_mask) return lm_loss else: return { "prediction_scores": logits, } @staticmethod def set_pipeline_stage_id(model): dist_utils = dist.get_dist_util() # Set pipeline parallelism stage_id if hasattr(model.t5_model.encoder.final_layernorm, "config"): # Old API in OneFlow 0.8 for module_block in model.modules(): if isinstance(module_block.origin, T5Embedding): module_block.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.origin, ExtendedMask): module_block.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.origin, TransformerLayer): module_block.config.set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) elif isinstance(module_block.origin, LMLogits): module_block.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) elif isinstance(module_block.origin, T5Loss): module_block.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.t5_model.encoder.final_layernorm.config.set_stage( dist_utils.get_layer_stage_id(model.t5_model.encoder.final_layernorm.layer_idx), dist.get_layer_placement(model.t5_model.encoder.final_layernorm.layer_idx), ) model.t5_model.decoder.final_layernorm.config.set_stage( dist_utils.get_layer_stage_id(model.t5_model.decoder.final_layernorm.layer_idx), dist.get_layer_placement(model.t5_model.decoder.final_layernorm.layer_idx), ) else: for module_block in model.modules(): if isinstance(module_block.to(nn.Module), T5Embedding): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.to(nn.Module), 
ExtendedMask): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.to(nn.Module), TransformerLayer): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) elif isinstance(module_block.to(nn.Module), LMLogits): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) elif isinstance(module_block.to(nn.Module), T5Loss): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.t5_model.encoder.final_layernorm.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(model.t5_model.encoder.final_layernorm.layer_idx), dist.get_layer_placement(model.t5_model.encoder.final_layernorm.layer_idx), ) model.t5_model.decoder.final_layernorm.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(model.t5_model.decoder.final_layernorm.layer_idx), dist.get_layer_placement(model.t5_model.decoder.final_layernorm.layer_idx), )
22,776
42.88632
100
py
libai
libai-main/libai/models/swin_transformer.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import oneflow as flow import oneflow.nn as nn from flowvision.layers import trunc_normal_ from flowvision.models import to_2tuple from libai.config.config import configurable from libai.layers import MLP, DropPath, LayerNorm, Linear from libai.utils import distributed as dist def window_partition(x, window_size): B, H, W, C = x.shape x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows def window_reverse(windows, window_size, H, W): B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x class WindowAttention(nn.Module): """Window based multi-head self attention (W-MSA) module with relative position bias. It supports both of shifted and non-shifted window. Args: dim (int): Number of input channels. window_size (tuple[int]): The height and width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to query,key,value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set attn_drop (float, optional): Dropout ratio of attention weight. 
Default: 0.0 proj_drop (float, optional): Dropout ratio of output. Default: 0.0 """ def __init__( self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0.0, proj_drop=0.0, fused_bias_add_dropout=False, layer_idx=0, ): super().__init__() self.dim = dim self.window_size = window_size # Wh, Ww self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 # define a parameter table of relative position bias self.relative_position_bias_table = nn.Parameter( flow.zeros( (2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads, placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) ) # 2*Wh-1 * 2*Ww-1, nH trunc_normal_(self.relative_position_bias_table, std=0.02) # get pair-wise relative position index for each token inside the window coords_h = flow.arange(self.window_size[0]) coords_w = flow.arange(self.window_size[1]) coords = flow.stack(flow.meshgrid(*[coords_h, coords_w])) # 2, Wh, Ww coords_flatten = flow.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] = ( relative_coords[:, :, 0] + self.window_size[0] - 1 ) # shift to start from 0 relative_coords[:, :, 1] = relative_coords[:, :, 1] + self.window_size[1] - 1 relative_coords[:, :, 0] = relative_coords[:, :, 0] * (2 * self.window_size[1] - 1) relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww self.register_buffer( "relative_position_index", relative_position_index.to_global( placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ), ) self.qkv = Linear(dim, dim * 3, bias=qkv_bias, layer_idx=layer_idx) self.attn_drop = nn.Dropout(attn_drop) self.proj = Linear(dim, dim, layer_idx=layer_idx) self.proj_drop = nn.Dropout(proj_drop) self.softmax = 
nn.Softmax(dim=-1) self.fused_bias_add_dropout = fused_bias_add_dropout self.p = proj_drop def forward(self, x, mask): """ Args: x: input features with shape of (num_windows*B, N, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None """ B_, N, C = x.shape qkv = ( self.qkv(x) .reshape(B_, N, 3, self.num_heads, C // self.num_heads) .permute(2, 0, 3, 1, 4) ) q, k, v = qkv[0], qkv[1], qkv[2] q = q * self.scale # attn = flow.matmul(q, k.transpose(-2, -1)) attn = flow.matmul(q, k, transpose_b=True) relative_position_bias = self.relative_position_bias_table[ self.relative_position_index.view(-1) ].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1, ) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute( 2, 0, 1 ).contiguous() # nH, Wh*Ww, Wh*Ww unsqueeze_relative_position_bias = relative_position_bias.unsqueeze(0) attn = attn + unsqueeze_relative_position_bias if mask is not None: nW = mask.shape[0] attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = flow.matmul(attn, v).transpose(1, 2).reshape(B_, N, C) if self.fused_bias_add_dropout: x = flow._C.matmul(x, self.proj.weight, transpose_a=False, transpose_b=True) x = flow._C.fused_bias_add_dropout(x, self.proj.bias, p=self.p, axis=2) else: x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerBlock(nn.Module): """Swin Transformer Block. Args: dim (int): Number of input channels. input_resolution (tuple[int]): Input resulotion. num_heads (int): Number of attention heads. window_size (int): Window size. shift_size (int): Shift size for SW-MSA. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. 
Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float, optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. Default: libai.layers.LayerNorm """ def __init__( self, dim, input_resolution, num_heads, window_size=7, shift_size=0, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=LayerNorm, layer_idx=0, ): super().__init__() self.dim = dim self.input_resolution = input_resolution self.num_heads = num_heads self.window_size = window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio self.layer_idx = layer_idx if min(self.input_resolution) <= self.window_size: # if window size is larger than input resolution, we don't partition windows self.shift_size = 0 self.window_size = min(self.input_resolution) assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" self.norm1 = norm_layer(dim, layer_idx=layer_idx) self.attn = WindowAttention( dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, fused_bias_add_dropout=True, layer_idx=layer_idx, ) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim, layer_idx=layer_idx) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = MLP( hidden_size=dim, ffn_hidden_size=mlp_hidden_dim, output_dropout_prob=drop, bias_gelu_fusion=True, bias_dropout_fusion=True, layer_idx=layer_idx, ) if self.shift_size > 0: # calculate attention mask for SW-MSA H, W = self.input_resolution img_mask = flow.zeros((1, H, W, 1)) # 1 H W 1 h_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), 
slice(-self.shift_size, None), ) w_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) cnt = 0 for h in h_slices: for w in w_slices: img_mask[:, h, w, :] = cnt cnt = cnt + 1 mask_windows = window_partition( img_mask, self.window_size ) # nW, window_size, window_size, 1 mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( attn_mask == 0, float(0.0) ) attn_mask = attn_mask.to_global( placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) else: attn_mask = None self.register_buffer("attn_mask", attn_mask) def forward(self, x): H, W = self.input_resolution B, L, C = x.shape assert L == H * W, "input feature has wrong size" shortcut = x x = self.norm1(x) x = x.view(B, H, W, C) # cyclic shift if self.shift_size > 0: shifted_x = flow.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x = x # partition windows x_windows = window_partition( shifted_x, self.window_size ) # nW*B, window_size, window_size, C x_windows = x_windows.view( -1, self.window_size * self.window_size, C ) # nW*B, window_size*window_size, C # W-MSA/SW-MSA attn_windows = self.attn(x_windows, self.attn_mask) # nW*B, window_size*window_size, C # merge windows attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C # reverse cyclic shift if self.shift_size > 0: x = flow.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x x = x.view(B, H * W, C) # FFN x = shortcut + self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class PatchMerging(nn.Module): """Patch Merging Layer. Args: input_resolution (tuple[int]): Resolution of input feature. 
dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: libai.layers.LayerNorm """ def __init__(self, input_resolution, dim, norm_layer=LayerNorm, layer_idx=0): super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = Linear(4 * dim, 2 * dim, bias=False, layer_idx=layer_idx) self.norm = norm_layer(4 * dim, layer_idx=layer_idx) self.layer_idx = layer_idx def forward(self, x): """ x: B, H*W, C """ H, W = self.input_resolution B, L, C = x.shape assert L == H * W, "input feature has wrong size" assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." x = x.view(B, H, W, C) x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C x = flow.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C x = self.norm(x) x = self.reduction(x) return x class PatchEmbed(nn.Module): """Image to Patch Embedding Args: img_size (int): Image size. Default: 224. patch_size (int): Patch token size. Default: 4. in_chans (int): Number of input image channels. Default: 3. embed_dim (int): Number of linear projection output channels. Default: 96. norm_layer (nn.Module, optional): Normalization layer. 
Default: None """ def __init__( self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None, layer_idx=0 ): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) patches_resolution = [ img_size[0] // patch_size[0], img_size[1] // patch_size[1], ] self.img_size = img_size self.patch_size = patch_size self.patches_resolution = patches_resolution self.num_patches = patches_resolution[0] * patches_resolution[1] self.in_chans = in_chans self.embed_dim = embed_dim self.proj = nn.Conv2d( in_chans, embed_dim, kernel_size=patch_size, stride=patch_size ).to_global( placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) if norm_layer is not None: self.norm = norm_layer(embed_dim, layer_idx=layer_idx) else: self.norm = None def forward(self, x): B, C, H, W = x.shape # FIXME look at relaxing size constraints assert ( H == self.img_size[0] and W == self.img_size[1] ), f"Input image size ({H}*{W}) doesn't match model({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C if self.norm is not None: x = self.norm(x) return x class BasicLayer(nn.Module): """A basic Swin Transformer layer for one stage. Args: dim (int): Number of input channels. input_resolution (tuple[int]): Input resolution. depth (int): Number of blocks. num_heads (int): Number of attention heads. window_size (int): Local window size. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 norm_layer (nn.Module, optional): Normalization layer. 
Default: libai.layers.LayerNorm downsample (nn.Module | None, optional): Downsample at the end of the layer. Default: None """ def __init__( self, dim, input_resolution, depth, num_heads, window_size, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, norm_layer=LayerNorm, downsample=None, layer_id_offset=0, ): super().__init__() self.dim = dim self.input_resolution = input_resolution self.depth = depth self.layer_id_offset = layer_id_offset # build blocks self.blocks = nn.ModuleList( [ SwinTransformerBlock( dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer, layer_idx=layer_id_offset + i, ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample( input_resolution, dim=dim, norm_layer=norm_layer, layer_idx=layer_id_offset + depth - 1, ) else: self.downsample = None def forward(self, x): layer_idx = self.layer_id_offset for i in range(len(self.blocks)): x = x.to_global(placement=dist.get_layer_placement(layer_idx)) x = self.blocks[i](x) layer_idx += 1 if self.downsample is not None: x = self.downsample(x) return x class SwinTransformer(nn.Module): """Swin Transformer in LiBai. LiBai implement of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows <https://arxiv.org/pdf/2103.14030>`_ Args: img_size (int, tuple(int)): Input image size. Default 224 patch_size (int, tuple(int)): Patch size. Default: 4 in_chans (int): Number of input image channels. Default: 3 num_classes (int): Number of classes for classification head. Default: 1000 embed_dim (int): Patch embedding dimension. Default: 96 depths (tuple(int)): Depth of each Swin Transformer layer. 
num_heads (tuple(int)): Number of attention heads in different layers. window_size (int): Window size. Default: 7 mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None drop_rate (float): Dropout rate. Default: 0 attn_drop_rate (float): Attention dropout rate. Default: 0 drop_path_rate (float): Stochastic depth rate. Default: 0.1 norm_layer (nn.Module): Normalization layer. Default: libai.layers.LayerNorm. ape (bool): If True, add absolute position embedding to the patch embedding. Default: False patch_norm (bool): If True, add normalization after patch embedding. Default: True loss_func (callable, optional): Loss function for computing the total loss between logits and labels """ @configurable def __init__( self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.1, norm_layer=LayerNorm, ape=False, patch_norm=True, loss_func=None, **kwargs, ): super().__init__() self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim = embed_dim self.ape = ape self.patch_norm = patch_norm self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio # split image into non-overlapping patches self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None, layer_idx=0, ) num_patches = self.patch_embed.num_patches patches_resolution = self.patch_embed.patches_resolution self.patches_resolution = patches_resolution # absolute position embedding if self.ape: self.absolute_pos_embed = nn.Parameter( flow.zeros(1, num_patches, embed_dim), placement=dist.get_layer_placement(0), 
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) trunc_normal_(self.absolute_pos_embed, std=0.02) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth dpr = [ x.item() for x in flow.linspace(0, drop_path_rate, sum(depths)) ] # stochastic depth decay rule # build layers self.layers = nn.ModuleList() layer_id_offset = 0 for i_layer in range(self.num_layers): layer = BasicLayer( dim=int(embed_dim * 2 ** i_layer), input_resolution=( patches_resolution[0] // (2 ** i_layer), patches_resolution[1] // (2 ** i_layer), ), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, layer_id_offset=layer_id_offset, ) layer_id_offset += depths[i_layer] self.layers.append(layer) self.norm = norm_layer(self.num_features, layer_idx=-1) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = ( Linear(self.num_features, num_classes, layer_idx=-1) if num_classes > 0 else nn.Identity() ) # Loss func self.loss_func = nn.CrossEntropyLoss() if loss_func is None else loss_func self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @classmethod def from_config(cls, cfg): return { "img_size": cfg.img_size, "patch_size": cfg.patch_size, "in_chans": cfg.in_chans, "num_classes": cfg.num_classes, "embed_dim": cfg.embed_dim, "depths": cfg.depths, "num_heads": cfg.num_heads, "window_size": cfg.window_size, "mlp_ratio": cfg.mlp_ratio, "qkv_bias": cfg.qkv_bias, "qk_scale": cfg.qk_scale, "drop_rate": cfg.drop_rate, "drop_path_rate": cfg.drop_path_rate, "ape": cfg.ape, 
"patch_norm": cfg.patch_norm, "loss_func": cfg.loss_func, } def forward_features(self, x): x = self.patch_embed(x) if self.ape: x = x + self.absolute_pos_embed x = self.pos_drop(x) for layer in self.layers: x = layer(x) x = self.norm(x) # B L C x = self.avgpool(x.transpose(1, 2)) # B C 1 x = flow.flatten(x, 1) return x def forward(self, images, labels=None): """ Args: images (flow.Tensor): training samples. labels (flow.LongTensor, optional): training targets Returns: dict: A dict containing :code:`loss_value` or :code:`logits` depending on training or evaluation mode. :code:`{"losses": loss_value}` when training, :code:`{"prediction_scores": logits}` when evaluating. """ x = self.forward_features(images) x = self.head(x) if labels is not None and self.training: losses = self.loss_func(x, labels) return {"losses": losses} else: return {"prediction_scores": x} @staticmethod def set_pipeline_stage_id(model): dist_utils = dist.get_dist_util() # Set pipeline parallelism stage_id if hasattr(model.patch_embed, "config"): # Old API in OneFlow 0.8 model.patch_embed.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) model.pos_drop.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) for module_block in model.modules(): if isinstance(module_block.origin, SwinTransformerBlock): module_block.config.set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) elif isinstance(module_block.origin, PatchMerging): module_block.config.set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) model.norm.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.head.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.avgpool.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.loss_func.config.set_stage( 
dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) else: model.patch_embed.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) model.pos_drop.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) for module_block in model.modules(): if isinstance(module_block.to(nn.Module), SwinTransformerBlock): module_block.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) elif isinstance(module_block.to(nn.Module), PatchMerging): module_block.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) model.norm.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.head.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.avgpool.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.loss_func.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) @staticmethod def set_activation_checkpoint(model): for module_block in model.modules(): if hasattr(module_block, "origin"): # Old API in OneFlow 0.8 if isinstance(module_block.origin, SwinTransformerBlock): module_block.config.activation_checkpointing = True else: if isinstance(module_block.to(nn.Module), SwinTransformerBlock): module_block.to(flow.nn.graph.GraphModule).activation_checkpointing = True
29,510
37.177232
100
py
libai
libai-main/libai/models/bert_model.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import oneflow as flow from oneflow import nn from libai.config import configurable from libai.layers import ( Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation, ) from libai.layers.attention import AttnMaskType from libai.utils import distributed as dist from .utils import init_method_normal, scaled_init_method_normal class BertExtendedAttnMask(nn.Module): def forward(self, attention_mask): # We create a 3D attention mask from a 2D tensor mask. 
# [b, 1, s] attention_mask_b1s = attention_mask.unsqueeze(1) # [b, s, 1] attention_mask_bs1 = attention_mask.unsqueeze(2) # [b, s, s] attention_mask_bss = attention_mask_b1s * attention_mask_bs1 # [b, 1, s, s] extended_attention_mask = attention_mask_bss.unsqueeze(1) return extended_attention_mask class BertEmbeddings(nn.Module): def __init__( self, vocab_size, hidden_size, max_sequence_length, embedding_dropout_prob, num_tokentypes=0, init_method=nn.init.xavier_normal_, amp_enabled=False, ): super().__init__() self.vocab_embeddings = VocabEmbedding( vocab_size, hidden_size, init_method=init_method, amp_enabled=amp_enabled ) self.position_embeddings = Embedding( max_sequence_length, hidden_size, init_method=init_method, amp_enabled=amp_enabled ) # NOTE(l1aoxingyu): Set position_ids sbp sign to [B, B] initially, because position_ids is a # 1D-tensor from 0 to seq_length, if set to [S(0), B] at first, then position_ids # will split at the first dim of hierarchy. self.position_ids = flow.arange( max_sequence_length, dtype=flow.long, sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), placement=dist.get_layer_placement(0), ).unsqueeze(0) if num_tokentypes > 0: self.tokentype_embeddings = Embedding( num_tokentypes, hidden_size, init_method=init_method, amp_enabled=amp_enabled ) self.tokentype_ids = flow.zeros( self.position_ids.size(), dtype=flow.long, sbp=self.position_ids.sbp, placement=self.position_ids.placement, ) else: self.tokentype_embeddings = None self.embedding_dropout = nn.Dropout(embedding_dropout_prob) def forward(self, input_ids, tokentype_ids=None, position_ids=None): seq_length = input_ids.size()[1] word_embeddings = self.vocab_embeddings(input_ids) if position_ids is None: # Change position_ids sbp sign: [B, B] -> [S(0), B] position_ids = ( self.position_ids[:, :seq_length].expand_as(input_ids).to_global(sbp=input_ids.sbp) ) position_embeddings = self.position_embeddings(position_ids) embeddings = word_embeddings + position_embeddings if 
self.tokentype_embeddings is not None: if tokentype_ids is None: tokentype_ids = ( self.tokentype_ids[:, :seq_length] .expand_as(input_ids) .to_global(sbp=input_ids.sbp) ) embeddings = embeddings + self.tokentype_embeddings(tokentype_ids) embeddings = self.embedding_dropout(embeddings) return embeddings def word_embeddings(self): return self.vocab_embeddings.weight class BertLMPredictionHead(nn.Module): def __init__(self, hidden_size, init_method): super().__init__() self.dense = Linear( hidden_size, hidden_size, bias=True, parallel="data", init_method=init_method, layer_idx=-1, ) self.activation_func = build_activation("gelu") self.layernorm = LayerNorm((hidden_size,), layer_idx=-1) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation_func(hidden_states) hidden_states = hidden_states.to_global( grad_sbp=dist.get_nd_sbp([flow.sbp.split(0), flow.sbp.split(2)]) ) # NOTE(l1aoxingyu): hidden_states shape is [B, S, H] whose sbp sign: [S(0), S(2)] # Change from [S(0), S(2)] -> [S(0), B] because layernorm cannot get inputs with sbp S(2) hidden_states = hidden_states.to_global( sbp=dist.get_nd_sbp([flow.sbp.split(0), flow.sbp.broadcast]) ) hidden_states = self.layernorm(hidden_states) return hidden_states class BertPooler(nn.Module): """Pooler layer. Pool hidden states of the first token and add a linear transformation followed by a tanh. 
Args: hidden_size: hidden state feature dimension """ def __init__(self, hidden_size, init_method): super().__init__() self.dense = Linear( hidden_size, hidden_size, bias=True, parallel="col", init_method=init_method, layer_idx=-1, ) self.activation_func = build_activation("tanh") def forward(self, hidden_states): """Just "pool" the model by simply taking the [CLS] token corresponding to the first token.""" # hidden_states: [bsz, seq_len, hidden_size] select_token_tensor = hidden_states[:, 0, :] pooled_output = self.dense(select_token_tensor) pooled_output = self.activation_func(pooled_output) return pooled_output class BertLoss(nn.Module): def __init__(self, add_binary_head): super().__init__() self.add_binary_head = add_binary_head self.lm_loss = ParallelCrossEntropyLoss() def forward(self, lm_output, lm_labels, loss_mask, binary_logits, ns_labels): lm_labels = lm_labels.to_global(placement=lm_output.placement) loss_mask = loss_mask.to_global(placement=lm_output.placement) binary_logits = binary_logits.to_global(placement=lm_output.placement) ns_labels = ns_labels.to_global(placement=lm_output.placement) lm_loss = self.lm_loss(lm_output, lm_labels) loss_mask = loss_mask.float() # Change loss_mask.sum() sbp sign from [P, B] -> [B, B] # because (lm_loss * loss_mask) / loss_mask.sum() cannot accept P / P denominator = ( loss_mask.sum().to_global(sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])) + 1e-7 ) masked_lm_loss = flow.sum(lm_loss.view(-1) * loss_mask.view(-1)) / denominator # NOTE(l1aoxingyu): Change lm loss sbp sign [P, P] -> [P, B] to add with sop loss # whose sbp sign: [P, B] masked_lm_loss = masked_lm_loss.to_global( sbp=dist.get_nd_sbp([flow.sbp.partial_sum, flow.sbp.broadcast]) ) loss_dict = {"lm_loss": masked_lm_loss} if self.add_binary_head: sop_loss = flow._C.cross_entropy( binary_logits, ns_labels, ignore_index=-1, reduction="none" ).mean() loss_dict["sop_loss"] = sop_loss return loss_dict class BertModel(nn.Module): """The bare Bert 
Model transformer outputting raw hidden-states without any specific head on top. Args: vocab_size (int): The size of vocabulary file. hidden_size (int): The size of hidden states. hidden_layers (int): The number of ``TransformerLayer`` in encoder. num_attention_heads (int): The number of attention heads for each attention layer of ``TransformerLayer``. intermediate_size (int): The size of intermediate layer in feed-forward network for each ``TransformerLayer``. hidden_dropout_prob (float, optional): The dropout ratio for the output for each TransformerLayer. Defaults to 0.0. attention_probs_dropout_prob (float, optional): The dropout ratio for the output of each attention layer in ``TransformerLayer``. Defaults to 0.0. max_position_embeddings (int): Max sequence length of input, defines the shape of Position Embeddings in ``BertEmbedding``. num_tokentypes (int, optional): Number of segment token indices. Defaults to 2. add_pooling_layer (bool, optional): Whether or not averaging or pooling the sequence of hidden-states for the whole input sequence. Defaults to ``True``. initializer_range (float, optional): Sigma of the normal distribution in the initialization method. Defaults to 0.02. layernorm_epsilon (float, optional): The epsilon of LayerNorm layer. Defaults to 1e-5. bias_gelu_fusion (bool, optional): Whether or not to fuse the computing of bias and gelu. Defaults to ``False``. bias_dropout_fusion (bool, optional): Whether or not to fuse the computing of dropout and bias. Defaults to ``False``. scale_mask_softmax_fusion (bool, optional): Whether to fuse the computing of mask and softmax in attention layers. Defaults to ``False``. apply_query_key_layer_scaling (bool, optional): Whether or not to use layer index related scaling in computing attention scores. If ``True``, the scaling factor equals to sqrt(d) * (layer_index + 1). Defaults to ``True``. 
apply_residual_post_layernorm (bool, optional): If set ``True``, use original BERT residual connection ordering otherwise use Megatron BERT residual connection which is more stable when scaling model size introduced in https://arxiv.org/pdf/1909.08053.pdf. Default: ``False``. amp_enabled (bool, optional): Whether or not to set fp16 for embedding weight in T5 model. Defaults to ``False``. """ @configurable def __init__( self, vocab_size, hidden_size, hidden_layers, num_attention_heads, intermediate_size, hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, num_tokentypes=2, add_pooling_layer=True, initializer_range=0.02, layernorm_eps=1e-12, bias_gelu_fusion=True, bias_dropout_fusion=True, scale_mask_softmax_fusion=True, apply_query_key_layer_scaling=True, apply_residual_post_layernorm=False, amp_enabled=False, ): super().__init__() init_method = init_method_normal(initializer_range) scaled_init_method = scaled_init_method_normal(initializer_range, hidden_layers) # Embeddings self.embeddings = BertEmbeddings( vocab_size, hidden_size, max_position_embeddings, hidden_dropout_prob, num_tokentypes, init_method, amp_enabled, ) # Mask generation self.extended_attn_mask = BertExtendedAttnMask() # Encoders self.encoders = nn.ModuleList( [ TransformerLayer( hidden_size, intermediate_size, num_attention_heads, attention_dropout_prob=attention_probs_dropout_prob, output_dropout_prob=hidden_dropout_prob, layernorm_epsilon=layernorm_eps, bias_gelu_fusion=bias_gelu_fusion, bias_dropout_fusion=bias_dropout_fusion, scale_mask_softmax_fusion=scale_mask_softmax_fusion, apply_query_key_layer_scaling=apply_query_key_layer_scaling, init_method=init_method, output_layer_init_method=scaled_init_method, apply_residual_post_layernorm=apply_residual_post_layernorm, attn_mask_type=AttnMaskType.padding, # bert mask type layer_idx=i, ) for i in range(hidden_layers) ] ) self.final_layernorm = LayerNorm((hidden_size,), eps=layernorm_eps, layer_idx=-1) self.pooler = 
BertPooler(hidden_size, init_method) if add_pooling_layer else None @classmethod def from_config(cls, cfg): return { "vocab_size": cfg.vocab_size, "hidden_size": cfg.hidden_size, "hidden_layers": cfg.hidden_layers, "num_attention_heads": cfg.num_attention_heads, "intermediate_size": cfg.intermediate_size, "hidden_dropout_prob": cfg.hidden_dropout_prob, "attention_probs_dropout_prob": cfg.attention_probs_dropout_prob, "max_position_embeddings": cfg.max_position_embeddings, "num_tokentypes": cfg.num_tokentypes, "add_pooling_layer": cfg.add_pooling_layer, "initializer_range": cfg.initializer_range, "layernorm_eps": cfg.layernorm_eps, "bias_gelu_fusion": cfg.bias_gelu_fusion, "bias_dropout_fusion": cfg.bias_dropout_fusion, "scale_mask_softmax_fusion": cfg.scale_mask_softmax_fusion, "apply_query_key_layer_scaling": cfg.apply_query_key_layer_scaling, "apply_residual_post_layernorm": cfg.apply_residual_post_layernorm, "amp_enabled": cfg.amp_enabled, } def forward(self, input_ids, attention_mask, tokentype_ids=None): """ Args: input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary. attention_mask (flow.BoolTensor): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. tokentype_ids (flow.LongTensor, optional): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`. Defaults to None. 
""" extended_attention_mask = self.extended_attn_mask(attention_mask) embedding_output = self.embeddings(input_ids, tokentype_ids) hidden_states = embedding_output for layer in self.encoders: hidden_states = layer(hidden_states, extended_attention_mask) encoder_output = self.final_layernorm(hidden_states) pooled_output = self.pooler(encoder_output) if self.pooler is not None else None return encoder_output, pooled_output def word_embeddings_weight(self): return self.embeddings.word_embeddings() class BertPreTrainingHeads(nn.Module): def __init__(self, vocab_size, hidden_size, init_method, add_binary_head=True): super().__init__() self.predictions = BertLMPredictionHead(hidden_size, init_method) self.seq_relationship = Linear( hidden_size, 2, bias=True, parallel="data", init_method=init_method, layer_idx=-1, ) self.lm_logits = LMLogits(vocab_size, bias=True) self.loss_func = BertLoss(add_binary_head) def forward( self, sequence_output, pooled_output, word_embeddings_weight, ns_labels, lm_labels, loss_mask, ): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) prediction_scores = self.lm_logits(prediction_scores, word_embeddings_weight) if lm_labels is not None: return self.loss_func( prediction_scores, lm_labels, loss_mask, seq_relationship_score, ns_labels ) return { "prediction_scores": prediction_scores, "seq_relationship_score": seq_relationship_score, } class BertForPreTraining(nn.Module): """Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next sentence prediction (classification)` head. 
""" def __init__(self, cfg): super().__init__() self.bert = BertModel(cfg) self.cls_head = BertPreTrainingHeads( cfg.vocab_size, cfg.hidden_size, init_method_normal(cfg.initializer_range), cfg.add_binary_head, ) def forward( self, input_ids, attention_mask, tokentype_ids=None, ns_labels=None, lm_labels=None, loss_mask=None, ): """ Args: input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary. attention_mask (flow.BoolTensor): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. tokentype_ids (flow.LongTensor, optional): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`. Defaults to None. ns_labels (flow.LongTensor, optional): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. lm_labels (flow.LongTensor, optional): Labels for computing the masked language modeling loss. Indices should be in `[-1, 0, ..., config.vocab_size]`. loss_mask (flow.BoolTensor, optional): Mask to avoid performing loss computing on ignored tokens. 
Tokens with indices set to `-1` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ input_ids = input_ids.to_global(placement=dist.get_layer_placement(0)) attention_mask = attention_mask.to_global(placement=dist.get_layer_placement(0)) tokentype_ids = tokentype_ids.to_global(placement=dist.get_layer_placement(0)) outputs = self.bert(input_ids, attention_mask, tokentype_ids) sequence_output, pooled_output = outputs[:2] return self.cls_head( sequence_output, pooled_output, self.bert.word_embeddings_weight(), ns_labels, lm_labels, loss_mask, ) @staticmethod def set_pipeline_stage_id(model): dist_utils = dist.get_dist_util() # Set pipeline parallelism stage_id if hasattr(model.bert.final_layernorm, "config"): # Old API in OneFlow 0.8 for module_block in model.modules(): # module.origin can get the original module if isinstance(module_block.origin, BertEmbeddings): module_block.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.origin, BertExtendedAttnMask): module_block.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.origin, TransformerLayer): module_block.config.set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) elif isinstance(module_block.origin, BertPooler): module_block.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) elif isinstance(module_block.origin, BertPreTrainingHeads): module_block.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) # Set the last layernorm stage id model.bert.final_layernorm.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) else: for module_block in model.modules(): if isinstance(module_block.to(nn.Module), BertEmbeddings): module_block.to(nn.graph.GraphModule).set_stage( 
dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.to(nn.Module), BertExtendedAttnMask): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.to(nn.Module), TransformerLayer): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) elif isinstance(module_block.to(nn.Module), BertPooler): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) elif isinstance(module_block.to(nn.Module), BertPreTrainingHeads): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) # Set the last layernorm stage id model.bert.final_layernorm.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) class BertForClassification(nn.Module): def __init__(self, cfg): super().__init__() self.cfg = cfg self.num_labels = cfg.num_labels self.bert = BertModel(cfg) self.classifier = Linear( cfg.hidden_size, cfg.num_labels, bias=True, parallel="row", init_method=init_method_normal(cfg.initializer_range), layer_idx=-1, ) classifier_dropout = ( cfg.classifier_dropout if cfg.classifier_dropout is not None else cfg.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) def forward(self, input_ids, attention_mask, tokentype_ids=None, labels=None, **kwargs): labels = labels if labels is not None else kwargs.get("ns_labels") outputs = self.bert(input_ids, attention_mask, tokentype_ids) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) if labels is not None: loss_fct = nn.CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) loss = loss.to_global(sbp=dist.get_nd_sbp([flow.sbp.partial_sum, flow.sbp.broadcast])) 
return {"cls_loss": loss} else: return {"logits": logits}
24,272
40.001689
100
py
libai
libai-main/libai/models/vision_transformer.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import oneflow as flow import oneflow.nn as nn from flowvision.layers.weight_init import trunc_normal_ import libai.utils.distributed as dist from libai.config.config import configurable from libai.layers import LayerNorm, Linear, PatchEmbedding, TransformerLayer class VisionTransformer(nn.Module): """Vision Transformer in LiBai. LiBai's implementation of: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_ Args: img_size (int, tuple(int)): input image size patch_size (int, tuple(int)): patch size in_chans (int): number of input channels embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate num_classes (int): number of classes for classification head loss_func (callable, optional): loss function for computing the total loss between logits and labels """ @configurable def __init__( self, img_size=224, patch_size=16, in_chans=3, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4.0, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, num_classes=1000, loss_func=None, ): super().__init__() self.img_size = img_size self.num_classes = num_classes self.patch_embed = 
PatchEmbedding( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) ffn_size = int(embed_dim * mlp_ratio) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter( flow.zeros( 1, 1, embed_dim, sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), placement=dist.get_layer_placement(0), ) ) self.pos_embed = nn.Parameter( flow.zeros( 1, num_patches + 1, embed_dim, sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), placement=dist.get_layer_placement(0), ) ) self.pos_drop = nn.Dropout(p=drop_rate) dpr = [ x.item() for x in flow.linspace(0, drop_path_rate, depth) ] # stochastic depth decay rule self.blocks = nn.Sequential( *[ TransformerLayer( hidden_size=embed_dim, ffn_hidden_size=ffn_size, num_attention_heads=num_heads, attention_dropout_prob=attn_drop_rate, output_dropout_prob=drop_rate, drop_path_prob=dpr[i], layer_idx=i, ) for i in range(depth) ] ) self.norm = LayerNorm(embed_dim, layer_idx=-1) self.head = Linear(embed_dim, num_classes, layer_idx=-1) # loss func self.loss_func = nn.CrossEntropyLoss() if loss_func is None else loss_func # weight init trunc_normal_(self.pos_embed, std=0.02) trunc_normal_(self.cls_token, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, Linear): trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def no_weight_decay(self): return {"pos_embed", "cls_token"} @classmethod def from_config(cls, cfg): return { "img_size": cfg.img_size, "patch_size": cfg.patch_size, "in_chans": cfg.in_chans, "embed_dim": cfg.embed_dim, "depth": cfg.depth, "num_heads": cfg.num_heads, "mlp_ratio": cfg.mlp_ratio, "drop_rate": cfg.drop_rate, "attn_drop_rate": cfg.attn_drop_rate, "drop_path_rate": cfg.drop_path_rate, "num_classes": cfg.num_classes, "loss_func": cfg.loss_func, } def forward_features(self, x): # patch embedding x = 
self.patch_embed(x) cls_token = self.cls_token.expand( x.shape[0], -1, -1 ) # stole cls_tokens impl from Phil Wang, thanks cls_token = cls_token.to_global(sbp=x.sbp, placement=cls_token.placement) x = flow.cat((cls_token, x), dim=1) # position embedding pos_embed = self.pos_embed.expand(x.shape[0], -1, -1) pos_embed = pos_embed.to_global(sbp=x.sbp, placement=pos_embed.placement) x = self.pos_drop(x + pos_embed) # transformer block x = self.blocks(x) return x def forward_head(self, x): x = self.norm(x) outcome = x[:, 0] outcome = self.head(outcome) return outcome def forward(self, images, labels=None): """ Args: images (flow.Tensor): training samples. labels (flow.LongTensor, optional): training targets Returns: dict: A dict containing :code:`loss_value` or :code:`logits` depending on training or evaluation mode. :code:`{"losses": loss_value}` when training, :code:`{"prediction_scores": logits}` when evaluating. """ x = self.forward_features(images) x = self.forward_head(x) if labels is not None and self.training: losses = self.loss_func(x, labels) return {"losses": losses} else: return {"prediction_scores": x} @staticmethod def set_pipeline_stage_id(model): dist_utils = dist.get_dist_util() # Set pipeline parallelism stage_id if hasattr(model.pos_embed, "config"): # Old API in OneFlow 0.8 for module_block in model.modules(): if isinstance(module_block.origin, PatchEmbedding): module_block.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.origin, TransformerLayer): module_block.config.set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) # Set pos_embed and cls_token stage id model.pos_embed.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) model.cls_token.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) model.pos_drop.config.set_stage( dist_utils.get_layer_stage_id(0), 
dist.get_layer_placement(0) ) model.norm.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.head.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.loss_func.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) else: for module_block in model.modules(): if isinstance(module_block.to(nn.Module), PatchEmbedding): module_block.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.to(nn.Module), TransformerLayer): module_block.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) # Set pos_embed and cls_token stage id model.pos_embed.to(flow.nn.graph.GraphTensor).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) model.cls_token.to(flow.nn.graph.GraphTensor).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) model.pos_drop.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) model.norm.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.head.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.loss_func.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) )
10,057
36.529851
85
py
libai
libai-main/libai/models/resmlp.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -------------------------------------------------------- # ResMLP Model # References: # resmlp: https://github.com/facebookresearch/deit/blob/main/resmlp_models.py # -------------------------------------------------------- import oneflow as flow import oneflow.nn as nn from flowvision.layers.weight_init import trunc_normal_ import libai.utils.distributed as dist from libai.config import configurable from libai.layers import MLP, DropPath, LayerNorm, Linear, PatchEmbedding class Affine(nn.Module): def __init__(self, dim, *, layer_idx=0): super().__init__() self.alpha = nn.Parameter( flow.ones( dim, placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) ) self.beta = nn.Parameter( flow.zeros( dim, placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ), ) self.layer_idx = layer_idx def forward(self, x): x = x.to_global(placement=dist.get_layer_placement(self.layer_idx)) return self.alpha * x + self.beta class layers_scale_mlp_blocks(nn.Module): def __init__( self, dim, drop=0.0, drop_path=0.0, init_values=1e-4, num_patches=196, *, layer_idx=0 ): super().__init__() self.norm1 = Affine(dim, layer_idx=layer_idx) self.attn = Linear(num_patches, num_patches, layer_idx=layer_idx) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() 
self.norm2 = Affine(dim, layer_idx=layer_idx) self.mlp = MLP(hidden_size=dim, ffn_hidden_size=int(4.0 * dim), layer_idx=layer_idx) self.gamma_1 = nn.Parameter( init_values * flow.ones( dim, sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), placement=dist.get_layer_placement(layer_idx), ), requires_grad=True, ) self.gamma_2 = nn.Parameter( init_values * flow.ones( dim, sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), placement=dist.get_layer_placement(layer_idx), ), requires_grad=True, ) self.layer_idx = layer_idx def forward(self, x): x = x.to_global(placement=dist.get_layer_placement(self.layer_idx)) x = x + self.drop_path( self.gamma_1 * self.attn(self.norm1(x).transpose(1, 2)).transpose(1, 2) ) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x class ResMLP(nn.Module): """ResMLP in LiBai. LiBai's implementation of: `ResMLP: Feedforward networks for image classification with data-efficient training <https://arxiv.org/abs/2105.03404>`_ Args: img_size (int, tuple(int)): input image size patch_size (int, tuple(int)): patch size in_chans (int): number of input channels embed_dim (int): embedding dimension depth (int): depth of transformer drop_rate (float): dropout rate drop_path_rate (float): stochastic depth rate init_scale (float): the layer scale ratio num_classes (int): number of classes for classification head loss_func (callable, optional): loss function for computing the total loss between logits and labels """ @configurable def __init__( self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, depth=12, drop_rate=0.0, drop_path_rate=0.0, init_scale=1e-4, num_classes=1000, loss_func=None, ): super().__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim self.patch_embed = PatchEmbedding( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) num_patches = self.patch_embed.num_patches dpr = [drop_path_rate for i in range(depth)] # stochastic 
depth decay rule self.blocks = nn.ModuleList( [ layers_scale_mlp_blocks( dim=embed_dim, drop=drop_rate, drop_path=dpr[i], init_values=init_scale, num_patches=num_patches, layer_idx=i, ) for i in range(depth) ] ) self.norm = Affine(embed_dim, layer_idx=-1) self.head = ( Linear(embed_dim, num_classes, layer_idx=-1) if num_classes > 0 else nn.Identity() ) # loss func self.loss_func = nn.CrossEntropyLoss() if loss_func is None else loss_func # weight init self.apply(self._init_weights) @classmethod def from_config(cls, cfg): return { "img_size": cfg.img_size, "patch_size": cfg.patch_size, "in_chans": cfg.in_chans, "embed_dim": cfg.embed_dim, "depth": cfg.depth, "drop_rate": cfg.drop_rate, "drop_path_rate": cfg.drop_path_rate, "init_scale": cfg.init_scale, "num_classes": cfg.num_classes, "loss_func": cfg.loss_func, } def _init_weights(self, m): if isinstance(m, Linear): trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward_features(self, x): x = self.patch_embed(x) # layer scale mlp blocks for i, blk in enumerate(self.blocks): x = blk(x) return x def forward_head(self, x): B = x.shape[0] x = self.norm(x) x = x.mean(dim=1).reshape(B, 1, -1) return self.head(x[:, 0]) def forward(self, images, labels=None): """ Args: images (flow.Tensor): training samples. labels (flow.LongTensor, optional): training targets Returns: dict: A dict containing :code:`loss_value` or :code:`logits` depending on training or evaluation mode. :code:`{"losses": loss_value}` when training, :code:`{"prediction_scores": logits}` when evaluating. 
""" x = self.forward_features(images) x = self.forward_head(x) if labels is not None and self.training: losses = self.loss_func(x, labels) return {"losses": losses} else: return {"prediction_scores": x} @staticmethod def set_pipeline_stage_id(model): dist_utils = dist.get_dist_util() # Set pipeline parallelism stage_id if hasattr(model.loss_func, "config"): # Old API in OneFlow 0.8 for module_block in model.modules(): # module.origin can get the original module if isinstance(module_block.origin, PatchEmbedding): module_block.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.origin, layers_scale_mlp_blocks): module_block.config.set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) # Set norm and head stage id model.norm.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.head.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.loss_func.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) else: for module_block in model.modules(): if isinstance(module_block.to(nn.Module), PatchEmbedding): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.to(nn.Module), layers_scale_mlp_blocks): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) # Set norm and head stage id model.norm.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.head.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.loss_func.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) @staticmethod def set_activation_checkpoint(model): 
for module_block in model.modules(): if hasattr(module_block, "origin"): # Old API in OneFlow 0.8 if isinstance(module_block.origin, layers_scale_mlp_blocks): module_block.config.activation_checkpointing = True else: if isinstance(module_block.to(nn.Module), layers_scale_mlp_blocks): module_block.to(nn.graph.GraphModule).activation_checkpointing = True
10,622
34.888514
94
py
libai
libai-main/libai/models/__init__.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .bert_model import BertForPreTraining, BertModel, BertForClassification from .roberta_model import RobertaForPreTraining, RobertaForCausalLM, RobertaModel from .build import build_graph, build_model from .t5_model import T5ForPreTraining, T5Model from .gpt_model import GPTForPreTraining, GPTModel from .vision_transformer import VisionTransformer from .swin_transformer import SwinTransformer from .swin_transformer_v2 import SwinTransformerV2 from .resmlp import ResMLP __all__ = [ "build_model", "build_graph", "BertModel", "BertForPreTraining", "BertForClassification", "RobertaModel", "RobertaForCausalLM", "RobertaForPreTraining", "T5Model", "T5ForPreTraining", "GPTModel", "GPTForPreTraining", "VisionTransformer", "SwinTransformer", "SwinTransformerV2", "ResMLP", ]
1,466
31.6
82
py
libai
libai-main/libai/models/roberta_model.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import oneflow as flow from oneflow import nn from libai.config import configurable from libai.layers import ( Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation, ) from libai.utils import distributed as dist from .bert_model import BertEmbeddings, BertExtendedAttnMask, BertModel, BertPooler from .utils import init_method_normal class RobertaExtendedAttnMask(BertExtendedAttnMask): """ Same as BertExtendedAttnMask. """ class RobertaEmbeddings(BertEmbeddings): """ Same as BertEmbeddings with a tiny tweak for vocab_embeddings and position_embeddings. 
""" def __init__( self, vocab_size, hidden_size, max_sequence_length, embedding_dropout_prob, num_tokentypes=0, pad_token_id=1, init_method=nn.init.xavier_normal_, amp_enabled=False, ): super().__init__( vocab_size, hidden_size, max_sequence_length, embedding_dropout_prob, num_tokentypes=num_tokentypes, init_method=init_method, amp_enabled=amp_enabled, ) self.pad_token_id = pad_token_id self.vocab_embeddings = VocabEmbedding( vocab_size, hidden_size, init_method=init_method, amp_enabled=amp_enabled, padding_idx=pad_token_id, ) self.position_embeddings = Embedding( max_sequence_length, hidden_size, init_method=init_method, amp_enabled=amp_enabled, padding_idx=pad_token_id, ) if num_tokentypes > 0: self.tokentype_embeddings = Embedding( num_tokentypes, hidden_size, init_method=init_method, amp_enabled=amp_enabled ) self.tokentype_ids = flow.zeros( 1, max_sequence_length, dtype=flow.long, sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), placement=dist.get_layer_placement(0), ) else: self.tokentype_embeddings = None def forward(self, input_ids, tokentype_ids=None, position_ids=None): seq_length = input_ids.size()[1] word_embeddings = self.vocab_embeddings(input_ids) if position_ids is None: position_ids = self.create_position_ids_from_input_ids(input_ids, self.pad_token_id) position_embeddings = self.position_embeddings(position_ids) embeddings = word_embeddings + position_embeddings if self.tokentype_embeddings is not None: if tokentype_ids is None: tokentype_ids = ( self.tokentype_ids[:, :seq_length] .expand_as(input_ids) .to_global(sbp=input_ids.sbp) ) embeddings = embeddings + self.tokentype_embeddings(tokentype_ids) embeddings = self.embedding_dropout(embeddings) return embeddings def create_position_ids_from_input_ids(self, input_ids, pad_token_id): mask = input_ids.ne(pad_token_id).int() position_ids = (flow.cumsum(mask, dim=1).type_as(mask)) * mask + pad_token_id position_ids = position_ids.to_global(sbp=input_ids.sbp, 
placement=input_ids.placement) return position_ids class RobertaPooler(BertPooler): """ Same as BertPooler. """ class RobertaLoss(nn.Module): def __init__(self): super().__init__() self.lm_loss = ParallelCrossEntropyLoss() def forward(self, lm_output, lm_labels, loss_mask): lm_labels = lm_labels.to_global(placement=lm_output.placement) loss_mask = loss_mask.to_global(placement=lm_output.placement) lm_loss = self.lm_loss(lm_output, lm_labels) loss_mask = loss_mask.float() # Change loss_mask.sum() sbp sign from [P, B] -> [B, B] # because (lm_loss * loss_mask) / loss_mask.sum() cannot accept P / P denominator = loss_mask.sum().to_global( sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]) ) masked_lm_loss = flow.sum(lm_loss.view(-1) * loss_mask.view(-1)) / denominator masked_lm_loss = masked_lm_loss.to_global( sbp=dist.get_nd_sbp([flow.sbp.partial_sum, flow.sbp.broadcast]) ) loss_dict = {"lm_loss": masked_lm_loss} return loss_dict class RobertaModel(BertModel): """The bare Roberta Model transformer outputting raw hidden-states without any specific head on top. Args: vocab_size (int): The size of vocabulary file. hidden_size (int): The size of hidden states. hidden_layers (int): The number of ``TransformerLayer`` in encoder. num_attention_heads (int): The number of attention heads for each attention layer of ``TransformerLayer``. intermediate_size (int): The size of intermediate layer in feed-forward network for each ``TransformerLayer``. hidden_dropout_prob (float, optional): The dropout ratio for the output for each TransformerLayer. Defaults to 0.0. attention_probs_dropout_prob (float, optional): The dropout ratio for the output of each attention layer in ``TransformerLayer``. Defaults to 0.0. max_position_embeddings (int): Max sequence length of input, defines the shape of Position Embeddings in ``RobertaEmbeddings``. type_vocab_size (int, optional): Number of segment token indices. Defaults to 2. 
add_pooling_layer (bool, optional): Whether or not averaging or pooling the sequence of hidden-states for the whole input sequence. Defaults to ``True``. initializer_range (float, optional): Sigma of the normal distribution in the initialization method. Defaults to 0.02. layer_norm_eps (float, optional): The epsilon of LayerNorm layer. Defaults to 1e-5. pad_token_id (int, optional): The token id used for padding. Defaults to 1. bias_gelu_fusion (bool, optional): Whether or not to fuse the computing of bias and gelu. Defaults to ``False``. bias_dropout_fusion (bool, optional): Whether or not to fuse the computing of dropout and bias. Defaults to ``False``. scale_mask_softmax_fusion (bool, optional): Whether to fuse the computing of mask and softmax in attention layers. Defaults to ``False``. apply_query_key_layer_scaling (bool, optional): Whether or not to use layer index related scaling in computing attention scores. If ``True``, the scaling factor equals to sqrt(d) * (layer_index + 1). Defaults to ``True``. apply_residual_post_layernorm (bool, optional): If set ``True``, use original BERT(Roberta) residual connection ordering otherwise use Megatron BERT residual connection which is more stable when scaling model size introduced in https://arxiv.org/pdf/1909.08053.pdf. Default: ``False``. amp_enabled (bool, optional): Whether or not to set fp16 for embedding weight in T5 model. Defaults to ``False``. 
""" @configurable def __init__( self, vocab_size, hidden_size, hidden_layers, num_attention_heads, intermediate_size, hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, num_tokentypes=2, add_pooling_layer=True, initializer_range=0.02, layernorm_eps=1e-12, pad_token_id=1, bias_gelu_fusion=True, bias_dropout_fusion=True, scale_mask_softmax_fusion=True, apply_query_key_layer_scaling=True, apply_residual_post_layernorm=False, amp_enabled=False, ): super().__init__( vocab_size, hidden_size, hidden_layers, num_attention_heads, intermediate_size, hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, num_tokentypes=num_tokentypes, add_pooling_layer=add_pooling_layer, initializer_range=initializer_range, layernorm_eps=layernorm_eps, bias_gelu_fusion=bias_gelu_fusion, bias_dropout_fusion=bias_dropout_fusion, scale_mask_softmax_fusion=scale_mask_softmax_fusion, apply_query_key_layer_scaling=apply_query_key_layer_scaling, apply_residual_post_layernorm=apply_residual_post_layernorm, amp_enabled=amp_enabled, ) init_method = init_method_normal(initializer_range) # Embeddings self.embeddings = RobertaEmbeddings( vocab_size, hidden_size, max_position_embeddings, hidden_dropout_prob, num_tokentypes, pad_token_id, init_method, amp_enabled, ) # Mask generation self.extended_attn_mask = RobertaExtendedAttnMask() self.pooler = RobertaPooler(hidden_size, init_method) if add_pooling_layer else None @classmethod def from_config(cls, cfg): return { "vocab_size": cfg.vocab_size, "hidden_size": cfg.hidden_size, "hidden_layers": cfg.hidden_layers, "num_attention_heads": cfg.num_attention_heads, "intermediate_size": cfg.intermediate_size, "hidden_dropout_prob": cfg.hidden_dropout_prob, "attention_probs_dropout_prob": cfg.attention_probs_dropout_prob, "max_position_embeddings": cfg.max_position_embeddings, "num_tokentypes": cfg.num_tokentypes, "add_pooling_layer": cfg.add_pooling_layer, "initializer_range": cfg.initializer_range, "layernorm_eps": 
cfg.layernorm_eps, "pad_token_id": cfg.pad_token_id, "bias_gelu_fusion": cfg.bias_gelu_fusion, "bias_dropout_fusion": cfg.bias_dropout_fusion, "scale_mask_softmax_fusion": cfg.scale_mask_softmax_fusion, "apply_query_key_layer_scaling": cfg.apply_query_key_layer_scaling, "apply_residual_post_layernorm": cfg.apply_residual_post_layernorm, "amp_enabled": cfg.amp_enabled, } class RobertaLMHead(nn.Module): def __init__(self, vocab_size, hidden_size, init_method, layer_norm_eps): super().__init__() self.dense = Linear( hidden_size, hidden_size, bias=True, parallel="data", init_method=init_method, layer_idx=-1, ) self.activation_func = build_activation("gelu") self.layernorm = LayerNorm((hidden_size,), eps=layer_norm_eps, layer_idx=-1) # NOTE(xzp): LMLogits as a decoder:nn.Linear(hidden_size, vocab_size), # it shares the roberta.word_embeddings.weight self.lm_logits = LMLogits(vocab_size, bias=True) def forward(self, hidden_states, word_embeddings_weight): hidden_states = self.dense(hidden_states) hidden_states = self.activation_func(hidden_states) hidden_states = hidden_states.to_global( sbp=dist.get_nd_sbp([flow.sbp.split(0), flow.sbp.broadcast]) ) hidden_states = self.layernorm(hidden_states) hidden_states = self.lm_logits(hidden_states, word_embeddings_weight) return hidden_states class RobertaPreTrainedModel(nn.Module): @staticmethod def set_pipeline_stage_id(model): dist_utils = dist.get_dist_util() # Set pipeline parallelism stage_id if hasattr(model.roberta.final_layernorm, "config"): # Old API in OneFlow 0.8 for module_block in model.modules(): # module.origin can get the original module if isinstance(module_block.origin, RobertaEmbeddings): module_block.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.origin, RobertaExtendedAttnMask): module_block.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.origin, TransformerLayer): 
module_block.config.set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) # `add_pooling_layer` in RobertaForMaskedLM and RobertaForCausalLM. # default to False. elif isinstance(module_block.origin, RobertaPooler): module_block.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) elif isinstance(module_block.origin, RobertaLMHead): module_block.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) # Set the last layernorm stage id model.roberta.final_layernorm.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) else: for module_block in model.modules(): # module.origin can get the original module if isinstance(module_block.to(nn.Module), RobertaEmbeddings): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.to(nn.Module), RobertaExtendedAttnMask): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) elif isinstance(module_block.to(nn.Module), TransformerLayer): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) # `add_pooling_layer` in RobertaForMaskedLM and RobertaForCausalLM. # default to False. 
elif isinstance(module_block.to(nn.Module), RobertaPooler): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) elif isinstance(module_block.to(nn.Module), RobertaLMHead): module_block.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) # Set the last layernorm stage id model.roberta.final_layernorm.to(nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) class RobertaForPreTraining(RobertaPreTrainedModel): def __init__(self, cfg): super().__init__() cfg.add_pooling_layer = False self.roberta = RobertaModel(cfg) self.lm_head = RobertaLMHead( cfg.vocab_size, cfg.hidden_size, init_method_normal(cfg.initializer_range), cfg.layernorm_eps, ) self.loss_fc = RobertaLoss() def forward( self, input_ids, attention_mask, tokentype_ids=None, lm_labels=None, loss_mask=None, ): """ Args: input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary. attention_mask (flow.BoolTensor): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. tokentype_ids (flow.LongTensor, optional): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`. Defaults to None. labels (flow.LongTensor, optional): Labels for computing the masked language modeling loss. Indices should be in `[-1, 0, ..., config.vocab_size]`. Defaults to None. loss_mask (flow.BoolTensor, optional): Mask to avoid performing loss computing on ignored tokens. Tokens with indices set to `-1` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Defaults to None. 
""" input_ids = input_ids.to_global(placement=dist.get_layer_placement(0)) attention_mask = attention_mask.to_global(placement=dist.get_layer_placement(0)) tokentype_ids = tokentype_ids.to_global(placement=dist.get_layer_placement(0)) outputs = self.roberta(input_ids, attention_mask, tokentype_ids=tokentype_ids) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output, self.roberta.word_embeddings_weight()) if lm_labels is not None: return self.loss_fc(prediction_scores, lm_labels, loss_mask) return {"prediction_scores": prediction_scores} class RobertaForCausalLM(RobertaPreTrainedModel): def __init__(self, cfg): super().__init__() cfg.add_pooling_layer = False self.roberta = RobertaModel(cfg) self.lm_head = RobertaLMHead( cfg.vocab_size, cfg.hidden_size, init_method_normal(cfg.initializer_range), cfg.layernorm_eps, ) self.loss_fc = RobertaLoss() def forward( self, input_ids, attention_mask, tokentype_ids=None, position_ids=None, labels=None, loss_mask=None, ): """ Args: input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary. attention_mask (flow.BoolTensor): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. tokentype_ids (flow.LongTensor, optional): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`. Defaults to None. position_ids (flow.LongTensor, optional): Indices of positions of each input sequence tokens in the position embeddings. Defaults to None. labels (flow.LongTensor, optional): Labels for computing the masked language modeling loss. Indices should be in `[-1, 0, ..., config.vocab_size]`. Defaults to None. loss_mask (flow.BoolTensor, optional): Mask to avoid performing loss computing on ignored tokens. 
Tokens with indices set to `-1` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Defaults to None. """ outputs = self.roberta(input_ids, attention_mask, position_ids, tokentype_ids) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output, self.roberta.word_embeddings_weight()) if labels is not None: # next-token prediction task, shift prediction_scores and labels by one. shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() shifted_prediction_scores = shifted_prediction_scores.to_global( sbp=prediction_scores.sbp ) shifted_labels = labels[:, 1:].contiguous() shifted_labels = shifted_labels.to_global(sbp=shifted_labels.sbp) lm_loss = self.loss_fc(shifted_prediction_scores, shifted_labels, loss_mask) return {"lm_loss": lm_loss} return {"prediction_scores": prediction_scores}
21,479
40.871345
99
py
libai
libai-main/libai/models/swin_transformer_v2.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import oneflow as flow import oneflow.nn as nn import oneflow.nn.functional as F from flowvision.layers import trunc_normal_ from flowvision.models import to_2tuple from libai.config.config import configurable from libai.layers import MLP, DropPath, LayerNorm, Linear from libai.utils import distributed as dist def window_partition(x, window_size): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows def window_reverse(windows, window_size, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x class WindowAttention(nn.Module): r"""Window based multi-head self attention (W-MSA) module with relative position bias. It supports both of shifted and non-shifted window. Args: dim (int): Number of input channels. 
window_size (tuple[int]): The height and width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. Default: 0.0 pretrained_window_size (tuple[int]): The height and width of the window in pre-training. """ def __init__( self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0.0, proj_drop=0.0, pretrained_window_size=[0, 0], fused_bias_add_dropout=False, layer_idx=0, ): super().__init__() self.dim = dim self.window_size = window_size # Wh, Ww self.pretrained_window_size = pretrained_window_size self.fused_bias_add_dropout = fused_bias_add_dropout self.num_heads = num_heads self.layer_idx = layer_idx self.p = proj_drop self.logit_scale = nn.Parameter( flow.log( 10 * flow.ones( 1, num_heads, 1, 1, placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) ), requires_grad=True, ) # NOTE: generate meta network, using mlp to generate continuous relative position bias self.cpb_mlp = nn.Sequential( Linear(2, 512, bias=True, layer_idx=layer_idx), nn.ReLU(inplace=True), Linear(512, num_heads, bias=False, layer_idx=layer_idx), ).to_global( placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) # NOTE: get relative_coords_table relative_coords_h = flow.arange( -(self.window_size[0] - 1), self.window_size[0], dtype=flow.float32 ) relative_coords_w = flow.arange( -(self.window_size[1] - 1), self.window_size[1], dtype=flow.float32 ) relative_coords_table = ( flow.stack(flow.meshgrid(*[relative_coords_h, relative_coords_w])) .permute(1, 2, 0) .contiguous() .unsqueeze(0) ) # 1, 2*Wh-1, 2*Ww-1, 2 # NOTE: For any relative coordinate, constrain it to -8~8 (window size) if pretrained_window_size[0] > 0: relative_coords_table[:, :, :, 0] = 
relative_coords_table[:, :, :, 0] / ( pretrained_window_size[0] - 1 ) relative_coords_table[:, :, :, 1] = relative_coords_table[:, :, :, 1] / ( pretrained_window_size[1] - 1 ) else: relative_coords_table[:, :, :, 0] = relative_coords_table[:, :, :, 0] / ( self.window_size[0] - 1 ) relative_coords_table[:, :, :, 1] = relative_coords_table[:, :, :, 1] / ( self.window_size[1] - 1 ) relative_coords_table = relative_coords_table * 8 # NOTE: y=sign(x)*log(|x|+1) relative_coords_table = ( flow.sign(relative_coords_table) * flow.log2(flow.abs(relative_coords_table) + 1.0) / math.log2(8.0) ) self.register_buffer( "relative_coords_table", relative_coords_table.to_global( placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ), ) # NOTE: get pair-wise relative position index for each token inside the window coords_h = flow.arange(self.window_size[0]) coords_w = flow.arange(self.window_size[1]) coords = flow.stack(flow.meshgrid(*[coords_h, coords_w])) # 2, Wh, Ww coords_flatten = flow.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] = ( relative_coords[:, :, 0] + self.window_size[0] - 1 ) # shift to start from 0 relative_coords[:, :, 1] = relative_coords[:, :, 1] + self.window_size[1] - 1 relative_coords[:, :, 0] = relative_coords[:, :, 0] * (2 * self.window_size[1] - 1) relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww self.register_buffer( "relative_position_index", relative_position_index.to_global( placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ), ) self.qkv = Linear(dim, dim * 3, bias=False, layer_idx=layer_idx) if qkv_bias: self.q_bias = nn.Parameter( flow.zeros( dim, placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, 
flow.sbp.broadcast]), ) ) self.v_bias = nn.Parameter( flow.zeros( dim, placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) ) else: self.q_bias = None self.v_bias = None self.attn_drop = nn.Dropout(attn_drop) self.proj = Linear(dim, dim, layer_idx=layer_idx) self.proj_drop = nn.Dropout(proj_drop) self.softmax = nn.Softmax(dim=-1) def forward(self, x, mask=None): """ Args: x: input features with shape of (num_windows*B, N, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None """ B_, N, C = x.shape qkv_bias = None if self.q_bias is not None: qkv_bias = flow.concat( [ self.q_bias, flow.zeros( self.v_bias.shape, requires_grad=False, placement=dist.get_layer_placement( self.layer_idx, device_type=self.v_bias.placement.type ), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ), self.v_bias, ], dim=0, ) qkv = self.qkv(x) + qkv_bias.unsqueeze(0).unsqueeze(0) qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # NOTE: cosine attention attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1) # NOTE: a learnable scalar logit_scale = flow.clamp(self.logit_scale, min=-1e6, max=math.log(1.0 / 0.01)).exp() attn = attn * logit_scale # NOTE: use relative_coords_table and meta network to generate relative_position_bias relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view( -1, self.num_heads ) relative_position_bias = relative_position_bias_table[ self.relative_position_index.view(-1) ].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute( 2, 0, 1 ).contiguous() # nH, Wh*Ww, Wh*Ww # NOTE: constrained to a range of -16~16 relative_position_bias = 16 * flow.sigmoid(relative_position_bias).unsqueeze(0) attn = attn + relative_position_bias if mask is not None: nW = mask.shape[0] attn = 
attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B_, N, C) if self.fused_bias_add_dropout: x = flow._C.matmul(x, self.proj.weight, transpose_a=False, transpose_b=True) x = flow._C.fused_bias_add_dropout(x, self.proj.bias, p=self.p, axis=2) else: x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerBlock(nn.Module): r"""Swin Transformer Block. Args: dim (int): Number of input channels. input_resolution (tuple[int]): Input resulotion. num_heads (int): Number of attention heads. window_size (int): Window size. shift_size (int): Shift size for SW-MSA. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float, optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm pretrained_window_size (int): Window size in pre-training. 
""" def __init__( self, dim, input_resolution, num_heads, window_size=7, shift_size=0, mlp_ratio=4.0, qkv_bias=True, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=LayerNorm, pretrained_window_size=0, layer_idx=0, ): super().__init__() self.dim = dim self.input_resolution = input_resolution self.num_heads = num_heads self.window_size = window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio self.layer_idx = layer_idx if min(self.input_resolution) <= self.window_size: # if window size is larger than input resolution, we don't partition windows self.shift_size = 0 self.window_size = min(self.input_resolution) assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" self.norm1 = norm_layer(dim, layer_idx=layer_idx) self.attn = WindowAttention( dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, pretrained_window_size=to_2tuple(pretrained_window_size), fused_bias_add_dropout=True, layer_idx=layer_idx, ) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim, layer_idx=layer_idx) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = MLP( hidden_size=dim, ffn_hidden_size=mlp_hidden_dim, output_dropout_prob=drop, bias_gelu_fusion=True, bias_dropout_fusion=True, layer_idx=layer_idx, ) if self.shift_size > 0: # calculate attention mask for SW-MSA H, W = self.input_resolution img_mask = flow.zeros((1, H, W, 1)) # 1 H W 1 h_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) w_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) cnt = 0 for h in h_slices: for w in w_slices: img_mask[:, h, w, :] = cnt cnt = cnt + 1 mask_windows = window_partition( img_mask, self.window_size ) # nW, window_size, window_size, 1 mask_windows = mask_windows.view(-1, self.window_size * 
self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = ( attn_mask.masked_fill(attn_mask != 0, float(-100.0)) .masked_fill(attn_mask == 0, float(0.0)) .to_global( placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) ) else: attn_mask = None self.register_buffer("attn_mask", attn_mask) def forward(self, x): H, W = self.input_resolution B, L, C = x.shape assert L == H * W, "input feature has wrong size" shortcut = x x = x.view(B, H, W, C) # cyclic shift if self.shift_size > 0: shifted_x = flow.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x = x # partition windows x_windows = window_partition( shifted_x, self.window_size ) # nW*B, window_size, window_size, C x_windows = x_windows.view( -1, self.window_size * self.window_size, C ) # nW*B, window_size*window_size, C # W-MSA/SW-MSA attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C # merge windows attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C # reverse cyclic shift if self.shift_size > 0: x = flow.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x x = x.view(B, H * W, C) # NOTE: res-post-norm x = shortcut + self.drop_path(self.norm1(x)) # NOTE: res-post-norm x = x + self.drop_path(self.norm2(self.mlp(x))) return x class PatchMerging(nn.Module): """Patch Merging Layer. Args: input_resolution (tuple[int]): Resolution of input feature. dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. 
Default: libai.layers.LayerNorm """ def __init__(self, input_resolution, dim, norm_layer=LayerNorm, layer_idx=0): super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = Linear(4 * dim, 2 * dim, bias=False, layer_idx=layer_idx) # NOTE: swinv2-> 2*dim, swin-> 4*dim self.norm = norm_layer(2 * dim, layer_idx=layer_idx) self.layer_idx = layer_idx def forward(self, x): """ x: B, H*W, C """ H, W = self.input_resolution B, L, C = x.shape assert L == H * W, "input feature has wrong size" assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." x = x.view(B, H, W, C) x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C x = flow.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C # NOTE: post-res-norm, a change that swin-v2 compared to swin x = self.reduction(x) x = self.norm(x) return x class BasicLayer(nn.Module): """A basic Swin Transformer layer for one stage. Args: dim (int): Number of input channels. input_resolution (tuple[int]): Input resolution. depth (int): Number of blocks. num_heads (int): Number of attention heads. window_size (int): Local window size. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None pretrained_window_size (int): Local window size in pre-training. 
""" def __init__( self, dim, input_resolution, depth, num_heads, window_size, mlp_ratio=4.0, qkv_bias=True, drop=0.0, attn_drop=0.0, drop_path=0.0, norm_layer=LayerNorm, downsample=None, pretrained_window_size=0, layer_id_offset=0, ): super().__init__() self.dim = dim self.input_resolution = input_resolution self.depth = depth self.layer_id_offset = layer_id_offset # build blocks self.blocks = nn.ModuleList( [ SwinTransformerBlock( dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer, pretrained_window_size=pretrained_window_size, layer_idx=layer_id_offset + i, ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample( input_resolution, dim=dim, norm_layer=norm_layer, layer_idx=layer_id_offset + depth - 1, ) else: self.downsample = None def forward(self, x): layer_idx = self.layer_id_offset for blk in self.blocks: x = x.to_global( placement=dist.get_layer_placement(layer_idx, device_type=x.placement.type) ) x = blk(x) layer_idx += 1 if self.downsample is not None: x = self.downsample(x) return x def _init_respostnorm(self): for blk in self.blocks: nn.init.constant_(blk.norm1.bias, 0) nn.init.constant_(blk.norm1.weight, 0) nn.init.constant_(blk.norm2.bias, 0) nn.init.constant_(blk.norm2.weight, 0) class PatchEmbed(nn.Module): r"""Image to Patch Embedding Args: img_size (int): Image size. Default: 224. patch_size (int): Patch token size. Default: 4. in_chans (int): Number of input image channels. Default: 3. embed_dim (int): Number of linear projection output channels. Default: 96. norm_layer (nn.Module, optional): Normalization layer. 
Default: None """ def __init__( self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None, layer_idx=0 ): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] self.img_size = img_size self.patch_size = patch_size self.patches_resolution = patches_resolution self.num_patches = patches_resolution[0] * patches_resolution[1] self.in_chans = in_chans self.embed_dim = embed_dim self.proj = nn.Conv2d( in_chans, embed_dim, kernel_size=patch_size, stride=patch_size ).to_global( placement=dist.get_layer_placement(layer_idx), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) if norm_layer is not None: self.norm = norm_layer(embed_dim) else: self.norm = None def forward(self, x): B, C, H, W = x.shape # FIXME look at relaxing size constraints assert ( H == self.img_size[0] and W == self.img_size[1] ), f"Input image size ({H}*{W}) doesn't match model \ ({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C if self.norm is not None: x = self.norm(x) return x class SwinTransformerV2(nn.Module): r"""Swin Transformer Args: img_size (int | tuple(int)): Input image size. Default 224 patch_size (int | tuple(int)): Patch size. Default: 4 in_chans (int): Number of input image channels. Default: 3 num_classes (int): Number of classes for classification head. Default: 1000 embed_dim (int): Patch embedding dimension. Default: 96 depths (tuple(int)): Depth of each Swin Transformer layer. num_heads (tuple(int)): Number of attention heads in different layers. window_size (int): Window size. Default: 7 mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True drop_rate (float): Dropout rate. Default: 0 attn_drop_rate (float): Attention dropout rate. Default: 0 drop_path_rate (float): Stochastic depth rate. 
Default: 0.1 norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. ape (bool): If True, add absolute position embedding to the patch embedding. Default: False patch_norm (bool): If True, add normalization after patch embedding. Default: True pretrained_window_sizes (tuple(int)): Pretrained window sizes of each layer. """ @configurable def __init__( self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.1, norm_layer=LayerNorm, ape=False, patch_norm=True, pretrained_window_sizes=[0, 0, 0, 0], loss_func=None, ): super().__init__() self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim = embed_dim self.ape = ape self.patch_norm = patch_norm self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio # split image into non-overlapping patches self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None, layer_idx=0, ) num_patches = self.patch_embed.num_patches patches_resolution = self.patch_embed.patches_resolution self.patches_resolution = patches_resolution # absolute position embedding if self.ape: self.absolute_pos_embed = nn.Parameter( flow.zeros( 1, num_patches, embed_dim, placement=dist.get_layer_placement(0), sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), ) ) trunc_normal_(self.absolute_pos_embed, std=0.02) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth dpr = [ x.item() for x in flow.linspace(0, drop_path_rate, sum(depths)) ] # stochastic depth decay rule # build layers self.layers = nn.ModuleList() layer_id_offset = 0 for i_layer in range(self.num_layers): layer = BasicLayer( dim=int(embed_dim * 2 ** i_layer), input_resolution=( patches_resolution[0] // (2 ** i_layer), patches_resolution[1] // (2 ** 
i_layer), ), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, pretrained_window_size=pretrained_window_sizes[i_layer], layer_id_offset=layer_id_offset, ) layer_id_offset += depths[i_layer] self.layers.append(layer) self.norm = norm_layer(self.num_features, layer_idx=-1) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = ( Linear(self.num_features, num_classes, layer_idx=-1) if num_classes > 0 else nn.Identity() ) self.loss_func = nn.CrossEntropyLoss() if loss_func is None else loss_func self.apply(self._init_weights) for bly in self.layers: bly._init_respostnorm() def _init_weights(self, m): if isinstance(m, Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @classmethod def from_config(cls, cfg): return { "img_size": cfg.img_size, "patch_size": cfg.patch_size, "in_chans": cfg.in_chans, "num_classes": cfg.num_classes, "embed_dim": cfg.embed_dim, "depths": cfg.depths, "num_heads": cfg.num_heads, "window_size": cfg.window_size, "mlp_ratio": cfg.mlp_ratio, "qkv_bias": cfg.qkv_bias, "drop_rate": cfg.drop_rate, "drop_path_rate": cfg.drop_path_rate, "ape": cfg.ape, "patch_norm": cfg.patch_norm, "pretrained_window_sizes": cfg.pretrained_window_sizes, "loss_func": cfg.loss_func, } def forward_features(self, x): x = self.patch_embed(x) if self.ape: x = x + self.absolute_pos_embed x = self.pos_drop(x) for layer in self.layers: x = layer(x) x = self.norm(x) # B L C x = self.avgpool(x.transpose(1, 2)) # B C 1 x = flow.flatten(x, 1) return x def forward(self, images, labels=None): """ Args: images (flow.Tensor): training samples. 
labels (flow.LongTensor, optional): training targets Returns: dict: A dict containing :code:`loss_value` or :code:`logits` depending on training or evaluation mode. :code:`{"losses": loss_value}` when training, :code:`{"prediction_scores": logits}` when evaluating. """ x = self.forward_features(images) x = self.head(x) if labels is not None and self.training: losses = self.loss_func(x, labels) return {"losses": losses} else: return {"prediction_scores": x} @staticmethod def set_pipeline_stage_id(model): dist_utils = dist.get_dist_util() # Set pipeline parallelism stage_id if hasattr(model.patch_embed, "config"): # Old API in OneFlow 0.8 model.patch_embed.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) model.pos_drop.config.set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) for module_block in model.modules(): if isinstance(module_block.origin, SwinTransformerBlock): module_block.config.set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) elif isinstance(module_block.origin, PatchMerging): module_block.config.set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) model.norm.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.head.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.avgpool.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.loss_func.config.set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) else: model.patch_embed.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) model.pos_drop.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(0), dist.get_layer_placement(0) ) for module_block in model.modules(): if isinstance(module_block.to(nn.Module), 
SwinTransformerBlock): module_block.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) elif isinstance(module_block.to(nn.Module), PatchMerging): module_block.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(module_block.layer_idx), dist.get_layer_placement(module_block.layer_idx), ) model.norm.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.head.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.avgpool.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) model.loss_func.to(flow.nn.graph.GraphModule).set_stage( dist_utils.get_layer_stage_id(-1), dist.get_layer_placement(-1) ) @staticmethod def set_activation_checkpoint(model): for module_block in model.modules(): if hasattr(module_block, "origin"): # Old API in OneFlow 0.8 if isinstance(module_block.origin, SwinTransformerBlock): module_block.config.activation_checkpointing = True else: if isinstance(module_block.to(nn.Module), SwinTransformerBlock): module_block.to(flow.nn.graph.GraphModule).activation_checkpointing = True
34,295
37.064373
100
py
libai
libai-main/libai/models/build.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libai.config import instantiate, try_get_key def build_model(cfg): """Build the whole model architecture, defined by ``cfg.model``. Note that it does not load any weights from ``cfg``. """ model = instantiate(cfg) return model def build_graph(cfg, model, optimizer=None, lr_scheduler=None, is_train=False): """Build the `nn.Graph`, defined by ``cfg.graph``.""" auto_parallel_conf = try_get_key(cfg, "graph.auto_parallel", default=None) if is_train: # Set train graph assert optimizer is not None, "optimizer must be set for train graph" assert lr_scheduler is not None, "lr_scheduler must be set for train graph" graph = cfg.graph.train_graph graph.model = model graph.optimizer = optimizer graph.lr_scheduler = lr_scheduler graph.fp16 = try_get_key(cfg, "train.amp.enabled", default=False) graph.activation_checkpoint = try_get_key( cfg, "train.activation_checkpoint.enabled", default=False ) graph.zero_optim = try_get_key(cfg, "train.zero_optimization.enabled", default=False) graph.zero_stage = try_get_key(cfg, "train.zero_optimization.stage", default=1) graph.grad_acc_steps = try_get_key(cfg, "train.num_accumulation_steps", default=1) graph.global_mode = try_get_key(cfg, "graph.global_mode", default=None) graph.auto_parallel_conf = auto_parallel_conf return instantiate(graph) else: # Set eval graph graph = cfg.graph.eval_graph graph.model = model 
graph.auto_parallel_conf = auto_parallel_conf return instantiate(graph)
2,277
41.185185
93
py
libai
libai-main/libai/models/utils/weight_init.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import oneflow.nn as nn def init_method_normal(sigma, mean=0.0): """Init method based on N(0, sigma).""" def init_(tensor): return nn.init.normal_(tensor, mean=mean, std=sigma) return init_ def scaled_init_method_normal(sigma, num_layers, mean=0.0): """Init method based on N(0, sigma/sqrt(2*num_layers).""" std = sigma / math.sqrt(2.0 * num_layers) def init_(tensor): return nn.init.normal_(tensor, mean=mean, std=std) return init_
1,119
28.473684
74
py
libai
libai-main/libai/models/utils/graph_base.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import oneflow as flow from oneflow import nn from oneflow.utils.global_view import global_mode from libai.layers import TransformerLayer from libai.utils import distributed as dist logger = logging.getLogger(__name__) class GraphBase(nn.Graph): def __init__( self, model: nn.Module, optimizer: flow.optim.Optimizer = None, lr_scheduler: flow.optim.lr_scheduler = None, fp16=False, activation_checkpoint=False, grad_acc_steps=1, zero_optim=False, zero_stage=0, is_train=True, auto_parallel_conf=None, global_mode=None, ): super().__init__() self.model = model self.is_train = is_train self.global_mode = global_mode if is_train: self.add_optimizer(optimizer, lr_sch=lr_scheduler) if fp16: self.config.enable_amp(True) grad_scaler = flow.amp.GradScaler( init_scale=65536.0 * dist.get_data_parallel_size(), growth_factor=2.0, backoff_factor=0.5, growth_interval=2000, ) self.set_grad_scaler(grad_scaler) if grad_acc_steps > 1: self.config.set_gradient_accumulation_steps(grad_acc_steps) if activation_checkpoint: self.set_activation_checkpoint() if zero_optim: self.config.enable_zero(True, stage=zero_stage) self.set_pipeline_stage_id() self.config.allow_fuse_add_to_output(True) self.config.allow_fuse_model_update_ops(True) self.config.allow_fuse_cast_scale(True) # Enable cuda stream for computation and communication as the same stream. 
# This will reduce memory when using model parallelism. dist_util = dist.get_dist_util() if dist_util.is_tensor_model_parallel() or dist_util.is_pipeline_model_parallel(): flow.boxing.nccl.enable_use_compute_stream(True) # auto_parallel if auto_parallel_conf is not None and auto_parallel_conf.enabled: try: self.config.enable_auto_parallel(True) self.config.enable_auto_parallel_ignore_user_sbp_config( auto_parallel_conf.enable_auto_parallel_ignore_user_sbp_config ) self.config.set_auto_parallel_computation_cost_ratio(0.05) self.config.set_auto_parallel_wait_time(1.65e4) self.config.enable_auto_parallel_trunk_algo(auto_parallel_conf.trunk_algo) self.config.enable_auto_parallel_sbp_collector(auto_parallel_conf.sbp_collector) except RuntimeWarning: import warnings warnings.warn( "The version of oneflow don't support auto_parallel.\n" "Please reinstall the oneflow nightly:\n" "python3 -m pip install --pre oneflow -f https://staging.oneflow.info/branch/master/[PLATFORM]" # noqa ) def build(self, **kwargs): if self.is_train: placement_sbp_dict = ( dict( placement=flow.env.all_device_placement("cuda"), sbp=flow.sbp.split(0), ) if self.global_mode.enabled else {} ) with global_mode(self.global_mode.enabled, **placement_sbp_dict): logger.info( "Start compling the train graph which may take some time. " "Please wait for a moment ..." ) loss_dict = self.model(**kwargs) losses = sum(v for k, v in loss_dict.items() if "loss" in k) losses.backward() return loss_dict else: logger.info( "Start compiling the eval graph which may take some time. " "Please wait for a moment ..." 
) return self.model(**kwargs) def set_activation_checkpoint(self): if hasattr(self.model, "origin"): if hasattr(type(self.model.origin), "set_activation_checkpoint"): type(self.model.origin).set_activation_checkpoint(self.model) else: for module_block in self.model.modules(): if isinstance(module_block.origin, TransformerLayer): module_block.config.activation_checkpointing = True else: if hasattr(type(self.model.to(nn.Module)), "set_activation_checkpoint"): type(self.model.to(nn.Module)).set_activation_checkpoint(self.model) else: for module_block in self.model.modules(): if isinstance(module_block.to(nn.Module), TransformerLayer): module_block.to(nn.graph.GraphModule).activation_checkpointing = True def set_pipeline_stage_id(self): if hasattr(self.model, "origin"): if hasattr(type(self.model.origin), "set_pipeline_stage_id"): type(self.model.origin).set_pipeline_stage_id(self.model) else: if hasattr(type(self.model.to(nn.Module)), "set_pipeline_stage_id"): type(self.model.to(nn.Module)).set_pipeline_stage_id(self.model)
6,034
38.966887
123
py
libai
libai-main/libai/models/utils/__init__.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .graph_base import GraphBase from .weight_init import init_method_normal, scaled_init_method_normal from .model_loader.base_loader import ModelLoaderHuggerFace, ModelLoaderLiBai from .model_loader.bert_loader import BertLoaderHuggerFace, BertLoaderLiBai from .model_loader.roberta_loader import RobertaLoaderHuggerFace, RobertaLoaderLiBai from .model_loader.gpt_loader import GPT2LoaderHuggerFace, GPT2LoaderLiBai from .model_loader.swin_loader import SwinLoaderHuggerFace, SwinLoaderLiBai from .model_loader.swinv2_loader import SwinV2LoaderHuggerFace, SwinV2LoaderLiBai from .model_loader.vit_loader import ViTLoaderHuggerFace, ViTLoaderLiBai
1,271
49.88
84
py
libai
libai-main/libai/models/utils/model_loader/gpt_loader.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from .base_loader import ModelLoaderHuggerFace, ModelLoaderLiBai class GPT2LoaderHuggerFace(ModelLoaderHuggerFace): def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs): super().__init__(model, libai_cfg, pretrained_model_path, **kwargs) """NOTE: base_model_prefix_1 is GPT's prefix in Transformers. base_model_prefix_2 is GPT's prefix in LiBai.""" self.base_model_prefix_1 = "transformer" self.base_model_prefix_2 = "GPT_model" def _convert_state_dict(self, flow_state_dict, cfg): """Convert state_dict's keys to match model. Args: flow_state_dict (OrderedDict): model state dict. cfg (dict): model's default config dict in LiBai. Returns: OrderedDict: flow state dict. """ # The converted checkpoint. oneflow_state_dict = flow_state_dict.copy() old_keys = list(oneflow_state_dict.keys()) # Get configs num_heads = cfg.get("num_attention_heads") hidden_size = cfg.get("hidden_size") head_size = int(hidden_size / num_heads) # prefix has_prefix = any(s.startswith(self.base_model_prefix_1) for s in oneflow_state_dict) prefix1 = self.base_model_prefix_1 + "." if has_prefix else "" prefix2 = "GPT_model.transformer." layer_idx = 2 if has_prefix else 1 # Convert Embedding layers. 
new_key = "GPT_model.embeddings.token_embeddings.weight" old_keys.remove(prefix1 + "wte.weight") oneflow_state_dict[new_key] = oneflow_state_dict.pop(prefix1 + "wte.weight") new_key = "GPT_model.embeddings.position_embeddings.weight" old_keys.remove(prefix1 + "wpe.weight") oneflow_state_dict[new_key] = oneflow_state_dict.pop(prefix1 + "wpe.weight") for key in old_keys: keys = key.split(".") if layer_idx >= len(keys): continue layer = keys[layer_idx] # Convert transformer layers. if "h." in key: if "ln_1" in key: if "weight" in key: new_key = prefix2 + "layers." + layer + ".input_layernorm.weight" else: new_key = prefix2 + "layers." + layer + ".input_layernorm.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "ln_2" in key: if "weight" in key: new_key = prefix2 + "layers." + layer + ".post_attention_layernorm.weight" else: new_key = prefix2 + "layers." + layer + ".post_attention_layernorm.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "attn" in key: if "c_attn" in key: if "weight" in key: new_key = ( prefix2 + "layers." + layer + ".self_attention.query_key_value.weight" ) else: new_key = ( prefix2 + "layers." + layer + ".self_attention.query_key_value.bias" ) qkv = oneflow_state_dict.pop(key) if qkv.ndim > 1: qkv = qkv.transpose(1, 0) qkv = self._fix_qkv_ordering(qkv, head_size, num_heads) oneflow_state_dict[new_key] = qkv elif "c_proj" in key: if "weight" in key: new_key = prefix2 + "layers." + layer + ".self_attention.dense.weight" elif "bias" in key: new_key = prefix2 + "layers." + layer + ".self_attention.dense.bias" value = oneflow_state_dict.pop(key) if value.ndim > 1: value = value.transpose(1, 0) oneflow_state_dict[new_key] = value elif "mlp" in key: if "c_fc" in key: if "weight" in key: new_key = prefix2 + "layers." + layer + ".mlp.dense_h_to_4h.weight" elif "bias" in key: new_key = prefix2 + "layers." 
+ layer + ".mlp.dense_h_to_4h.bias" value = oneflow_state_dict.pop(key) if value.ndim > 1: value = value.transpose(1, 0) oneflow_state_dict[new_key] = value elif "c_proj" in key: if "weight" in key: new_key = prefix2 + "layers." + layer + ".mlp.dense_4h_to_h.weight" elif "bias" in key: new_key = prefix2 + "layers." + layer + ".mlp.dense_4h_to_h.bias" value = oneflow_state_dict.pop(key) if value.ndim > 1: value = value.transpose(1, 0) oneflow_state_dict[new_key] = value elif "ln_f" in key: if "weight" in key: new_key = prefix2 + "layernorm_f.weight" elif "bias" in key: new_key = prefix2 + "layernorm_f.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) return oneflow_state_dict def _load_config_from_json(self, config_file): """load config from `config.json`, and update default config. Args: config_file (str): Path of config file. """ with open(config_file, mode="r", encoding="utf-8") as f: cfg_dict = json.load(f) # update libai_cfg by config.json self._update_cfg("hidden_layers", cfg_dict["n_layer"]) self._update_cfg("hidden_size", cfg_dict["n_embd"]) self._update_cfg("num_attention_heads", cfg_dict["n_head"]) self._update_cfg("max_seq_length", cfg_dict["n_positions"]) self._update_cfg("embedding_dropout_prob", cfg_dict["embd_pdrop"]) self._update_cfg("attention_dropout_prob", cfg_dict["attn_pdrop"]) self._update_cfg("output_dropout_prob", cfg_dict["resid_pdrop"]) self._update_cfg("layernorm_epsilon", cfg_dict["layer_norm_epsilon"]) self._update_cfg("vocab_size", cfg_dict["vocab_size"]) self._update_cfg("initializer_range", cfg_dict["initializer_range"]) self._update_cfg( "ffn_hidden_size", cfg_dict.get("n_inner") if cfg_dict.get("n_inner") is not None else 4 * self.libai_cfg["hidden_size"], ) # update libai_cfg by kwargs for k, v in self.kwargs.items(): self._update_cfg(k, v) self._update_cfg_log() class GPT2LoaderLiBai(ModelLoaderLiBai): def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs): super().__init__(model, libai_cfg, 
pretrained_model_path, **kwargs) self.base_model_prefix_2 = "GPT_model"
7,937
44.36
100
py
libai
libai-main/libai/models/utils/model_loader/bert_loader.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import oneflow as flow from .base_loader import ModelLoaderHuggerFace, ModelLoaderLiBai class BertLoaderHuggerFace(ModelLoaderHuggerFace): def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs): super().__init__(model, libai_cfg, pretrained_model_path, **kwargs) """NOTE: base_model_prefix_1 is BERT's prefix in Transformers. base_model_prefix_2 is BERT's prefix in LiBai.""" self.base_model_prefix_1 = "bert" self.base_model_prefix_2 = "bert" def _convert_state_dict(self, flow_state_dict, cfg): """Convert state_dict's keys to match model. Args: flow_state_dict (OrderedDict): model state dict. cfg (dict): model's default config dict in LiBai. Returns: OrderedDict: flow state dict. """ # The converted checkpoint. oneflow_state_dict = flow_state_dict.copy() # Get configs num_heads = cfg.get("num_attention_heads") hidden_size = cfg.get("hidden_size") layers = cfg.get("hidden_layers") head_size = int(hidden_size / num_heads) # prefix has_prefix = any(s.startswith(self.base_model_prefix_1) for s in oneflow_state_dict) prefix = "bert." 
if has_prefix else "" index_idx = 3 if has_prefix else 2 qkv_idx = 6 if has_prefix else 5 old_keys = oneflow_state_dict.keys() for key in list(old_keys): # Convert bert's embedding layers if "embeddings" in key: if "word_embeddings" in key: new_key = key.replace("word_embeddings", "vocab_embeddings") oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "token_type_embeddings" in key: new_key = key.replace("token_type_embeddings", "tokentype_embeddings") oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "LayerNorm.weight" in key: new_key = prefix + "encoders.0.input_layernorm.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "LayerNorm.bias" in key: new_key = prefix + "encoders.0.input_layernorm.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) else: oneflow_state_dict[key] = oneflow_state_dict[key] # Convert bert's attention layers elif "attention" in key: if "self" in key: index = key.split(".")[index_idx] if ( prefix + "encoders." + index + ".self_attention.query_key_value.weight" in oneflow_state_dict.keys() ): continue q_w = key.replace(key.split(".")[qkv_idx], "query").replace( key.split(".")[qkv_idx + 1], "weight" ) k_w = q_w.replace("query", "key") v_w = q_w.replace("query", "value") q_b = q_w.replace("weight", "bias") k_b = k_w.replace("weight", "bias") v_b = v_w.replace("weight", "bias") qkv_w = flow.cat( ( oneflow_state_dict.pop(q_w), oneflow_state_dict.pop(k_w), oneflow_state_dict.pop(v_w), ), dim=0, ) qkv_b = flow.cat( ( oneflow_state_dict.pop(q_b), oneflow_state_dict.pop(k_b), oneflow_state_dict.pop(v_b), ), dim=-1, ) qkv_w = self._fix_qkv_ordering(qkv_w, head_size, num_heads) qkv_b = self._fix_qkv_ordering(qkv_b, head_size, num_heads) new_key = ( prefix + "encoders." + index + ".self_attention.query_key_value.weight" ) oneflow_state_dict[new_key] = qkv_w new_key = prefix + "encoders." 
+ index + ".self_attention.query_key_value.bias" oneflow_state_dict[new_key] = qkv_b elif "output" in key: index = key.split(".")[index_idx] if "dense" in key: if "weight" in key: new_key = prefix + "encoders." + index + ".self_attention.dense.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = prefix + "encoders." + index + ".self_attention.dense.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "LayerNorm" in key: if "weight" in key: new_key = ( prefix + "encoders." + index + ".post_attention_layernorm.weight" ) oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = ( prefix + "encoders." + index + ".post_attention_layernorm.bias" ) oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) # Convert bert's intermediate layers elif "intermediate" in key: index = key.split(".")[index_idx] if ( prefix + "encoders." + index + ".mlp.dense_h_to_4h.weight" in oneflow_state_dict.keys() ): continue if "weight" in key: w = key b = key.replace("weight", "bias") new_key = prefix + "encoders." + index + ".mlp.dense_h_to_4h.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(w) new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = oneflow_state_dict.pop(b) # Convert bert's output layers elif "output" in key: index = key.split(".")[index_idx] if "dense.weight" in key: if ( prefix + "encoders." + index + ".mlp.dense_4h_to_h.weight" in oneflow_state_dict.keys() ): continue w = key b = w.replace("weight", "bias") new_key = prefix + "encoders." + index + ".mlp.dense_4h_to_h.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(w) new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = oneflow_state_dict.pop(b) elif "LayerNorm.weight" in key: if ( prefix + "encoders." 
+ str(int(index) + 1) + ".input_layernorm.weight" in oneflow_state_dict.keys() ): continue w = key b = w.replace("weight", "bias") if index == str(layers - 1): new_key = prefix + "final_layernorm.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(w) new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = oneflow_state_dict.pop(b) continue new_key = prefix + "encoders." + str(int(index) + 1) + ".input_layernorm.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(w) new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = oneflow_state_dict.pop(b) # Convert bert's pooler layers elif "pooler" in key: if "weight" in key: new_key = prefix + "pooler.dense.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = prefix + "pooler.dense.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) # Convert cls_head layers elif "cls" in key: if "predictions.bias" in key: new_key = "cls_head.lm_logits.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "dense.weight" in key: new_key = "cls_head.predictions.dense.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "dense.bias" in key: new_key = "cls_head.predictions.dense.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "LayerNorm.weight" in key: new_key = "cls_head.predictions.layernorm.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "LayerNorm.bias" in key: new_key = "cls_head.predictions.layernorm.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "seq_relationship" in key: new_key = key.replace("cls", "cls_head") oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) else: oneflow_state_dict[key] = oneflow_state_dict.pop(key) return oneflow_state_dict def _load_config_from_json(self, config_file): """load config from `config.json`, and update default config. Args: config_file (str): Path of config file. 
""" with open(config_file, mode="r", encoding="utf-8") as f: cfg_dict = json.load(f) # update libai_cfg by config.json self._update_cfg("vocab_size", cfg_dict["vocab_size"]) self._update_cfg("hidden_size", cfg_dict["hidden_size"]) self._update_cfg("hidden_layers", cfg_dict["num_hidden_layers"]) self._update_cfg("num_attention_heads", cfg_dict["num_attention_heads"]) self._update_cfg("intermediate_size", cfg_dict["intermediate_size"]) self._update_cfg("hidden_dropout_prob", cfg_dict["hidden_dropout_prob"]) self._update_cfg("attention_probs_dropout_prob", cfg_dict["attention_probs_dropout_prob"]) self._update_cfg("max_position_embeddings", cfg_dict["max_position_embeddings"]) self._update_cfg("num_tokentypes", cfg_dict["type_vocab_size"]) self._update_cfg("initializer_range", cfg_dict["initializer_range"]) self._update_cfg("layernorm_eps", cfg_dict["layer_norm_eps"]) # update libai_cfg by kwargs for k, v in self.kwargs.items(): self._update_cfg(k, v) # use original BERT residual connection ordering self.libai_cfg.apply_residual_post_layernorm = True self._update_cfg_log() class BertLoaderLiBai(ModelLoaderLiBai): def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs): super().__init__(model, libai_cfg, pretrained_model_path, **kwargs) self.base_model_prefix_2 = "bert"
12,335
45.727273
100
py
libai
libai-main/libai/models/utils/model_loader/swin_loader.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import oneflow as flow from .base_loader import ModelLoaderHuggerFace, ModelLoaderLiBai class SwinLoaderHuggerFace(ModelLoaderHuggerFace): def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs): super().__init__(model, libai_cfg, pretrained_model_path, **kwargs) """NOTE: base_model_prefix_1 is SWIN's prefix in Transformers. base_model_prefix_2 is SWIN's prefix in LiBai.""" self.base_model_prefix_1 = "swin" self.base_model_prefix_2 = "" def _convert_state_dict(self, flow_state_dict, cfg=None): """Convert state_dict's keys to match model. Args: flow_state_dict (OrderedDict): model state dict. cfg (dict): model's default config dict. Returns: OrderedDict: flow state dict. """ # The converted checkpoint. 
oneflow_state_dict = flow_state_dict.copy() # prefix has_prefix = any(s.startswith(self.base_model_prefix_1) for s in oneflow_state_dict) index_idx_1 = 3 if has_prefix else 2 index_idx_2 = 5 if has_prefix else 4 old_keys = oneflow_state_dict.keys() for key in list(old_keys): # Convert swin's embedding layers if "embeddings" in key: if "patch_embeddings.projection" in key: if "weight" in key: new_key = "patch_embed.proj.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "patch_embed.proj.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "norm" in key: if "weight" in key: new_key = "patch_embed.norm.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "patch_embed.norm.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) # Convert swin's layernorm layers elif "layernorm_before" in key: index_layer = key.split(".")[index_idx_1] index_block = key.split(".")[index_idx_2] if "weight" in key: new_key = "layers." + index_layer + ".blocks." + index_block + ".norm1.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "layers." + index_layer + ".blocks." + index_block + ".norm1.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "layernorm_after" in key: index_layer = key.split(".")[index_idx_1] index_block = key.split(".")[index_idx_2] if "weight" in key: new_key = "layers." + index_layer + ".blocks." + index_block + ".norm2.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "layers." + index_layer + ".blocks." 
+ index_block + ".norm2.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) # Convert swin's attention layers elif "attention" in key: index_layer = key.split(".")[index_idx_1] index_block = key.split(".")[index_idx_2] if "self" in key: if ( "relative_position_bias_table" in key ): # convert relative_position_bias_table but not index new_key = ( "layers." + index_layer + ".blocks." + index_block + ".attn.relative_position_bias_table" ) oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "relative_position_index" in key: new_key = ( "layers." + index_layer + ".blocks." + index_block + ".attn.relative_position_index" ) oneflow_state_dict.pop(key) else: if ( "layers." + index_layer + ".blocks." + index_block + ".attn.qkv.weight" in oneflow_state_dict.keys() ): continue q_w = key k_w = q_w.replace("query", "key") v_w = q_w.replace("query", "value") q_b = q_w.replace("weight", "bias") k_b = k_w.replace("weight", "bias") v_b = v_w.replace("weight", "bias") qkv_w = flow.cat( ( oneflow_state_dict.pop(q_w), oneflow_state_dict.pop(k_w), oneflow_state_dict.pop(v_w), ), dim=0, ) qkv_b = flow.cat( ( oneflow_state_dict.pop(q_b), oneflow_state_dict.pop(k_b), oneflow_state_dict.pop(v_b), ), dim=-1, ) new_key = ( "layers." + index_layer + ".blocks." + index_block + ".attn.qkv.weight" ) oneflow_state_dict[new_key] = qkv_w new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = qkv_b elif "output" in key: if "dense" in key: if "weight" in key: new_key = ( "layers." + index_layer + ".blocks." + index_block + ".attn.proj.weight" ) oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) if "bias" in key: new_key = ( "layers." + index_layer + ".blocks." + index_block + ".attn.proj.bias" ) oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "intermediate" in key: index_layer = key.split(".")[index_idx_1] index_block = key.split(".")[index_idx_2] if "weight" in key: if ( "layers." + index_layer + ".blocks." 
+ index_block + ".mlp.dense_h_to_4h.weight" in oneflow_state_dict.keys() ): continue w = key b = key.replace("weight", "bias") new_key = ( "layers." + index_layer + ".blocks." + index_block + ".mlp.dense_h_to_4h.weight" ) oneflow_state_dict[new_key] = oneflow_state_dict.pop(w) new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = oneflow_state_dict.pop(b) elif "output" in key: index_layer = key.split(".")[index_idx_1] index_block = key.split(".")[index_idx_2] if "dense.weight" in key: if ( "layers." + index_layer + ".blocks." + index_block + ".mlp.dense_4h_to_h.weight" in oneflow_state_dict.keys() ): continue w = key b = w.replace("weight", "bias") new_key = ( "layers." + index_layer + ".blocks." + index_block + ".mlp.dense_4h_to_h.weight" ) oneflow_state_dict[new_key] = oneflow_state_dict.pop(w) new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = oneflow_state_dict.pop(b) elif "downsample" in key: index_layer = key.split(".")[index_idx_1] if "reduction.weight" in key: new_key = "layers." + index_layer + ".downsample.reduction.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "norm" in key: if ( "layers." + index_layer + ".downsample.norm.weight" in oneflow_state_dict.keys() ): continue w = key b = w.replace("weight", "bias") new_key = "layers." 
+ index_layer + ".downsample.norm.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(w) new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = oneflow_state_dict.pop(b) elif "layernorm" in key: if "weight" in key: new_key = "norm.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "norm.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "classifier" in key: if "weight" in key: new_key = "head.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "head.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) else: oneflow_state_dict[key] = oneflow_state_dict.pop(key) return oneflow_state_dict def _load_config_from_json(self, config_file): """load config from `config.json`, and update default config. Args: config_file (str): Path of config file. """ with open(config_file, mode="r", encoding="utf-8") as f: cfg_dict = json.load(f) # update libai_cfg by config.json self._update_cfg("img_size", cfg_dict["image_size"]) self._update_cfg("patch_size", cfg_dict["patch_size"]) self._update_cfg("embed_dim", cfg_dict["embed_dim"]) self._update_cfg("depths", cfg_dict["depths"]) self._update_cfg("num_heads", cfg_dict["num_heads"]) self._update_cfg("window_size", cfg_dict["window_size"]) self._update_cfg("mlp_ratio", cfg_dict["mlp_ratio"]) self._update_cfg("qkv_bias", cfg_dict["qkv_bias"]) self._update_cfg("drop_path_rate", cfg_dict["drop_path_rate"]) # update libai_cfg by kwargs for k, v in self.kwargs.items(): self._update_cfg(k, v) self._update_cfg_log() class SwinLoaderLiBai(ModelLoaderLiBai): def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs): super().__init__(model, libai_cfg, pretrained_model_path, **kwargs) self.base_model_prefix_2 = ""
12,908
42.173913
99
py
libai
libai-main/libai/models/utils/model_loader/vit_loader.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import oneflow as flow from .base_loader import ModelLoaderHuggerFace, ModelLoaderLiBai class ViTLoaderHuggerFace(ModelLoaderHuggerFace): def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs): super().__init__(model, libai_cfg, pretrained_model_path, **kwargs) """NOTE: base_model_prefix_1 is ViT's prefix in Transformers. base_model_prefix_2 is ViT's prefix in LiBai.""" self.base_model_prefix_1 = "vit" self.base_model_prefix_2 = "" def _convert_state_dict(self, flow_state_dict, cfg=None): """Convert state_dict's keys to match model. Args: flow_state_dict (OrderedDict): model state dict. cfg (dict): model's default config dict. Returns: OrderedDict: flow state dict. """ # The converted checkpoint. 
oneflow_state_dict = flow_state_dict.copy() # Get configs num_heads = cfg.get("num_heads") hidden_size = cfg.get("embed_dim") head_size = int(hidden_size / num_heads) # prefix has_prefix = any(s.startswith(self.base_model_prefix_1) for s in oneflow_state_dict) index_idx = 3 if has_prefix else 2 old_keys = oneflow_state_dict.keys() for key in list(old_keys): # Convert vit's embedding layers if "embeddings" in key: if "cls_token" in key: new_key = "cls_token" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "position_embeddings" in key: new_key = "pos_embed" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "patch_embeddings.projection" in key: if "weight" in key: new_key = "patch_embed.proj.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "patch_embed.proj.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) # Convert vit's layernorm layers elif "layernorm_before" in key: index_block = key.split(".")[index_idx] if "weight" in key: new_key = "blocks." + index_block + ".input_layernorm.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "blocks." + index_block + ".input_layernorm.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "layernorm_after" in key: index_block = key.split(".")[index_idx] if "weight" in key: new_key = "blocks." + index_block + ".post_attention_layernorm.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "blocks." + index_block + ".post_attention_layernorm.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) # Convert vit's attention layers elif "attention" in key: index_block = key.split(".")[index_idx] if "attention.attention" in key: if ( "blocks." 
+ index_block + ".self_attention.query_key_value.weight" in oneflow_state_dict.keys() ): continue q_w = key k_w = q_w.replace("query", "key") v_w = q_w.replace("query", "value") q_b = q_w.replace("weight", "bias") k_b = k_w.replace("weight", "bias") v_b = v_w.replace("weight", "bias") qkv_w = flow.cat( ( oneflow_state_dict.pop(q_w), oneflow_state_dict.pop(k_w), oneflow_state_dict.pop(v_w), ), dim=0, ) qkv_b = flow.cat( ( oneflow_state_dict.pop(q_b), oneflow_state_dict.pop(k_b), oneflow_state_dict.pop(v_b), ), dim=-1, ) qkv_w = self._fix_qkv_ordering(qkv_w, head_size, num_heads) qkv_b = self._fix_qkv_ordering(qkv_b, head_size, num_heads) new_key = "blocks." + index_block + ".self_attention.query_key_value.weight" oneflow_state_dict[new_key] = qkv_w new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = qkv_b elif "output" in key: if "dense" in key: if "weight" in key: new_key = "blocks." + index_block + ".self_attention.dense.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) if "bias" in key: new_key = "blocks." + index_block + ".self_attention.dense.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "intermediate" in key: index_block = key.split(".")[index_idx] if "weight" in key: if ( "blocks." + index_block + ".mlp.dense_h_to_4h.weight" in oneflow_state_dict.keys() ): continue w = key b = key.replace("weight", "bias") new_key = "blocks." + index_block + ".mlp.dense_h_to_4h.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(w) new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = oneflow_state_dict.pop(b) elif "output" in key: index_block = key.split(".")[index_idx] if "dense.weight" in key: if ( "blocks." + index_block + ".mlp.dense_4h_to_h.weight" in oneflow_state_dict.keys() ): continue w = key b = w.replace("weight", "bias") new_key = "blocks." 
+ index_block + ".mlp.dense_4h_to_h.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(w) new_key = new_key.replace("weight", "bias") oneflow_state_dict[new_key] = oneflow_state_dict.pop(b) elif "layernorm" in key: if "weight" in key: new_key = "norm.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "norm.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "classifier" in key: if "weight" in key: new_key = "head.weight" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) elif "bias" in key: new_key = "head.bias" oneflow_state_dict[new_key] = oneflow_state_dict.pop(key) else: oneflow_state_dict[key] = oneflow_state_dict.pop(key) return oneflow_state_dict def _load_config_from_json(self, config_file): """load config from `config.json`, and update default config. Args: config_file (str): Path of config file. """ with open(config_file, mode="r", encoding="utf-8") as f: cfg_dict = json.load(f) # update libai_cfg by config.json self._update_cfg("img_size", cfg_dict["image_size"]) self._update_cfg("patch_size", cfg_dict["patch_size"]) self._update_cfg("in_chans", cfg_dict["num_channels"]) self._update_cfg("embed_dim", cfg_dict["hidden_size"]) self._update_cfg("depth", cfg_dict["num_hidden_layers"]) self._update_cfg("num_heads", cfg_dict["num_attention_heads"]) self._update_cfg("attn_drop_rate", cfg_dict["attention_probs_dropout_prob"]) self._update_cfg("drop_rate", cfg_dict["hidden_dropout_prob"]) # update libai_cfg by kwargs for k, v in self.kwargs.items(): self._update_cfg(k, v) self._update_cfg_log() class ViTLoaderLiBai(ModelLoaderLiBai): def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs): super().__init__(model, libai_cfg, pretrained_model_path, **kwargs) self.base_model_prefix_2 = ""
9,666
41.774336
96
py
libai
libai-main/libai/models/utils/model_loader/base_loader.py
# coding=utf-8 # Copyright 2021 The OneFlow Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import copy import logging import os import omegaconf import oneflow as flow from termcolor import colored import libai.utils.distributed as dist from libai.config import LazyCall from libai.models.build import build_model logger = logging.getLogger(__name__) WEIGHTS_NAME_PT = "pytorch_model.bin" CONFIG_NAME = "config.json" def _load_state_dict_into_model(model_to_load, state_dict, start_prefix): """load state dict into model Args: model_to_load (nn.Module): Model to be loaded. state_dict (OrderedDict): State dict of pretrained model. start_prefix (str): Start prefix. Returns: list: error message about loading. """ metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata error_msgs = [] def load(module, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) module._load_from_state_dict(*args) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") load(model_to_load, prefix=start_prefix) return error_msgs class ModelLoader(object): def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs): """Class used to load the [`transformers`](https://huggingface.co/models) pretrained model or `OneFlow` pretrained model. 
Args:
            model (libai.models): Model to be loaded in Libai.
            libai_cfg (dict): The config of model in LiBai, you can import it from
                `libai.config.configs.common.models`.
            pretrained_model_path (str): The directory path of pretrained model,
                which contains model weights file and config file.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether to return a dictionary containing missing keys,
                unexpected keys and error messages.
        """
        self.model = model
        self.libai_cfg = libai_cfg
        self.pretrained_model_path = pretrained_model_path
        self.kwargs = kwargs
        # Pop so that ``output_loading_info`` is never forwarded as a cfg override.
        # NOTE(review): ``self.kwargs`` aliases ``kwargs``, so this pop also removes
        # the key from ``self.kwargs`` — presumably intentional; confirm.
        self.output_loading_info = kwargs.pop("output_loading_info", False)

    def _state_dict_to_global(self, flow_state_dict=None, mode="libai"):
        """Convert every tensor in an OneFlow state dict to a global tensor,
        matching the sbp signature and placement of the instantiated model.

        Args:
            flow_state_dict (OrderedDict): State dict of OneFlow's pretrained model.
                Only required to be non-None on the ranks that hold the checkpoint
                (all ranks for ``"libai"`` mode, rank 0 for ``"pytorch"`` mode).
            mode (str): ``"libai"`` (checkpoint already sharded/loaded everywhere)
                or ``"pytorch"`` (checkpoint only materialized on the main process).

        Returns:
            OrderedDict: State dict whose tensors are global.
        """
        assert mode in ["libai", "pytorch"], f"not support for mode {mode}"

        if mode == "libai" or dist.is_main_process():
            prefix = self.base_model_prefix_2
            # Checkpoint
            has_prefix_module = any(
                s.startswith(self.base_model_prefix_2) for s in flow_state_dict.keys()
            )
            # Module
            expects_prefix_module = any(
                s.startswith(prefix) for s in self.model.state_dict().keys()
            )
            start_prefix = "" if has_prefix_module else prefix + "."
            loaded_keys = [start_prefix + key for key in flow_state_dict.keys()]
        else:
            # Non-main ranks in pytorch mode: placeholders, filled by broadcast below.
            prefix, has_prefix_module, expects_prefix_module, loaded_keys = [None] * 4
            flow_state_dict = collections.OrderedDict()

        # Make the bookkeeping identical on every rank before the collective
        # ``flow.to_global`` calls below (all ranks must take the same branches).
        prefix = dist.broadcast_py_object(prefix, src=0)
        has_prefix_module = dist.broadcast_py_object(has_prefix_module, src=0)
        expects_prefix_module = dist.broadcast_py_object(expects_prefix_module, src=0)
        loaded_keys = dist.broadcast_py_object(loaded_keys, src=0)

        # to global
        for key, value in self.model.state_dict().items():
            if not expects_prefix_module:
                key = prefix + "." + key
            if key in loaded_keys:
                if not has_prefix_module:
                    key = ".".join(key.split(".")[1:])
                if mode == "pytorch":
                    # First broadcast the rank-0 tensor to all ranks; ranks without
                    # data contribute an empty ``flow.Tensor(None)`` placeholder.
                    flow_state_dict[key] = flow.to_global(
                        flow_state_dict[key]
                        if dist.is_main_process()
                        else flow.Tensor(None),
                        sbp=flow.sbp.broadcast,
                        placement=flow.placement("cpu", ranks=[0]),
                    )
                # Re-distribute to the sbp/placement the model parameter expects.
                flow_state_dict[key] = flow.to_global(
                    flow_state_dict[key],
                    sbp=value.sbp,
                    placement=flow.placement("cpu", ranks=list(value.placement.ranks)),
                )
        return flow_state_dict

    def _load_pretrained_model(
        self,
        model,
        state_dict,
        pretrained_model_path,
        ignore_mismatched_sizes=False,
    ):
        """Load a (global) state dict into ``model`` and report key differences.

        Args:
            model (libai.models): The model to be loaded.
            state_dict (OrderedDict): State dict with global tensors.
            pretrained_model_path (str): Pretrained model path (used in messages).
            ignore_mismatched_sizes (bool): Whether or not to raise an error if some
                of the weights from the checkpoint do not have the same size as the
                weights of the model (shape-mismatched entries are dropped instead),
                defaults to `False`.

        Returns:
            tuple: ``(model, missing_keys, unexpected_keys, mismatched_keys,
            error_msgs)``.
        """
        model_state_dict = model.state_dict()
        expected_keys = list(model_state_dict.keys())
        prefix = self.base_model_prefix_2
        loaded_keys = state_dict.keys()

        # Detect whether checkpoint/model keys carry the base-model prefix, and
        # normalize ``expected_keys`` into the checkpoint's naming scheme.
        if len(prefix) > 0:
            has_prefix_module = any(s.startswith(prefix) for s in loaded_keys)
            expects_prefix_module = any(s.startswith(prefix) for s in expected_keys)
        else:
            has_prefix_module = False
            expects_prefix_module = False

        remove_prefix_from_model = not has_prefix_module and expects_prefix_module
        add_prefix_to_model = has_prefix_module and not expects_prefix_module

        if remove_prefix_from_model:
            expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(prefix)]
            expected_keys = [
                ".".join(s.split(".")[1:]) if s.startswith(prefix) else s
                for s in expected_keys
            ]
        elif add_prefix_to_model:
            expected_keys = [".".join([prefix, s]) for s in expected_keys]

        missing_keys = list(set(expected_keys) - set(loaded_keys))
        unexpected_keys = list(set(loaded_keys) - set(expected_keys))

        start_prefix = ""
        model_to_load = model
        if (
            len(self.base_model_prefix_2) > 0
            and not hasattr(model, self.base_model_prefix_2)
            and has_prefix_module
        ):
            start_prefix = self.base_model_prefix_2 + "."
        if (
            len(self.base_model_prefix_2) > 0
            and hasattr(model, self.base_model_prefix_2)
            and not has_prefix_module
        ):
            # Load into the bare base model attribute instead of the full wrapper.
            model_to_load = getattr(model, self.base_model_prefix_2)
            # NOTE(review): ``expected_keys_not_prefixed`` is only bound when
            # ``remove_prefix_from_model`` is True; this check assumes that branch
            # was taken — confirm against upstream.
            if any(key in expected_keys_not_prefixed for key in loaded_keys):
                raise ValueError("The state dict of the model you are loading is corrupted.")

        def _find_mismatched_keys(
            state_dict,
            model_state_dict,
            loaded_keys,
            add_prefix_to_model,
            remove_prefix_from_model,
            ignore_mismatched_sizes,
        ):
            # Collect (and drop from ``state_dict``) entries whose checkpoint shape
            # disagrees with the model's parameter shape.
            mismatched_keys = []
            if ignore_mismatched_sizes:
                for checkpoint_key in loaded_keys:
                    model_key = checkpoint_key
                    if remove_prefix_from_model:
                        # Model keys carry the prefix the checkpoint lacks.
                        model_key = f"{prefix}.{checkpoint_key}"
                    elif add_prefix_to_model:
                        # Model keys lack the prefix the checkpoint carries.
                        model_key = ".".join(checkpoint_key.split(".")[1:])
                    if (
                        model_key in model_state_dict
                        and state_dict[checkpoint_key].shape
                        != model_state_dict[model_key].shape
                    ):
                        mismatched_keys.append(
                            (
                                checkpoint_key,
                                state_dict[checkpoint_key].shape,
                                model_state_dict[model_key].shape,
                            )
                        )
                        del state_dict[checkpoint_key]
            return mismatched_keys

        # NOTE(review): if ``state_dict`` is None, ``mismatched_keys`` and
        # ``error_msgs`` below are unbound — callers in this file always pass a
        # dict; confirm before relying on the None path.
        if state_dict is not None:
            mismatched_keys = _find_mismatched_keys(
                state_dict,
                model_state_dict,
                loaded_keys,
                add_prefix_to_model,
                remove_prefix_from_model,
                ignore_mismatched_sizes,
            )
            error_msgs = _load_state_dict_into_model(model_to_load, state_dict, start_prefix)

        # Only the local-rank-0 process reports/raises, to avoid duplicate logs.
        if dist.get_local_rank() == 0:
            if len(error_msgs) > 0:
                error_msg = "\n\t".join(error_msgs)
                raise RuntimeError(
                    f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}"
                )
            if len(unexpected_keys) > 0:
                logger.warning(
                    f"Some weights of the model checkpoint at {pretrained_model_path} "
                    "were not used when "
                    f"initializing {model.__class__.__name__}:\n {unexpected_keys}\n"
                )
            else:
                logger.info(
                    f"All model checkpoint weights were used when initializing "
                    f"{model.__class__.__name__}.\n"
                )
            if len(missing_keys) > 0:
                logger.warning(
                    f"Some weights of {model.__class__.__name__} were not initialized "
                    f"from the model checkpoint at {pretrained_model_path}:\n "
                    f"{missing_keys} \n"
                )
            elif len(mismatched_keys) == 0:
                logger.info(
                    f"All the weights of {model.__class__.__name__} were initialized "
                    f"from the model checkpoint at {pretrained_model_path}.\n"
                )
            if len(mismatched_keys) > 0:
                mismatched_warning = "\n".join(
                    [
                        f"- {key}: found shape {shape1} in the checkpoint and {shape2}"
                        "in the model instantiated"
                        for key, shape1, shape2 in mismatched_keys
                    ]
                )
                logger.warning(
                    f"Some weights of {model.__class__.__name__} were not initialized"
                    f"from the model checkpoint at {pretrained_model_path} "
                    f"and are newly initialized because the shapes did not"
                    f"match:\n{mismatched_warning}\n"
                )

        return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs


class ModelLoaderLiBai(ModelLoader):
    """Class used to load `OneFlow` pretrained model.

    Args:
        model (libai.models): Model to be loaded in Libai.
        libai_cfg (dict): The config of model in LiBai, you can import it from
            `libai.config.configs.common.models`.
        pretrained_model_path (str): The file path of pretrained model.
        output_loading_info (`bool`, *optional*, defaults to `False`):
            Whether to return a dictionary containing missing keys,
            unexpected keys and error messages.
    """

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        self.base_model_prefix_2 = None  # prefix in LiBai

    def _load_flow_state_dict(self, state_dict_file):
        # load oneflow_model; only rank 0 reads from disk, others receive globals.
        state_dict = flow.load(state_dict_file, global_src_rank=0)
        return state_dict

    def load(self):
        """Load model.

        For example:

        .. code-block:: python

            >>> import libai
            >>> from libai.config.configs.common.models.bert import cfg
            >>> from model_loader import BertLoaderLiBai

            >>> loder = BertLoaderLiBai(
                    libai.models.BertModel,
                    cfg,
                    'path/bert-base-chinese'
                )
            >>> bert = loder.load()
        """
        flow_state_dict = self._load_flow_state_dict(self.pretrained_model_path)

        # Instance model
        if isinstance(self.model, omegaconf.dictconfig.DictConfig):
            self.model.cfg = self.libai_cfg
            self.model = build_model(self.model)
        else:
            self.model = build_model(LazyCall(self.model)(cfg=self.libai_cfg))

        # State_dict to global
        self._state_dict_to_global(flow_state_dict, mode="libai")

        # Load
        (
            model,
            missing_keys,
            unexpected_keys,
            mismatched_keys,
            error_msgs,
        ) = self._load_pretrained_model(self.model, flow_state_dict, self.pretrained_model_path)

        if self.output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "mismatched_keys": mismatched_keys,
                "error_msgs": error_msgs,
            }
            return model, loading_info
        return model


class ModelLoaderHuggerFace(ModelLoader):
    """Class used to load the [`transformers`](https://huggingface.co/models)
    pretrained model.
    """

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        self.base_model_prefix_1 = None  # prefix in Transformers
        self.base_model_prefix_2 = None  # prefix in LiBai
        # Keep a pristine copy so _update_cfg_log can diff what config.json/kwargs changed.
        self.origin_libai_cfg = copy.deepcopy(self.libai_cfg)
        self.changed_keys = set()  # Store the changed configuration

    def _convert_tensor(self, tensor):
        """Convert PyTorch tensor to OneFlow tensor.

        Args:
            tensor (torch.Tensor): The source tensor.

        Returns:
            flow.Tensor: The target tensor.
        """
        # Cast to float32 first: flow.Tensor construction goes through numpy,
        # which cannot represent e.g. torch bfloat16 directly.
        tensor = tensor.float()
        return flow.Tensor(tensor.detach().cpu().numpy())

    def _convert_tensors(self, torch_state_dict):
        # In-place conversion of every value; returns the same (mutated) mapping.
        for k, v in torch_state_dict.items():
            torch_state_dict[k] = self._convert_tensor(v)
        return torch_state_dict

    def _fix_key(self, state_dict):
        """Fix the key in state dict: Convert "gamma" to "weight" and "beta" to "bias".

        Args:
            state_dict (OrderedDict): State dict of pretrained model.

        Returns:
            OrderedDict: State dict after fixing keys (mutated in place).
        """
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if "gamma" in key:
                new_key = key.replace("gamma", "weight")
            if "beta" in key:
                new_key = key.replace("beta", "bias")
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        return state_dict

    def _fix_qkv_ordering(
        self, qkv, head_size, num_heads, hidden_size=None, checkpoint_version=0.0
    ):
        """Reorder a fused query/key/value weight or bias from HuggingFace's
        [qkv, heads, ...] layout into LiBai's [heads, qkv, ...] layout."""
        # TODO(xzp): Different versions checkpoint
        hidden_size = (head_size * num_heads) if hidden_size is None else hidden_size
        num_of_qkv = qkv.shape[0] // (head_size * num_heads)
        # 2-D tensors are weights, 1-D are biases.
        mode = "weight" if qkv.ndim > 1 else "bias"
        if mode == "weight":
            qkv = qkv.view([num_of_qkv, num_heads, head_size, hidden_size])
            qkv = (
                qkv.permute(1, 0, 2, 3)
                .contiguous()
                .view(num_of_qkv * head_size * num_heads, hidden_size)
            )
        elif mode == "bias":
            qkv = qkv.view(num_of_qkv, num_heads, head_size)
            qkv = qkv.permute(1, 0, 2).contiguous().view(-1)
        return qkv

    def _convert_state_dict(self, flow_state_dict, cfg):
        """A function used to convert the checkpoint file of Huggingface to LiBai.

        Args:
            flow_state_dict (OrderedDict): Model state dict (already OneFlow tensors).
            cfg (dict): Model's default config dict in LiBai.

        Returns:
            OrderedDict: Flow state dict with LiBai-style keys.
        """
        raise NotImplementedError("_convert_state_dict not implemented")

    def _load_config_from_json(self, config_file):
        """Load config from `config.json`, and update default config.

        Args:
            config_file (str): Path of config file.
        """
        raise NotImplementedError("_load_config_from_json not implemented")

    def _load_torch_state_dict(self, state_dict_file):
        # torch is an optional dependency: only needed when importing HF checkpoints.
        try:
            import torch
        except ImportError:
            raise ImportError("Load torch state dict need torch.")

        # load pytorch_model.bin
        state_dict = torch.load(state_dict_file, map_location="cpu")
        return state_dict

    def _update_cfg(self, keys_libai, value_target):
        """Update the libai_cfg according to target_cfg.

        Args:
            keys_libai (str): The key of libai_cfg.
            value_target (int | float): The value of target_cfg.
        """
        # Silently ignore keys the LiBai config does not define.
        if keys_libai not in self.libai_cfg.keys():
            return
        if self.libai_cfg[keys_libai] != value_target:
            self.libai_cfg[keys_libai] = value_target

    def _update_cfg_log(self):
        # Log every config entry that differs from the pristine copy (rank 0 only).
        if dist.get_local_rank() == 0:
            for key in sorted(self.libai_cfg):
                if self.origin_libai_cfg[key] == self.libai_cfg[key]:
                    continue
                self.changed_keys.add(key)
                temp_key = colored(key, "yellow")
                logger.info(
                    f"changed libai model cfg {temp_key} : "
                    f"{self.origin_libai_cfg[key]} -> {self.libai_cfg[key]} "
                )
            logger.warning(
                "The following model configurations has been modified according "
                "to `config.json` or kwargs: \n"
                f"{self.changed_keys} \n"
            )
            if dist.get_pipeline_parallel_size() > 1:
                logger.warning(
                    colored(
                        "If you use pipeline parallel, please "
                        "confirm the setting of `train.dist.pipeline_num_layers` \n",
                        "red",
                    )
                )

    def load(self):
        """Load model.

        For example:

        .. code-block:: python

            >>> import libai
            >>> from configs.common.models.bert import cfg
            >>> from libai.models.utils import BertLoaderHuggerFace

            >>> loader = BertLoaderHuggerFace(
                    libai.models.BertModel,
                    cfg,
                    'path/bert-base-chinese'
                )
            >>> bert = loader.load()
        """
        if dist.is_main_process():
            # Only rank 0 touches the filesystem; results are broadcast below.
            if os.path.isdir(self.pretrained_model_path):
                # state_dict file pytorch
                if os.path.isfile(os.path.join(self.pretrained_model_path, WEIGHTS_NAME_PT)):
                    model_file = os.path.join(self.pretrained_model_path, WEIGHTS_NAME_PT)
                else:
                    raise EnvironmentError(
                        f"Error no file named {WEIGHTS_NAME_PT} found"
                        f"in directory {self.pretrained_model_path}."
                    )

                # config file
                if os.path.isfile(os.path.join(self.pretrained_model_path, CONFIG_NAME)):
                    config_file = os.path.join(self.pretrained_model_path, CONFIG_NAME)
                    # Load config and update config.
                    self._load_config_from_json(config_file)
                else:
                    import warnings

                    warnings.warn(
                        f"Error no file named {CONFIG_NAME} found in directory"
                        f"{self.pretrained_model_path}",
                        RuntimeWarning,
                    )
            else:
                raise EnvironmentError(f"{self.pretrained_model_path} is not a directory.")

            logger.info("loading torch model...")
            torch_state_dict = self._load_torch_state_dict(model_file)
            torch_state_dict = self._fix_key(torch_state_dict)
            logger.info("transfering torch model into oneflow model...")
            # NOTE(review): _convert_tensors mutates torch_state_dict in place, so
            # passing torch_state_dict to _convert_state_dict below operates on the
            # same converted mapping — confirm this aliasing is intentional.
            flow_state_dict = self._convert_tensors(torch_state_dict)
            flow_state_dict = self._convert_state_dict(torch_state_dict, self.libai_cfg)
        else:
            flow_state_dict = None

        # Every rank must agree on the (possibly updated) config.
        self.libai_cfg = dist.broadcast_py_object(self.libai_cfg, src=0)

        # Instance model
        logger.info("building LiBai model...")
        if isinstance(self.model, omegaconf.dictconfig.DictConfig):
            self.model.cfg = self.libai_cfg
            self.model = build_model(self.model)
        else:
            self.model = build_model(LazyCall(self.model)(cfg=self.libai_cfg))

        # State_dict to global
        logger.info("transfering state_dict local to global...")
        flow_state_dict = self._state_dict_to_global(flow_state_dict, mode="pytorch")

        logger.info("loading model weights into LiBai...")
        # Load
        (
            model,
            missing_keys,
            unexpected_keys,
            mismatched_keys,
            error_msgs,
        ) = self._load_pretrained_model(self.model, flow_state_dict, self.pretrained_model_path)

        if self.output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "mismatched_keys": mismatched_keys,
                "error_msgs": error_msgs,
            }
            return model, loading_info
        return model
22,702
36.964883
100
py
libai
libai-main/libai/models/utils/model_loader/swinv2_loader.py
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

import oneflow as flow

from .base_loader import ModelLoaderHuggerFace, ModelLoaderLiBai


class SwinV2LoaderHuggerFace(ModelLoaderHuggerFace):
    """Loader that converts a HuggingFace ``transformers`` SwinV2 checkpoint
    into a LiBai SwinV2 model."""

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)

        """NOTE: base_model_prefix_1 is SWINV2's prefix in Transformers.
        base_model_prefix_2 is SWINV2's prefix in LiBai."""
        self.base_model_prefix_1 = "swinv2"
        self.base_model_prefix_2 = ""

    def _convert_state_dict(self, flow_state_dict, cfg=None):
        """Convert state_dict's keys to match model.

        Walks every HuggingFace key and rewrites it to LiBai's naming
        (``layers.<i>.blocks.<j>...``), fusing q/k/v weights and renaming
        embedding / norm / mlp / head parameters.

        Args:
            flow_state_dict (OrderedDict): Model state dict (OneFlow tensors).
            cfg (dict): Model's default config dict.

        Returns:
            OrderedDict: Flow state dict with LiBai-style keys.
        """
        # The converted checkpoint.
        oneflow_state_dict = flow_state_dict.copy()

        # prefix: HF keys may or may not start with "swinv2."; the stage/block
        # indices sit two positions deeper when the prefix is present.
        has_prefix = any(s.startswith(self.base_model_prefix_1) for s in oneflow_state_dict)
        index_idx_1 = 3 if has_prefix else 2  # position of the stage (layer) index
        index_idx_2 = 5 if has_prefix else 4  # position of the block index

        # NOTE(review): several branches below (cpb_mlp, qkv fusion, norms) pop
        # sibling keys eagerly and assume a specific key order in the checkpoint
        # (e.g. ".0.weight" before ".0.bias", "query" before "key"/"value") —
        # standard HF checkpoints satisfy this; confirm for exotic ones.
        old_keys = oneflow_state_dict.keys()
        for key in list(old_keys):
            # Convert swinv2's embedding layers
            if "embeddings" in key:
                if "patch_embeddings.projection" in key:
                    if "weight" in key:
                        new_key = "patch_embed.proj.weight"
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                    if "bias" in key:
                        new_key = "patch_embed.proj.bias"
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "norm" in key:
                    if "weight" in key:
                        new_key = "patch_embed.norm.weight"
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                    if "bias" in key:
                        new_key = "patch_embed.norm.bias"
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
            # Convert swinv2's layernorm layers
            elif "layernorm_before" in key:
                index_layer = key.split(".")[index_idx_1]
                index_block = key.split(".")[index_idx_2]
                if "weight" in key:
                    new_key = "layers." + index_layer + ".blocks." + index_block + ".norm1.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "bias" in key:
                    new_key = "layers." + index_layer + ".blocks." + index_block + ".norm1.bias"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
            elif "layernorm_after" in key:
                index_layer = key.split(".")[index_idx_1]
                index_block = key.split(".")[index_idx_2]
                if "weight" in key:
                    new_key = "layers." + index_layer + ".blocks." + index_block + ".norm2.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "bias" in key:
                    new_key = "layers." + index_layer + ".blocks." + index_block + ".norm2.bias"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
            # Convert swinv2's attention layers
            elif "attention" in key:
                index_layer = key.split(".")[index_idx_1]
                index_block = key.split(".")[index_idx_2]
                if "self" in key:
                    if (
                        "relative_position_bias_table" in key
                    ):  # convert relative_position_bias_table but not index
                        new_key = (
                            "layers."
                            + index_layer
                            + ".blocks."
                            + index_block
                            + ".attn.relative_position_bias_table"
                        )
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                    elif "relative_position_index" in key:
                        new_key = (
                            "layers."
                            + index_layer
                            + ".blocks."
                            + index_block
                            + ".attn.relative_position_index"
                        )
                        # Dropped on purpose: the LiBai model recomputes this buffer.
                        oneflow_state_dict.pop(key)
                    elif "continuous_position_bias_mlp" in key:
                        # Fuse the 2-layer CPB MLP; skip once the target already exists.
                        if (
                            "layers."
                            + index_layer
                            + ".blocks."
                            + index_block
                            + ".attn.cpb_mlp"
                            + ".0.weight"
                        ) in oneflow_state_dict.keys():
                            continue
                        new_key = (
                            "layers." + index_layer + ".blocks." + index_block + ".attn.cpb_mlp"
                        )
                        # assumes the first CPB key seen is ".0.weight" — TODO confirm
                        m_1_w = key
                        m_1_b = key.replace(".0.weight", ".0.bias")
                        m_2_w = key.replace(".0.weight", ".2.weight")
                        oneflow_state_dict[new_key + ".0.weight"] = oneflow_state_dict.pop(m_1_w)
                        oneflow_state_dict[new_key + ".0.bias"] = oneflow_state_dict.pop(m_1_b)
                        oneflow_state_dict[new_key + ".2.weight"] = oneflow_state_dict.pop(m_2_w)
                    elif "logit_scale" in key:
                        new_key = (
                            "layers." + index_layer + ".blocks." + index_block + ".attn.logit_scale"
                        )
                        # Add a leading dim ([None, ...]) to match LiBai's parameter shape.
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)[None, ...]
                    else:
                        # Fuse query/key/value weights into one qkv weight; biases keep
                        # only q and v (SwinV2 has no k bias).
                        if (
                            "layers." + index_layer + ".blocks." + index_block + ".attn.qkv.weight"
                            in oneflow_state_dict.keys()
                        ):
                            continue
                        q_w = key
                        k_w = q_w.replace("query", "key")
                        v_w = q_w.replace("query", "value")
                        q_b = q_w.replace("weight", "bias")
                        v_b = v_w.replace("weight", "bias")
                        qkv_w = flow.cat(
                            (
                                oneflow_state_dict.pop(q_w),
                                oneflow_state_dict.pop(k_w),
                                oneflow_state_dict.pop(v_w),
                            ),
                            dim=0,
                        )
                        new_key = (
                            "layers." + index_layer + ".blocks." + index_block + ".attn.qkv.weight"
                        )
                        oneflow_state_dict[new_key] = qkv_w
                        new_key = (
                            "layers." + index_layer + ".blocks." + index_block + ".attn.q_bias"
                        )
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(q_b)
                        new_key = new_key.replace("q_bias", "v_bias")
                        oneflow_state_dict[new_key] = oneflow_state_dict.pop(v_b)
                elif "output" in key:
                    # attention.output.dense -> attn.proj
                    if "dense" in key:
                        if "weight" in key:
                            new_key = (
                                "layers."
                                + index_layer
                                + ".blocks."
                                + index_block
                                + ".attn.proj.weight"
                            )
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                        if "bias" in key:
                            new_key = (
                                "layers."
                                + index_layer
                                + ".blocks."
                                + index_block
                                + ".attn.proj.bias"
                            )
                            oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
            elif "intermediate" in key:
                # MLP first linear: intermediate.dense -> mlp.dense_h_to_4h
                index_layer = key.split(".")[index_idx_1]
                index_block = key.split(".")[index_idx_2]
                if "weight" in key:
                    if (
                        "layers."
                        + index_layer
                        + ".blocks."
                        + index_block
                        + ".mlp.dense_h_to_4h.weight"
                        in oneflow_state_dict.keys()
                    ):
                        continue
                    w = key
                    b = key.replace("weight", "bias")
                    new_key = (
                        "layers."
                        + index_layer
                        + ".blocks."
                        + index_block
                        + ".mlp.dense_h_to_4h.weight"
                    )
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(w)
                    new_key = new_key.replace("weight", "bias")
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(b)
            elif "output" in key:
                # MLP second linear: output.dense -> mlp.dense_4h_to_h
                # (attention.output was already consumed by the branch above)
                index_layer = key.split(".")[index_idx_1]
                index_block = key.split(".")[index_idx_2]
                if "dense.weight" in key:
                    if (
                        "layers."
                        + index_layer
                        + ".blocks."
                        + index_block
                        + ".mlp.dense_4h_to_h.weight"
                        in oneflow_state_dict.keys()
                    ):
                        continue
                    w = key
                    b = w.replace("weight", "bias")
                    new_key = (
                        "layers."
                        + index_layer
                        + ".blocks."
                        + index_block
                        + ".mlp.dense_4h_to_h.weight"
                    )
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(w)
                    new_key = new_key.replace("weight", "bias")
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(b)
            elif "downsample" in key:
                index_layer = key.split(".")[index_idx_1]
                if "reduction.weight" in key:
                    new_key = "layers." + index_layer + ".downsample.reduction.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "norm" in key:
                    if (
                        "layers." + index_layer + ".downsample.norm.weight"
                        in oneflow_state_dict.keys()
                    ):
                        continue
                    w = key
                    b = w.replace("weight", "bias")
                    new_key = "layers." + index_layer + ".downsample.norm.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(w)
                    new_key = new_key.replace("weight", "bias")
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(b)
            elif "layernorm" in key:
                # Final (post-encoder) layernorm.
                if "weight" in key:
                    new_key = "norm.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "bias" in key:
                    new_key = "norm.bias"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
            elif "classifier" in key:
                # Classification head.
                if "weight" in key:
                    new_key = "head.weight"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
                elif "bias" in key:
                    new_key = "head.bias"
                    oneflow_state_dict[new_key] = oneflow_state_dict.pop(key)
            else:
                # No-op rename: unmatched keys keep their original names.
                oneflow_state_dict[key] = oneflow_state_dict.pop(key)

        return oneflow_state_dict

    def _load_config_from_json(self, config_file):
        """Load config from `config.json`, and update default config.

        Args:
            config_file (str): Path of config file.
        """
        with open(config_file, mode="r", encoding="utf-8") as f:
            cfg_dict = json.load(f)

        # update libai_cfg by config.json
        self._update_cfg("img_size", cfg_dict["image_size"])
        self._update_cfg("patch_size", cfg_dict["patch_size"])
        self._update_cfg("embed_dim", cfg_dict["embed_dim"])
        self._update_cfg("depths", cfg_dict["depths"])
        self._update_cfg("num_heads", cfg_dict["num_heads"])
        self._update_cfg("window_size", cfg_dict["window_size"])
        self._update_cfg("mlp_ratio", cfg_dict["mlp_ratio"])
        self._update_cfg("qkv_bias", cfg_dict["qkv_bias"])
        self._update_cfg("drop_path_rate", cfg_dict["drop_path_rate"])
        self._update_cfg("pretrained_window_sizes", cfg_dict["pretrained_window_sizes"])

        # update libai_cfg by kwargs
        for k, v in self.kwargs.items():
            self._update_cfg(k, v)

        self._update_cfg_log()


class SwinV2LoaderLiBai(ModelLoaderLiBai):
    """Loader for SwinV2 checkpoints already saved in LiBai's OneFlow format."""

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        self.base_model_prefix_2 = ""
14,228
43.886435
100
py