File: Multi-domain-learning-FAS-main/source_multi_domain/config.py

# -*- coding: utf-8 -*-
# Copyright 2022
#
# Authors: Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu.
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
import tensorflow as tf
import os
import glob
import abc
import csv
from utils import file_reader
# Configuration class.
class Config(object):
"""
the meta configuration class.
Attributes:
-----------
training meta: image sizes, logging frequencies, and the learning-rate schedule.
dataset lists: live/spoof folder lists for the train and test splits.
Methods:
-----------
abstract functions: search_folder and search_folder_wrapper.
list-building functions: illu_list_gen, _construct_ill_dict, and compile.
"""
# Config.
LOG_DEVICE_PLACEMENT = False
IMG_SIZE = 256
MAP_SIZE = 32
FIG_SIZE = 128
# Training meta.
STEPS_PER_EPOCH = 1000
IMG_LOG_FR = 100
TXT_LOG_FR = 1000
# Initial learning rate.
lr = 1e-4
LEARNING_RATE_DECAY_FACTOR = 0.89 # The factor for the learning-rate decay.
LEARNING_MOMENTUM = 0.999
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
GAN = 'ls' # 'hinge', 'ls'
DECAY_STEP = 10
n_layer_D = 4
def __init__(self, args):
self.MAX_EPOCH = args.epoch
self.GPU_INDEX = args.cuda
self.phase = args.stage
assert self.phase in ['pretrain', 'ft', 'ub'], "Please offer a valid phase!"
self.type = args.type
self.SET = args.set
self.illu_dict = dict()
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
tf.config.experimental.set_memory_growth(gpus[self.GPU_INDEX], True)
tf.config.experimental.set_visible_devices(gpus[self.GPU_INDEX], 'GPU')
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
except RuntimeError as e:
print(e) # Virtual devices must be set before GPUs have been initialized
@abc.abstractmethod
def search_folder(self, root_dir, sub_id, stype):
pass
@abc.abstractmethod
def search_folder_wrapper(self, root_dir, filenames):
pass
def _filter_ill_siw(self, old_list=[], state='pretrain'):
'''assigns siw to different illumination.'''
new_list = []
assert state in ['pretrain', 'ft'], "Please offer the right stage."
target_list = ['0', '1', '3', '4'] if state == 'pretrain' else ['2']
for _ in old_list:
sub_id = _.split('/')[-1]
label_cur = self.illu_dict[sub_id]
if label_cur in target_list:
new_list.append(_)
return new_list
def _filter_ill_oulu(self, old_list=[], state='pretrain'):
'''assigns oulu to different illumination.'''
new_list = []
assert state in ['pretrain', 'ft'], "Please offer the right stage."
target_list = ['1', '2'] if state == 'pretrain' else ['3']
sess_3_subs = ['5','10','15','20','25','30','35','40','50','60'] # session#3 subject.
for _ in old_list:
device_id, sess_id, sub_id, sp_id = _.split('/')[-1].split('_')
if (sess_id in target_list) or (sub_id not in target_list and sub_id in sess_3_subs):
new_list.append(_)
return new_list
def illu_list_gen(self, li_list, sp_list, dataset_name, state):
'''calls functions to assign either oulu or siw into different illuminations.'''
if dataset_name == 'Oulu':
return self._filter_ill_oulu(li_list, state), self._filter_ill_oulu(sp_list, state)
elif dataset_name == 'SiW':
return self._filter_ill_siw(li_list, state), self._filter_ill_siw(sp_list, state)
else:
return
def _construct_ill_dict(self, dataset_name):
'''each subject is associated with one illumination.'''
csv_file_name = "/user/guoxia11/cvl/anti_spoofing/illumination_estimation/DPR/combine_label_illu.csv"
csv_file = open(csv_file_name)
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count != 0 and "siwm" not in row[0]:
sub_id, label = row[0].split('/')[-2], row[1]
self.illu_dict[sub_id] = label
line_count += 1
csv_file.close()
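# NOTE: the parsing above implies a CSV layout like the following (an
# assumption inferred from this code, not taken from the dataset itself):
#
#   path,illumination
#   .../live/<sub_id>/<frame>.png,1
#
# i.e. the first row is a header, row[0] is a path whose second-to-last
# component is the subject id, rows containing "siwm" are skipped, and
# row[1] is the illumination label.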
def compile(self, dataset_name='SiW'):
'''generates train and test list for SIW and Oulu.'''
assert dataset_name in ['SiW', 'Oulu'], "Please offer the correct dataset."
# Training data.
#########################################################
## In Oulu subject # in A is 21, B, C and D are around 6.
## In SiW subject # in A, B, C and D are 80, 15, 13 and 7.
if self.phase == 'pretrain':
filenames = file_reader(self.pretrain_train)
elif self.phase == 'ft':
filenames = file_reader(self.type_train)
elif self.phase == 'ub':
filenames0 = file_reader(self.pretrain_train)
filenames1 = file_reader(self.type_train)
filenames = filenames0 + filenames1
if dataset_name == 'SiWM-v2':
self._construct_ill_dict(dataset_name)
if self.phase=='pretrain':
## the new version pretrain has all 1,2 but small amount of 3.
self.LI_DATA_DIR, self.SP_DATA_DIR = self.search_folder_wrapper(self.root_dir, filenames)
self.LI_DATA_DIR, self.SP_DATA_DIR = self.illu_list_gen(self.LI_DATA_DIR, self.SP_DATA_DIR, dataset_name, state='pretrain')
elif self.phase=='ft':
## the new version ft in ill, it combines BCD, with small amount of 1,2, most 3.
self.LI_DATA_DIR, self.SP_DATA_DIR = self.search_folder_wrapper(self.root_dir, filenames)
if self.type == 'illu':
self.LI_DATA_DIR, self.SP_DATA_DIR = self.illu_list_gen(self.LI_DATA_DIR, self.SP_DATA_DIR, dataset_name, state='ft')
elif self.phase=='ub':
self.LI_DATA_DIR, self.SP_DATA_DIR = self.search_folder_wrapper(self.root_dir, filenames0)
new_li_pre, new_sp_pre = self.illu_list_gen(self.LI_DATA_DIR, self.SP_DATA_DIR, dataset_name, state='pretrain')
self.LI_DATA_DIR, self.SP_DATA_DIR = self.search_folder_wrapper(self.root_dir, filenames1)
if self.type=='illu':
new_li_ft, new_sp_ft = self.illu_list_gen(self.LI_DATA_DIR, self.SP_DATA_DIR, dataset_name, state='ft')
else:
new_li_ft, new_sp_ft = self.LI_DATA_DIR, self.SP_DATA_DIR
self.LI_DATA_DIR = new_li_pre + new_li_ft
self.SP_DATA_DIR = new_sp_pre + new_sp_ft
else:
assert False, "Please offer a right phase to work."
# Val/Test data.
with open(self.pretrain_test, 'r') as f:
filenames = f.read().split('\n')
self.LI_DATA_DIR_TEST, self.SP_DATA_DIR_TEST = self.search_folder_wrapper(self.root_dir, filenames)
with open(self.type_test, 'r') as f:
filenames = f.read().split('\n')
self.LI_DATA_DIR_TEST_B, self.SP_DATA_DIR_TEST_B = self.search_folder_wrapper(self.root_dir, filenames)
class Config_oulu(Config):
def __init__(self, args):
super().__init__(args)
self.dataset = 'oulu'
self.BATCH_SIZE = 1
self.root_dir = "/user/guoxia11/cvlshare/Databases/Oulu/bin/"
root_dir_id = '/user/guoxia11/cvl/anti_spoofing/stats_update/oulu_datalist/'
self.pretrain_train = root_dir_id + 'A_train_oulu.txt'
self.pretrain_test = root_dir_id + 'A_test_oulu.txt'
if self.type == 'age':
self.type_train = root_dir_id + 'C_train_oulu.txt'
self.type_test = root_dir_id + 'C_test_oulu.txt'
elif self.type == 'spoof':
self.type_train = root_dir_id + 'B_train_oulu.txt'
self.type_test = root_dir_id + 'B_test_oulu.txt'
elif self.type == 'race':
self.type_train = root_dir_id + 'D_train_oulu.txt'
self.type_test = root_dir_id + 'D_test_oulu.txt'
elif self.type == 'illu':
self.type_train = root_dir_id + 'E_train_oulu.txt'
self.type_test = root_dir_id + 'E_test_oulu.txt'
else:
assert False, "wait to implement..."
# overriding abstract method
def search_folder(self, root_dir, sub_id, stype):
if stype == 'Live':
folder_list = glob.glob(root_dir+f'train/live/*{sub_id}*')
folder_list += glob.glob(root_dir+f'eval/live/*{sub_id}*')
folder_list += glob.glob(root_dir+f'test/live/*{sub_id}*')
elif stype == 'Spoof':
folder_list = glob.glob(root_dir+f'train/spoof/*{sub_id}*')
folder_list += glob.glob(root_dir+f'eval/spoof/*{sub_id}*')
folder_list += glob.glob(root_dir+f'test/spoof/*{sub_id}*')
else:
assert False, "Please offer a valid stype here."
return folder_list
# overriding abstract method
def search_folder_wrapper(self, root_dir, filenames):
li_list, sp_list = [], []
for x in filenames:
if x not in ["0", ""]:
sub_id = '0'+x if len(x) == 1 else x
li_list += self.search_folder(root_dir=root_dir, sub_id=sub_id, stype="Live")
sp_list += self.search_folder(root_dir=root_dir, sub_id=sub_id, stype="Spoof")
return li_list, sp_list
class Config_siw(Config):
def __init__(self, args):
super().__init__(args)
self.dataset = "SiW"
self.BATCH_SIZE = 1
root_dir_id = '/user/guoxia11/cvl/anti_spoofing/stats_update/SiW_datalist/'
self.root_dir = "/user/guoxia11/cvlshare/Databases/SiW/bin/"
self.pretrain_train = root_dir_id + 'A_train_sub_id_siw.txt'
self.pretrain_test = root_dir_id + 'A_test_sub_id_siw.txt'
if self.type == 'age':
self.type_train = root_dir_id + 'C_train_sub_id_siw.txt'
self.type_test = root_dir_id + 'C_test_sub_id_siw.txt'
elif self.type == 'spoof':
self.type_train = root_dir_id + 'B_train_sub_id_siw.txt'
self.type_test = root_dir_id + 'B_test_sub_id_siw.txt'
elif self.type == 'race':
self.type_train = root_dir_id + 'D_train_sub_id_siw.txt'
self.type_test = root_dir_id + 'D_test_sub_id_siw.txt'
elif self.type == 'illu':
self.type_train = root_dir_id + 'E_train_sub_id_siw.txt'
self.type_test = root_dir_id + 'E_test_sub_id_siw.txt'
else:
assert False, "wait to implement..."
# overriding abstract method
def search_folder(self, root_dir, sub_id, stype):
if stype == 'Live':
folder_list = glob.glob(root_dir+f'train/live/{sub_id}*')
folder_list += glob.glob(root_dir+f'test/live/{sub_id}*')
elif stype == 'Spoof':
folder_list = glob.glob(root_dir+f'train/spoof/{sub_id}*')
folder_list += glob.glob(root_dir+f'test/spoof/{sub_id}*')
else:
assert False, "Please offer a valid stype here."
return folder_list
# overriding abstract method
def search_folder_wrapper(self, root_dir, filenames):
li_list, sp_list = [], []
for x in filenames:
if x not in ["0", ""]:
digit_len = len(x)
if digit_len == 1:
sub_id = '00'+x
elif digit_len == 2:
sub_id = '0'+x
else:
sub_id = x
li_list += self.search_folder(root_dir=root_dir, sub_id=sub_id, stype="Live")
sp_list += self.search_folder(root_dir=root_dir, sub_id=sub_id, stype="Spoof")
return li_list, sp_list
class Config_siwm(Config):
def __init__(self, args):
super().__init__(args)
self.dataset = "SiWM-v2"
self.BATCH_SIZE = 2
root_dir_id = "/user/guoxia11/cvl/anti_spoofing/"
## 1707 samples, with 940 subjects; balanced among subjects.
self.pretrain_train = root_dir_id + 'spoof_type_list/pretrain_A_train_balanced.txt'
self.pretrain_test = root_dir_id + 'spoof_type_list/pretrain_A_test.txt'
if self.type == 'age':
self.type_train = root_dir_id + "age_list/list/age_B_train_ub.txt"
self.type_test = root_dir_id + "age_list/list/age_B_test.txt"
elif self.type == 'spoof':
## 1707 samples, with 65 subjects; balanced among subjects.
self.type_train = root_dir_id + "spoof_type_list/B_train_spoof_balanced_ub.txt"
self.type_test = root_dir_id + "spoof_type_list/B_test_spoof.txt"
elif self.type == 'race':
self.type_train = root_dir_id + "race_list/race_small_B_train_ub.txt"
self.type_test = root_dir_id + "race_list/race_B_test.txt"
elif self.type == 'illu':
self.type_train = root_dir_id + "age_list/list/ill_E_train_ub.txt"
self.type_test = root_dir_id + "age_list/list/ill_E_test.txt"
else:
assert False, "wait to implement..."
# overriding the compile method.
def compile(self, dataset_name='SiWM-v2'):
'''generates train and test list for SIW-Mv2.'''
# Train data.
## GX: compile_siwm does not have filter_out process for the new illumination.
self.SP_DATA_DIR, self.LI_DATA_DIR = [], []
if self.phase == 'pretrain':
with open(self.pretrain_train, 'r') as f:
filenames = f.read().split('\n')
elif self.phase == 'ft':
with open(self.type_train, 'r') as f:
filenames = f.read().split('\n')
elif self.phase == 'ub':
with open(self.pretrain_train, 'r') as f:
filenames = f.read().split('\n')
new_pretrain = filenames
with open(self.type_train, 'r') as f:
filenames = f.read().split('\n')
filenames = new_pretrain + filenames
for x in filenames:
if x == '':
continue
elif 'Live' not in x:
self.SP_DATA_DIR.append('/user/guoxia11/cvlshare/cvl-guoxia11/Spoof/'+x)
else:
self.LI_DATA_DIR.append('/user/guoxia11/cvlshare/cvl-guoxia11/Live/'+x)
# Test_A data.
self.SP_DATA_DIR_TEST, self.LI_DATA_DIR_TEST = [], []
with open(self.pretrain_test, 'r') as f:
filenames = f.read().split('\n')
for x in filenames:
if x == '':
continue
elif 'Live' not in x:
self.SP_DATA_DIR_TEST.append('/user/guoxia11/cvlshare/cvl-guoxia11/Spoof/'+x)
else:
self.LI_DATA_DIR_TEST.append('/user/guoxia11/cvlshare/cvl-guoxia11/Live/'+x)
# Test_B data.
self.SP_DATA_DIR_TEST_B, self.LI_DATA_DIR_TEST_B = [], []
with open(self.type_test, 'r') as f:
filenames = f.read().split('\n')
for x in filenames:
if x == '':
continue
elif 'Live' not in x:
self.SP_DATA_DIR_TEST_B.append('/user/guoxia11/cvlshare/cvl-guoxia11/Spoof/'+x)
else:
self.LI_DATA_DIR_TEST_B.append('/user/guoxia11/cvlshare/cvl-guoxia11/Live/'+x)
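# A minimal usage sketch (illustrative, not part of the original file): the
# Config subclasses are driven by an argparse-style namespace, mirroring the
# parsers in this repo's train/test scripts. Paths inside the classes are
# cluster-specific and would need to be adapted.
#
# from argparse import Namespace
# args = Namespace(epoch=60, cuda=0, stage='pretrain', type='spoof', set='all')
# cfg = Config_siw(args)
# cfg.compile(dataset_name='SiW')
# print(len(cfg.LI_DATA_DIR), len(cfg.SP_DATA_DIR))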
File: Multi-domain-learning-FAS-main/source_multi_domain/README.md

# Multi-domain Learning for Updating Face Anti-spoofing Models
<p align="center">
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_multi_domain/figures/overall_architecture.jpg" alt="drawing" width="1000"/>
</p>
This page contains the official implementation of our ECCV2022 oral paper "Multi-domain Learning for Updating Face Anti-spoofing Models". [[Arxiv]](https://arxiv.org/pdf/2208.11148.pdf) [[SiW-Mv2 Dataset]](http://cvlab.cse.msu.edu/pdfs/guo_liu_jain_liu_eccv2022_supp.pdf)
**Our algorithm has been officially accepted and delivered to the [IARPA ODIN](https://www.iarpa.gov/research-programs/odin) program**!
Authors: [Xiao Guo](https://scholar.google.com/citations?user=Gkc-lAEAAAAJ&hl=en), [Yaojie Liu](https://yaojieliu.github.io/), [Anil Jain](https://www.cse.msu.edu/~jain/), [Xiaoming Liu](http://cvlab.cse.msu.edu/)
## Dataset
The FASMD Dataset is constructed on SiW-Mv2, SiW, and Oulu-NPU. It consists of five sub-datasets: dataset A is the
source domain dataset, and B, C, D and E are four target domain datasets.
<p align="center">
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_multi_domain/figures/Dataset_demo.png" alt="drawing" width="800"/>
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_multi_domain/figures/distribution.png" alt="drawing" width="800"/>
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_multi_domain/figures/age_gallery.png" alt="drawing" width="800"/>
</p>
## Train and Inference
- After setting up the dataset path, you can run the training code as shown below:
```
python train_architecture.py
```
- To run the testing code, which saves the scores into a CSV file:
```
python test_architecture.py
```
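- Both scripts expose further command-line options (see the argument parser in `test_architecture.py`); for example, to evaluate the illumination protocol on a specific GPU, one could run something like:
```
python test_architecture.py --stage ft --type illu --cuda 0 --epoch_eval 59
```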
## Pre-trained model
The pre-trained model can be found in [link](https://drive.google.com/drive/folders/1CHIzOUyy3YvpDi-gP6nCIdOPHJWWxQQo?usp=sharing), or you can find it in the `source/save_model_trained` folder.
## Reference
If you would like to use our work, please cite:
```Bibtex
@inproceedings{xiaoguo2022MDFAS,
title={Multi-domain Learning for Updating Face Anti-spoofing Models},
author={Guo, Xiao and Liu, Yaojie and Jain, Anil and Liu, Xiaoming},
booktitle={ECCV},
year={2022}
}
```
This GitHub repo will continue to be updated in the near future. If you have any question, please contact: [Xiao Guo](guoxia11@msu.edu)
File: Multi-domain-learning-FAS-main/source_multi_domain/parameters.py

# Copyright 2022
#
# Authors: Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu.
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
uv =[[0.19029412,0.19795537 ,0.21318457 ,0.22828290 ,0.24970947 ,0.28816611 ,0.33394283 ,0.39239809 ,0.47876307 ,0.56515092 ,0.62323409 ,0.66867208 ,0.70676976 ,0.72820741 ,0.74272829 ,0.75663871 ,0.76398379 ,0.25338903 ,0.28589997 ,0.32738855 ,0.36722445 ,0.40321609 ,0.55088127 ,0.58705842 ,0.62712812 ,0.66933709 ,0.70184904 ,0.47813031 ,0.47830373 ,0.47872066 ,0.47870359 ,0.43102017 ,0.45095450 ,0.47804111 ,0.50489837 ,0.52461874 ,0.30827355 ,0.33330417 ,0.36890128 ,0.40203944 ,0.37214473 ,0.33496466 ,0.55122417 ,0.58458656 ,0.62106317 ,0.64688802 ,0.61956245 ,0.58191341 ,0.37796655 ,0.41338006 ,0.45562238 ,0.47811818 ,0.50052267 ,0.54254669 ,0.57570505 ,0.54044306 ,0.51024377 ,0.47821599 ,0.44642609 ,0.41657540 ,0.38790068 ,0.44901687 ,0.47766650 ,0.50653827 ,0.56918079 ,0.50583494 ,0.47757983 ,0.44971457],
[0.55190903,0.47428983 ,0.40360034 ,0.33980367 ,0.27118790 ,0.21624640 ,0.18327993 ,0.15577883 ,0.14014046 ,0.15676366 ,0.18313733 ,0.21531384 ,0.26951864 ,0.33780637 ,0.40212137 ,0.47324431 ,0.55168754 ,0.63735390 ,0.66241443 ,0.67068136 ,0.66713846 ,0.65712863 ,0.65805173 ,0.66828096 ,0.67205220 ,0.66368717 ,0.63796753 ,0.58252430 ,0.53523010 ,0.48812559 ,0.44775373 ,0.41256407 ,0.40846801 ,0.40317070 ,0.40854913 ,0.41281027 ,0.58095986 ,0.59604895 ,0.59652811 ,0.57966459 ,0.57139677 ,0.56953919 ,0.57967824 ,0.59695679 ,0.59599525 ,0.58050835 ,0.57008123 ,0.57134289 ,0.31730300 ,0.34064898 ,0.35593933 ,0.35154018 ,0.35593045 ,0.34062389 ,0.31715956 ,0.30086508 ,0.28950119 ,0.28752795 ,0.28963783 ,0.30076182 ,0.31932616 ,0.32959232 ,0.33032984 ,0.32936266 ,0.31900606 ,0.32014942 ,0.31873652 ,0.32043788],
[0.54887491,0.55835652 ,0.56531715 ,0.58029217 ,0.61638439 ,0.68007606 ,0.75769442 ,0.82921398 ,0.85709274 ,0.82894272 ,0.75751764 ,0.68032110 ,0.61664295 ,0.58068472 ,0.56520522 ,0.55785143 ,0.54947090 ,0.79504120 ,0.84203368 ,0.87477297 ,0.89484525 ,0.90437353 ,0.90412331 ,0.89423305 ,0.87385195 ,0.84139013 ,0.79445726 ,0.91648984 ,0.95176858 ,0.98838627 ,0.99706292 ,0.91018295 ,0.92791700 ,0.93613458 ,0.92778808 ,0.90999144 ,0.82165444 ,0.85368645 ,0.85440493 ,0.84463143 ,0.85324180 ,0.84432119 ,0.84337026 ,0.85280263 ,0.85272932 ,0.82140154 ,0.84402239 ,0.85248041 ,0.86857969 ,0.91266698 ,0.93638903 ,0.93873996 ,0.93629760 ,0.91227442 ,0.86774820 ,0.90530455 ,0.92216164 ,0.92610627 ,0.92281538 ,0.90596151 ,0.87151438 ,0.91635096 ,0.92336667 ,0.91626322 ,0.87006092 ,0.91713434 ,0.92056626 ,0.91682398]]
lm_ref = [[42.022587,44.278061,48.761536,53.206482,59.514465,70.836105,84.312767,101.52200,126.94785,152.38043,169.48012,182.85706,194.07301,200.38426,204.65921,208.75444,210.91682,60.597733,70.168953,82.383194,94.110878,104.70682,148.17944,158.83000,170.62653,183.05284,192.62436,126.76157,126.81262,126.93536,126.93034,112.89234,118.76100,126.73531,134.64207,140.44775,76.755737,84.124748,94.604538,104.36041,95.559410,84.613594,148.28040,158.10228,168.84100,176.44383,168.39919,157.31531,97.273354,107.69909,120.13522,126.75800,133.35388,145.72574,155.48756,145.10645,136.21576,126.78679,117.42784,108.63980,100.19796,118.19057,126.62502,135.12486,153.56682,134.91780,126.59950,118.39597],
[94.517975,117.36908,138.18005,156.96179,177.16229,193.33707,203.04239,211.13872,215.74265,210.84879,203.08437,193.61160,177.65372,157.54980,138.61548,117.67688,94.583191,69.363007,61.985199,59.551407,60.594437,63.541336,63.269577,60.258087,59.147827,61.610504,69.182358,85.504852,99.428253,113.29582,125.18130,135.54114,136.74701,138.30655,136.72314,135.46866,85.965424,81.523193,81.382126,86.346741,88.780792,89.327667,86.342728,81.255920,81.539001,86.098343,89.168091,88.796661,163.58600,156.71295,152.21146,153.50656,152.21408,156.72034,163.62823,168.42532,171.77084,172.35178,171.73062,168.45572,162.99039,159.96802,159.75090,160.03563,163.08463,162.74802,163.16397,162.66309]]
RANDOM_SEED = 123456789
REPEAT_TIME_LI = 3500
REPEAT_TIME_SP = 2000
SAMPLE_NUM_TRAIN = 20000
SAMPLE_NUM_TEST = 500
File: Multi-domain-learning-FAS-main/source_multi_domain/test_architecture.py

# -*- coding: utf-8 -*-
# Copyright 2022
#
# Authors: Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu.
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
import tensorflow as tf
import argparse
import os
import time
import math
import csv
import numpy as np
from tqdm import tqdm
from model import Generator, Discriminator, region_estimator
from utils import Logging
from dataset import Dataset
from config import Config_siwm, Config_siw, Config_oulu
from metrics import my_metrics
from tensorboardX import SummaryWriter
class SRENet(object):
"""
the SRENet class.
Attributes:
-----------
configurations: config, config_siw, and config_oulu.
modules: gen_pretrained, gen, RE, multi-disc and optimizers.
various directories for checkpoints.
log: log handler.
Methods:
-----------
inference functions: inference, test_step, and test_step_helper.
"""
def __init__(self, config, config_siw, config_oulu):
self.config = config
self.config_siw = config_siw
self.config_oulu = config_oulu
self.lr = config.lr
self.bs = config.BATCH_SIZE + config_siw.BATCH_SIZE + config_oulu.BATCH_SIZE
self.SUMMARY_WRITER = config.SUMMARY_WRITER
## The modules:
self.gen_pretrained = Generator()
self.RE = region_estimator()
self.gen = Generator(self.RE)
self.disc1 = Discriminator(1,config.n_layer_D)
self.disc2 = Discriminator(2,config.n_layer_D)
self.disc3 = Discriminator(4,config.n_layer_D)
self.gen_opt = tf.keras.optimizers.Adam(self.lr)
# Checkpoint initialization.
self.save_dir = config.save_model_dir
self.checkpoint_path_g = self.save_dir+"/gen/cp-{epoch:04d}.ckpt"
self.checkpoint_path_re = self.save_dir+"/ReE/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d1 = self.save_dir+"/dis1/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d2 = self.save_dir+"/dis2/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d3 = self.save_dir+"/dis3/cp-{epoch:04d}.ckpt"
self.checkpoint_path_g_op = self.save_dir+"/g_opt/cp-{epoch:04d}.ckpt"
self.checkpoint_dir_g = os.path.dirname(self.checkpoint_path_g)
self.checkpoint_dir_re = os.path.dirname(self.checkpoint_path_re)
self.checkpoint_dir_d1 = os.path.dirname(self.checkpoint_path_d1)
self.checkpoint_dir_d2 = os.path.dirname(self.checkpoint_path_d2)
self.checkpoint_dir_d3 = os.path.dirname(self.checkpoint_path_d3)
self.checkpoint_dir_g_op = os.path.dirname(self.checkpoint_path_g_op)
self.model_list = [self.gen, self.RE, self.disc1, self.disc2, self.disc3]
self.model_p_list= [self.checkpoint_path_re,
self.checkpoint_path_g,
self.checkpoint_path_d1,
self.checkpoint_path_d2,
self.checkpoint_path_d3]
self.model_d_list= [self.checkpoint_dir_re,
self.checkpoint_dir_g,
self.checkpoint_dir_d1,
self.checkpoint_dir_d2,
self.checkpoint_dir_d3]
# Log class for displaying the losses.
self.log = Logging(config)
self.csv_file = open(self.config.csv_file_name, mode='w')
#############################################################################
def inference(self, config):
'''the main inference entrance.'''
## setup the csv handler.
self.csv_writer = csv.writer(self.csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_title = ['Video name', 'Dataset', 'Depth', 'Region', 'Content', 'Additive', 'Label', 'test_mode']
self.csv_writer.writerow(csv_title)
for model_, model_dir_ in zip(self.model_list, self.model_d_list):
epoch_suffix = f"/cp-{59:04d}.ckpt"
current_checkpoint = model_dir_ + epoch_suffix
model_.load_weights(current_checkpoint).expect_partial()
print(f"loading weights for {model_dir_}.")
## inference.
start = time.time()
for test_mode in ['test_A']:
update_list = self.test_step(test_mode)
assert len(update_list[-1]) == len(update_list[0]), "Their length should match."
print ('\n*****Time for epoch {} is {} sec*****'.format(self.config.epoch_eval+1, int(time.time()-start)))
self.csv_file.close()
self.SUMMARY_WRITER.close()
def test_step(self, test_mode, viz_mode=False):
"""gathers results from three datasets."""
result_update = []
result_siwm = self.test_step_helper(self.config, test_mode, viz_mode, prefix_dataset='SIWM')
result_siw = self.test_step_helper(self.config_siw, test_mode, viz_mode, prefix_dataset='SIW')
result_oulu = self.test_step_helper(self.config_oulu, test_mode, viz_mode, prefix_dataset='Oulu')
for i in range(len(result_siwm)):
update_res = result_siwm[i] + result_siw[i] + result_oulu[i]
result_update.append(update_res)
return result_update
def test_step_helper(self, config_cur, test_mode, viz_mode=False, prefix_dataset=None):
tmp_bs = config_cur.BATCH_SIZE
config_cur.BATCH_SIZE = 16
dataset_test = Dataset(config_cur, test_mode+'_csv')
img_num = len(dataset_test.name_list)
num_step = int(img_num/config_cur.BATCH_SIZE)
d_score, p_score, c_score, n_score, label_lst = [],[],[],[],[]
for step in tqdm(range(num_step)):
img, img_name = dataset_test.nextit()
img_name = img_name.numpy().tolist()
d, p, c, n, p_area = self._test_graph(img)
# d, p, c, n = d.numpy(), p.numpy(), c.numpy(), n.numpy()
d_score.extend(d.numpy())
p_score.extend(p.numpy())
c_score.extend(c.numpy())
n_score.extend(n.numpy())
for i in range(config_cur.BATCH_SIZE):
img_name_cur = img_name[i].decode('UTF-8')
if ("Live" in img_name_cur) or ("live" in img_name_cur):
label_cur = 0
else:
label_cur = 1
label_lst.append(label_cur)
self.csv_writer.writerow([img_name_cur, prefix_dataset, d_score[i], p_score[i],
c_score[i], n_score[i], label_cur, test_mode])
self.log.step = step
config_cur.BATCH_SIZE = tmp_bs
return d_score, p_score, c_score, n_score, label_lst
@tf.function
def _test_graph(self, img):
"""
model outputs the result.
dmap_pred, p_area, c, n are depth, region, content, and additive traces.
"""
dmap_pred, p_area, c, n, x, region_map = self.gen(img, training=False)
d = tf.reduce_mean(dmap_pred[:,:,:,0], axis=[1,2])
p = tf.reduce_mean(p_area, axis=[1,2,3])
c = tf.reduce_mean(c, axis=[1,2,3])
n = tf.reduce_mean(n, axis=[1,2,3])
return d, p, c, n, p_area
def main(args):
# Base Configuration Class
config, config_siw, config_oulu = Config_siwm(args), Config_siw(args), Config_oulu(args)
config.lr = args.lr
config.type = args.type
config.epoch_eval = args.epoch_eval
config.pretrain_folder = args.pretrain_folder
config.desc_str = '_trained'
config.root_dir = './log'+config.desc_str
config.exp_dir = '/exp'+config.desc_str
config.CHECKPOINT_DIR = config.root_dir+config.exp_dir
config.tb_dir = './tb_logs'+config.desc_str
config.csv_file_name = config.root_dir+'/res'+config.desc_str+'.csv'
config.save_model_dir = "./save_model"+config.desc_str
config.SUMMARY_WRITER = SummaryWriter(config.tb_dir)
os.makedirs(config.root_dir, exist_ok=True)
os.makedirs(config.save_model_dir, exist_ok=True)
os.makedirs(config.CHECKPOINT_DIR, exist_ok=True)
os.makedirs(config.CHECKPOINT_DIR+'/test', exist_ok=True)
print('**********************************************************')
print(f"Making root folder: {config.root_dir}")
print(f"Current exp saved into folder: {config.CHECKPOINT_DIR}")
print(f"The tensorboard results are saved into: {config.tb_dir}")
print(f"The trained weights saved into folder: {config.save_model_dir}")
print('**********************************************************')
config.compile(dataset_name='SiWM-v2')
config_siw.compile(dataset_name='SiW')
config_oulu.compile(dataset_name='Oulu')
print('**********************************************************')
srenet = SRENet(config, config_siw, config_oulu)
srenet.inference(config)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', type=int, default=6, help='The gpu num to use.')
parser.add_argument('--stage', type=str, default='ft', choices=['ft','pretrain','ub'])
parser.add_argument('--type', type=str, default='spoof', choices=['spoof','age','race','illu'])
parser.add_argument('--set', type=str, default='all', help='To choose from the predefined 14 types.')
parser.add_argument('--epoch', type=int, default=60, help='How many epochs to train the model.')
parser.add_argument('--data', type=str, default='all', choices=['all','SiW','SiWM','oulu'])
parser.add_argument('--lr', type=float, default=1e-7, help='The starting learning rate.')
parser.add_argument('--decay_step', type=int, default=2, help='The learning rate decay step.')
parser.add_argument('--pretrain_folder', type=str, default='./', help='Deprecated function.')
parser.add_argument('--epoch_eval', type=int, default=0, help='Which epoch checkpoint to evaluate.')
args = parser.parse_args()
main(args)
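# Example invocation (illustrative; the flags are defined by the parser above):
# python test_architecture.py --stage ft --type spoof --cuda 0 --epoch_eval 59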
File: Multi-domain-learning-FAS-main/source_multi_domain/metrics.py

# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
from sklearn import metrics
import numpy as np
def get_tpr_at_fpr(tpr_lst, fpr_lst, score_lst, fpr_value):
"""returns the true positive rate and threshold at a given false positive rate."""
abs_fpr = np.absolute(fpr_lst - fpr_value)
idx_min = np.argmin(abs_fpr)
fpr_value_target = fpr_lst[idx_min]
idx = np.max(np.where(fpr_lst == fpr_value_target))
return tpr_lst[idx], score_lst[idx]
def my_metrics(label_list, pred_list, val_phase=False):
"""
computes FAS metrics.
Parameters:
val_phase (bool): flag for train and test stage.
"""
fpr, tpr, scores = metrics.roc_curve(label_list,pred_list,
drop_intermediate=True)
auc_score = metrics.auc(fpr,tpr)
fnr = 1 - tpr
tnr = 1 - fpr
EER0 = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
EER1 = fnr[np.nanargmin(np.absolute((fnr - fpr)))]
EER = min(EER0, EER1)
best_ACER, best_AP, best_BP = 100, 100, 100
best_threshold = 100
for idx_ in range(len(tpr)):
_tpr, _fpr = tpr[idx_], fpr[idx_]
_tnr, _fnr = tnr[idx_], fnr[idx_]
assert abs(_tpr + _fnr - 1) < 1e-6, (_tpr, _fnr)
assert abs(_tnr + _fpr - 1) < 1e-6, (_tnr, _fpr)
# https://chalearnlap.cvc.uab.cat/challenge/33/track/33/metrics/
APCER = _fpr/(_fpr+_tnr)
BPCER = _fnr/(_fnr+_tpr)
ACER = 0.5 * (APCER+BPCER)
if ACER < best_ACER:
best_ACER = ACER
best_AP = APCER
best_BP = BPCER
best_threshold = scores[idx_]
## fnr == 0.5% as the first PAMI paper version.
abs_fnr = np.absolute(fnr - 0.005)
idx = np.argmin(abs_fnr)
res_tpr = tpr[idx]
if not val_phase:
tpr_h, _ = get_tpr_at_fpr(tpr, fpr, scores, 0.005)
tpr_m, _ = get_tpr_at_fpr(tpr, fpr, scores, 0.01)
tpr_l, _ = get_tpr_at_fpr(tpr, fpr, scores, 0.02)
return best_AP, best_BP, best_ACER, EER, res_tpr, auc_score, [tpr_h, tpr_m, tpr_l]
else:
return best_AP, best_BP, best_ACER, EER, res_tpr, auc_score
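# A minimal usage sketch (illustrative, not part of the original file):
# my_metrics consumes parallel lists of binary labels (0 = live, 1 = spoof)
# and scalar scores, as produced by the test scripts in this repo.
#
# labels = [0, 0, 1, 1]
# scores = [0.1, 0.3, 0.8, 0.9]
# ap, bp, acer, eer, tpr, auc = my_metrics(labels, scores, val_phase=True)
# print(f"ACER={acer:.4f} EER={eer:.4f} AUC={auc:.4f}")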
File: Multi-domain-learning-FAS-main/source_multi_domain/warp.py

import tensorflow as tf
import cv2
import numpy as np
from scipy.ndimage.interpolation import map_coordinates as sp_map_coordinates
import matplotlib.tri as mtri
def tf_flatten(a):
"""Flatten tensor"""
return tf.reshape(a, [-1])
def tf_repeat(a, repeats, axis=0):
"""TensorFlow version of np.repeat for 1D"""
# https://github.com/tensorflow/tensorflow/issues/8521
assert len(a.get_shape()) == 1
a = tf.expand_dims(a, -1)
a = tf.tile(a, [1, repeats])
a = tf_flatten(a)
return a
def tf_repeat_2d(a, repeats):
"""Tensorflow version of np.repeat for 2D"""
assert len(a.get_shape()) == 2
a = tf.expand_dims(a, 0)
a = tf.tile(a, [repeats, 1, 1])
return a
def tf_map_coordinates(input, coords, order=1):
"""Tensorflow version of scipy.ndimage.map_coordinates
Note that coords is transposed and only 2D is supported
Parameters
----------
input : tf.Tensor. shape = (s, s)
coords : tf.Tensor. shape = (n_points, 2)
"""
assert order == 1
coords_lt = tf.cast(tf.math.floor(coords), 'int32')
coords_rb = tf.cast(tf.math.ceil(coords), 'int32')
coords_lb = tf.stack([coords_lt[:, 0], coords_rb[:, 1]], axis=1)
coords_rt = tf.stack([coords_rb[:, 0], coords_lt[:, 1]], axis=1)
vals_lt = tf.gather_nd(input, coords_lt)
vals_rb = tf.gather_nd(input, coords_rb)
vals_lb = tf.gather_nd(input, coords_lb)
vals_rt = tf.gather_nd(input, coords_rt)
coords_offset_lt = coords - tf.cast(coords_lt, 'float32')
vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, 0]
vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, 0]
mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, 1]
return mapped_vals
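# A minimal usage sketch (illustrative): bilinear sampling of one 2D map at
# fractional coordinates. Shapes are assumptions chosen for demonstration.
#
# feat = tf.random.uniform([8, 8])              # (s, s)
# pts = tf.constant([[1.5, 2.25], [3.0, 4.5]])  # (n_points, 2)
# vals = tf_map_coordinates(feat, pts)          # (n_points,)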
def sp_batch_map_coordinates(inputs, coords):
"""Reference implementation for batch_map_coordinates"""
coords = coords.clip(0, inputs.shape[1] - 1)
mapped_vals = np.array([
sp_map_coordinates(input, coord.T, mode='nearest', order=1)
for input, coord in zip(inputs, coords)
])
return mapped_vals
def tf_batch_map_coordinates(_input, coords, order=1):
"""Batch version of tf_map_coordinates
Only supports 2D feature maps
Parameters
----------
input : tf.Tensor. shape = (b, s, s)
coords : tf.Tensor. shape = (b, n_points, 2)
"""
input_shape = tf.shape(_input)
batch_size = input_shape[0]
input_size = input_shape[1]
n_coords = tf.shape(coords)[1]
coords = tf.clip_by_value(coords, 0, tf.cast(input_size, 'float32') - 1)
coords_lt = tf.cast(tf.math.floor(coords), 'int32')
coords_rb = tf.cast(tf.math.ceil(coords), 'int32')
coords_lb = tf.stack([coords_lt[..., 0], coords_rb[..., 1]], axis=-1)
coords_rt = tf.stack([coords_rb[..., 0], coords_lt[..., 1]], axis=-1)
idx = tf_repeat(tf.range(batch_size), n_coords)
def _get_vals_by_coords(__input, coords):
indices = tf.stack([
idx, tf_flatten(coords[..., 0]), tf_flatten(coords[..., 1])
], axis=-1)
vals = tf.gather_nd(__input, indices)
vals = tf.reshape(vals, (batch_size, n_coords, __input.shape[3]))
return vals
vals_lt = _get_vals_by_coords(_input, coords_lt)
vals_rb = _get_vals_by_coords(_input, coords_rb)
vals_lb = _get_vals_by_coords(_input, coords_lb)
vals_rt = _get_vals_by_coords(_input, coords_rt)
coords_offset_lt = coords - tf.cast(coords_lt, 'float32')
offset_0 =coords_offset_lt[..., 0]
offset_1 =coords_offset_lt[..., 1]
offset_0 = tf.reshape(offset_0, [offset_0.shape[0], offset_0.shape[1], 1])
offset_1 = tf.reshape(offset_1, [offset_1.shape[0], offset_1.shape[1], 1])
vals_t = vals_lt + (vals_rt - vals_lt) * offset_0
vals_b = vals_lb + (vals_rb - vals_lb) * offset_0
mapped_vals = vals_t + (vals_b - vals_t) * offset_1
return mapped_vals
def sp_batch_map_offsets(input, offsets):
"""Reference implementation for tf_batch_map_offsets"""
batch_size = input.shape[0]
input_size = input.shape[1]
offsets = offsets.reshape(batch_size, -1, 2)
grid = np.stack(np.mgrid[:input_size, :input_size], -1).reshape(-1, 2)
grid = np.repeat([grid], batch_size, axis=0)
coords = offsets + grid
coords = coords.clip(0, input_size - 1)
mapped_vals = sp_batch_map_coordinates(input, coords)
return mapped_vals
def tf_batch_map_offsets(_input, offsets, order=1):
"""Batch map offsets into input
Parameters
----------
input : tf.Tensor. shape = (b, s, s)
offsets: tf.Tensor. shape = (b, s, s, 2)
"""
input_size = _input.shape[1]
offsets = tf.image.resize(offsets, [input_size, input_size]) * input_size
offsets = offsets[:,:,:,0:2]
input_shape = tf.shape(_input)
batch_size = input_shape[0]
input_size = input_shape[1]
offsets = tf.reshape(offsets, (batch_size, -1, 2))
grid = tf.meshgrid(
tf.range(input_size), tf.range(input_size), indexing='ij'
)
grid = tf.stack(grid, axis=-1)
grid = tf.cast(grid, 'float32')
grid = tf.reshape(grid, (-1, 2))
grid = tf_repeat_2d(grid, batch_size)
coords = offsets + grid
mapped_vals = tf_batch_map_coordinates(_input, coords)
mapped_vals = tf.reshape(mapped_vals, (batch_size, input_size, input_size, -1))
return mapped_vals
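# A minimal usage sketch (illustrative): tf_batch_map_offsets warps a batch of
# feature maps by a dense 2D offset field; zero offsets yield an identity warp.
# Shapes are assumptions chosen for demonstration.
#
# feats = tf.random.normal([2, 32, 32, 3])         # (b, s, s, c)
# offsets = tf.zeros([2, 32, 32, 2])               # (b, s, s, 2)
# warped = tf_batch_map_offsets(feats, offsets)    # (b, s, s, c)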
def generate_offset_map_batch(source, target, img_size):
offsetmap_batch = []
for _source, _target in zip(tf.unstack(source), tf.unstack(target)):
offsetmap = generate_offset_map(_source, _target, img_size)
offsetmap_batch.append(offsetmap)
return tf.stack(offsetmap_batch, axis=0)
def generate_offset_map(source, target, img_size):
anchor_pts = [[0,0],[0,255],[255,0],[255,255],
[0,127],[127,0],[255,127],[127,255],
[0,63],[0,191],[255,63],[255,191],
[63,0],[191,0],[63,255],[191,255]]
anchor_pts = np.asarray(anchor_pts)/ 255
xi, yi = np.meshgrid(np.linspace(0, 1, img_size), np.linspace(0, 1, img_size))
_source = np.concatenate([source, anchor_pts], axis=0).astype(np.float32)
_target = np.concatenate([target, anchor_pts], axis=0).astype(np.float32)
_offset = _source - _target
# interp2d
_triang = mtri.Triangulation(_target[:,0], _target[:,1])
_interpx = mtri.LinearTriInterpolator(_triang, _offset[:,0])
_interpy = mtri.LinearTriInterpolator(_triang, _offset[:,1])
_offsetmapx = _interpx(xi, yi)
_offsetmapy = _interpy(xi, yi)
offsetmap = np.stack([_offsetmapy, _offsetmapx, _offsetmapx*0], axis=2)
return offsetmap
def generate_uv_map(source, uv, img_size):
xi, yi = np.meshgrid(np.linspace(0, 1, img_size), np.linspace(0, 1, img_size))
# interp2d
_triang = mtri.Triangulation(source[:,0], source[:,1])
_interpz = mtri.LinearTriInterpolator(_triang, uv[:,2])
_offsetmapz = _interpz(xi, yi)
offsetmap = np.reshape(_offsetmapz,(img_size,img_size,1))
offsetmap = np.nan_to_num(offsetmap)
return offsetmap
File: Multi-domain-learning-FAS-main/source_multi_domain/FASMD/README.md

## FASMD Dataset
The FASMD Dataset is constructed on three existing datasets: SiW-Mv2, SiW, and Oulu-NPU. FASMD consists of five sub-datasets: dataset A is the
source domain dataset, and B, C, D and E are four target domain datasets. The details can be found in [[PDF]](http://cvlab.cse.msu.edu/pdfs/guo_liu_jain_liu_eccv2022.pdf).
<p align="center">
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/figures/Dataset_demo.png" alt="drawing" width="800"/>
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/figures/age_gallery.png" alt="drawing" width="900"/>
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/figures/distribution.png" alt="drawing" width="800"/>
</p>
## Usage
- Please first follow the links below to download the OULU, SIW and SiW-Mv2 datasets.
- OULU: [download link](https://sites.google.com/site/oulunpudatabase/)
- SIW: [download link](http://cvlab.cse.msu.edu/siw-spoof-in-the-wild-database.html)
- SIWM-v2: [download link](https://arxiv.org/pdf/1904.02860.pdf)
- Data partitioning files (OULU_list, SIW_list and SIWM_list) assign samples into the different sub-datasets of FASMD.
- Running `config.py` will construct the FASMD dataset; see the sketch below.
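A minimal sketch of how the partition files drive dataset construction, assuming it is run from `source_multi_domain/` and that the partition-file paths inside `config.py` point at your local copies:
```python
from argparse import Namespace
from config import Config_oulu

args = Namespace(epoch=60, cuda=0, stage='pretrain', type='spoof', set='all')
cfg = Config_oulu(args)           # reads the OULU partition lists
cfg.compile(dataset_name='Oulu')  # builds live/spoof train and test lists
```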
## SiW-Mv2 Dataset Download link:
SiW-Mv2 database is available under a license from Michigan State University for research purposes. Please send an application to **guoxia11@msu.edu** for the Dataset Release Agreement (DRA) form. The official protocols and the baseline performance will be updated on this page soon.
## Acknowledge
- We have used the following excellent scripts for age estimation and illumination estimation.
  - Age Estimation: [code link](https://github.com/yu4u/age-gender-estimation)
- Illumination Estimation: [code link](https://github.com/zhhoper/DPR)
## Reference
If you would like to use our work, please cite:
```
@inproceedings{xiaoguo2022MDFAS,
title={Multi-domain Learning for Updating Face Anti-spoofing Models},
author={Guo, Xiao and Liu, Yaojie and Jain, Anil and Liu, Xiaoming},
booktitle={In Proceeding of European Conference on Computer Vision (ECCV 2022)},
year={2022}
}
```
This GitHub repo will continue to be updated in the near future. If you have any question, please contact: [Xiao Guo](guoxia11@msu.edu)
File: Multi-domain-learning-FAS-main/source_SiW_Mv2/inference.py

# -*- coding: utf-8 -*-
# Copyright 2022
#
# Authors: Xiao Guo.
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
from metrics import my_metrics
import argparse
import os
import csv
import time
import math
import numpy as np
from tqdm import tqdm
from glob import glob
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## exp protocol setup.
parser.add_argument('--stage', type=str, default='ft', choices=['ft','pretrain','ub'])
parser.add_argument('--type', type=str, default='spoof', choices=['spoof','age','race','illu'])
parser.add_argument('--set', type=str, default='all', help='To choose from the predefined 14 types.')
parser.add_argument('--data', type=str, default='all', choices=['all','SiW','SiWM','oulu'])
parser.add_argument('--pretrain_folder', type=str, default='./pre_trained/', help='Pretrain weight.')
parser.add_argument('--pro', type=int, default=1, help='Protocol number.')
parser.add_argument('--unknown', type=str, default='Ob', help='The unknown spoof type.')
## train hyper-parameters.
parser.add_argument('--epoch', type=int, default=50, help='How many epochs to train the model.')
parser.add_argument('--lr', type=float, default=1e-4, help='The starting learning rate.')
parser.add_argument('--batch_size', type=int, default=6, help='Batch size.')
parser.add_argument('--decay_step', type=int, default=3, help='The learning rate decay step.')
parser.add_argument('--cuda', type=int, default=3, help='The gpu num to use.')
parser.add_argument('--debug_mode', type=str, default='True', choices=['True', "False"],
help='Deprecated function.')
## inference
parser.add_argument('--weight_dir', type=str, default='.', help='pre-trained weights dirs.')
parser.add_argument('--dir', type=str, default=None, help='the inference image folder.')
parser.add_argument('--img', type=str, default=None, help='the inference image.')
parser.add_argument('--warnings', action='store_true', help='show tensorflow warnings. By default, only errors are shown.')
parser.add_argument('--overwrite', action='store_true', help='overwrite output file if already exists.')
args = parser.parse_args()
if not args.warnings:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
else:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.WARN)
from config_siwm import Config_custom
from model import Generator, Discriminator, region_estimator
from dataset import Dataset
from utils import l1_loss, l2_loss, hinge_loss, Logging, normalization_score
from tensorboardX import SummaryWriter
class SRENet(object):
def __init__(self, config, args):
self.config = config
self.lr = config.lr
self.bs = config.BATCH_SIZE
self.SUMMARY_WRITER = config.SUMMARY_WRITER
if args.debug_mode == 'True':
self.debug_mode = True
else:
self.debug_mode = False
## The new set:
self.RE = region_estimator()
self.gen = Generator(self.RE)
self.gen_pretrained = Generator(self.RE)
self.disc1 = Discriminator(1,config.n_layer_D)
self.disc2 = Discriminator(2,config.n_layer_D)
self.disc3 = Discriminator(4,config.n_layer_D)
self.gen_opt = tf.keras.optimizers.Adam(self.lr)
self.disc_opt = tf.keras.optimizers.Adam(self.lr)
# Checkpoint initialization.
self.save_dir = config.save_model_dir
self.checkpoint_path_g = self.save_dir+"/gen/cp-{epoch:04d}.ckpt"
self.checkpoint_path_re= self.save_dir+"/ReE/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d1= self.save_dir+"/dis1/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d2= self.save_dir+"/dis2/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d3= self.save_dir+"/dis3/cp-{epoch:04d}.ckpt"
self.checkpoint_path_g_op = self.save_dir+"/g_opt/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d_op = self.save_dir+"/d_opt/cp-{epoch:04d}.ckpt"
self.checkpoint_dir_g = os.path.dirname(self.checkpoint_path_g)
self.checkpoint_dir_re = os.path.dirname(self.checkpoint_path_re)
self.checkpoint_dir_d1 = os.path.dirname(self.checkpoint_path_d1)
self.checkpoint_dir_d2 = os.path.dirname(self.checkpoint_path_d2)
self.checkpoint_dir_d3 = os.path.dirname(self.checkpoint_path_d3)
self.checkpoint_dir_g_op = os.path.dirname(self.checkpoint_path_g_op)
self.checkpoint_dir_d_op = os.path.dirname(self.checkpoint_path_d_op)
self.model_list = [self.gen, self.RE,
self.disc1, self.disc2, self.disc3]
self.model_p_list= [self.checkpoint_path_g,
self.checkpoint_path_re,
self.checkpoint_path_d1,
self.checkpoint_path_d2,
self.checkpoint_path_d3]
self.model_d_list= [self.checkpoint_dir_g,
self.checkpoint_dir_re,
self.checkpoint_dir_d1,
self.checkpoint_dir_d2,
self.checkpoint_dir_d3]
# Log class for displaying the losses.
self.log = Logging(config)
self.gen_opt = tf.keras.optimizers.Adam(self.lr)
self.disc_opt = tf.keras.optimizers.Adam(self.lr)
self.csv_file = None
self.txt_file = open(self.config.txt_file_name+'.txt', mode='a')
self.pred_list = []
self.GT_list = []
self.test_mode = 'inference'
self.inference_data_dir = config.inference_data_dir
self.inference_data_img = config.inference_data_img
def _restore(self, model, checkpoint_dir, pretrain=False):
last_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
model.load_weights(last_checkpoint)
if not pretrain:
last_epoch = int((last_checkpoint.split('.')[1]).split('-')[-1])
return last_epoch
#############################################################################
def inference(self, config):
'''the main inference entrance.'''
last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir_g)
if last_checkpoint != None:
epoch_num = last_checkpoint.split('/')[-1].split('.')[-2]
epoch_num = epoch_num.replace('cp-','')
if epoch_num == '0000':
epoch_num = 0
else:
epoch_num = int(epoch_num.lstrip('0'))
else:
epoch_num = 49 # fall back to the publicly released checkpoint (epoch 49).
for model_, model_dir_ in zip(self.model_list, self.model_d_list):
epoch_suffix = f"/cp-{epoch_num:04d}.ckpt"
current_checkpoint = model_dir_ + epoch_suffix
model_.load_weights(current_checkpoint)
start = time.time()
print(f"loading weights at epoch {epoch_num}.")
self.test_step(self.test_mode)
def test_step(self, test_mode):
if self.config.inference_data_dir != None:
filename = self.config.csv_file_name+'.csv'
print("overwrite: ", args.overwrite)
if os.path.exists(filename) and not args.overwrite:
print(f"File {filename} exists and --overwrite option was not set, aborting.")
exit(1)
csv_file = open(filename, mode='w')
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Image name', 'Score', 'Decision'])
else:
csv_file = None
csv_writer = None
old_bs = self.config.BATCH_SIZE
self.config.BATCH_SIZE = 5
dataset_inference = Dataset(self.config, test_mode)
img_num = len(dataset_inference.name_list)
num_list = int(img_num/self.config.BATCH_SIZE)+1
final_score = None
decision = None
for step in tqdm(range(num_list)):
img, img_name = dataset_inference.nextit()
img_name = img_name.numpy().tolist()
dmap_score, p = self._test_graph(img)
dmap_score, p = dmap_score.numpy(), p.numpy()
for idx, _ in enumerate(img_name):
img_name_cur = img_name[idx].decode('UTF-8')
img_idx = img_name_cur.split('/')[-1].replace('.png','')
img_idx = int(img_idx)
final_score = dmap_score[idx] + 0.1*p[idx]
final_score, decision = normalization_score(final_score)
if csv_writer is not None:
csv_writer.writerow([img_name_cur, f"{final_score:.2f}", decision])
if self.config.inference_data_img != None:
print(f"{img_name_cur} is classified as {decision} with the score {final_score:.2f}")
else:
print(f"Results written to {filename}")
self.config.BATCH_SIZE = old_bs
if csv_file is not None:
csv_file.close()
@tf.function
def _test_graph(self, img):
dmap_pred, p, c, n, x, region_map = self.gen(img, training=False)
dmap_score = tf.reduce_mean(dmap_pred[:,:,:,1], axis=[1,2]) - \
tf.reduce_mean(dmap_pred[:,:,:,0], axis=[1,2])
p = tf.reduce_mean(p, axis=[1,2,3])
return dmap_score, p
def main(args):
# Base Configuration Class
config = Config_custom(args)
config.lr = args.lr
config.type = args.type
config.DECAY_STEP = args.decay_step
config.pretrain_folder = args.pretrain_folder
config.desc_str = f'_siwmv2_pro_{args.pro}_unknown_{args.unknown}'
config.root_dir = './log'+config.desc_str
config.exp_dir = '/exp'+config.desc_str
config.CHECKPOINT_DIR = config.root_dir+config.exp_dir
config.tb_dir = './tb_logs'+config.desc_str
config.save_model_dir = f"{args.weight_dir}/save_model"+config.desc_str
config.res_dir = './result'
config.csv_file_name = config.res_dir + '/result'
config.txt_file_name = config.res_dir + '/result'
config.SUMMARY_WRITER = SummaryWriter(config.tb_dir)
if not os.path.exists(config.res_dir):
print('**********************************************************')
print(f"Making results directory: {config.res_dir}")
print('**********************************************************')
os.makedirs(config.res_dir, exist_ok=True)
else:
print('**********************************************************')
print(f"Using results directory: {config.res_dir}")
print('**********************************************************')
stdnet = SRENet(config, args)
stdnet.inference(config)
if __name__ == '__main__':
main(args)
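# Example invocations (illustrative; the flags are defined by the parser above):
# python inference.py --dir ./demo_images --weight_dir ./pre_trained --overwrite
# python inference.py --img ./demo_images/0001.png --weight_dir ./pre_trained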
File: Multi-domain-learning-FAS-main/source_SiW_Mv2/environment.yml

name: anti_spoofing_siwmv2
channels:
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2022.9.24=ha878542_0
- certifi=2022.9.14=py38h06a4308_0
- dlib=19.24.0=py38he2161a6_0
- jpeg=9e=h166bdaf_1
- ld_impl_linux-64=2.38=h1181459_1
- libblas=3.9.0=15_linux64_openblas
- libcblas=3.9.0=15_linux64_openblas
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=12.2.0=h69a702a_19
- libgfortran5=12.2.0=h337968e_19
- libgomp=11.2.0=h1234567_1
- liblapack=3.9.0=15_linux64_openblas
- libopenblas=0.3.20=pthreads_h78a6416_0
- libpng=1.6.37=hbc83047_0
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.3=h5eee18b_3
- openssl=1.1.1s=h7f8727e_0
- pip=22.1.2=py38h06a4308_0
- python=3.8.13=h12debd9_0
- python_abi=3.8=2_cp38
- readline=8.1.2=h7f8727e_1
- sqlite=3.39.2=h5082296_0
- tk=8.6.12=h1ccaba5_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.2.5=h7f8727e_1
- zlib=1.2.12=h5eee18b_3
- pip:
- absl-py==1.2.0
- astunparse==1.6.3
- cachetools==5.2.0
- charset-normalizer==2.1.1
- contourpy==1.0.5
- cycler==0.11.0
- face-alignment==1.3.5
- flatbuffers==1.12
- fonttools==4.37.3
- gast==0.4.0
- google-auth==2.11.1
- google-auth-oauthlib==0.4.6
- google-pasta==0.2.0
- grpcio==1.49.1
- h5py==3.7.0
- idna==3.4
- imageio==2.22.0
- importlib-metadata==4.12.0
- joblib==1.2.0
- keras==2.9.0
- keras-preprocessing==1.1.2
- kiwisolver==1.4.4
- libclang==14.0.6
- llvmlite==0.39.1
- markdown==3.4.1
- markupsafe==2.1.1
- matplotlib==3.6.0
- networkx==2.8.6
- numba==0.56.2
- numpy==1.23.3
- oauthlib==3.2.1
- opencv-python==4.6.0.66
- opt-einsum==3.3.0
- packaging==21.3
- pillow==9.2.0
- protobuf==3.19.5
- pyasn1==0.4.8
- pyasn1-modules==0.2.8
- pyparsing==3.0.9
- python-dateutil==2.8.2
- pywavelets==1.4.1
- requests==2.28.1
- requests-oauthlib==1.3.1
- rsa==4.9
- scikit-image==0.19.3
- scikit-learn==1.1.2
- scipy==1.9.1
- setuptools==59.8.0
- six==1.16.0
- tensorboard==2.9.1
- tensorboard-data-server==0.6.1
- tensorboard-plugin-wit==1.8.1
- tensorflow==2.9.1
- tensorflow-addons==0.18.0
- tensorflow-estimator==2.9.0
- tensorflow-io-gcs-filesystem==0.27.0
- termcolor==2.0.1
- tflearn==0.5.0
- threadpoolctl==3.1.0
- tifffile==2022.8.12
- torch==1.12.1
- typing-extensions==4.3.0
- urllib3==1.26.12
- werkzeug==2.2.2
- wrapt==1.14.1
- zipp==3.8.1
prefix: /user/guoxia11/cvlshare/cvl-guoxia11/anaconda3/envs/anti_spoofing
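# To reproduce this environment with conda (standard usage, not part of the
# original file):
#   conda env create -f environment.yml
#   conda activate anti_spoofing_siwmv2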
File: Multi-domain-learning-FAS-main/source_SiW_Mv2/test.py

# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
import tensorflow as tf
import argparse
import os
import time
import math
import numpy as np
import csv
from model import Generator, Discriminator, region_estimator
from utils import l1_loss, l2_loss, Logging
from dataset import Dataset
from config_siwm import Config_siwm
from tensorboardX import SummaryWriter
from tqdm import tqdm
class SRENet(object):
"""
the SRENet class.
Attributes:
-----------
configurations: config.
modules: gen_pretrained, gen, RE, multi-disc and optimizers.
various directories for checkpoints.
log: log handler.
Methods:
-----------
inference functions: inference, test_step, and test_step_helper.
"""
def __init__(self, config):
self.config = config
self.lr = config.lr
self.bs = config.BATCH_SIZE
self.SUMMARY_WRITER = config.SUMMARY_WRITER
## The modules:
self.gen_pretrained = Generator()
self.RE = region_estimator()
self.gen = Generator(self.RE)
self.disc1 = Discriminator(1,config.n_layer_D)
self.disc2 = Discriminator(2,config.n_layer_D)
self.disc3 = Discriminator(4,config.n_layer_D)
self.gen_opt = tf.keras.optimizers.Adam(self.lr)
# Checkpoint initialization.
self.save_dir = config.save_model_dir
self.checkpoint_path_g = self.save_dir+"/gen/cp-{epoch:04d}.ckpt"
self.checkpoint_path_re = self.save_dir+"/ReE/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d1 = self.save_dir+"/dis1/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d2 = self.save_dir+"/dis2/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d3 = self.save_dir+"/dis3/cp-{epoch:04d}.ckpt"
self.checkpoint_path_g_op = self.save_dir+"/g_opt/cp-{epoch:04d}.ckpt"
self.checkpoint_dir_g = os.path.dirname(self.checkpoint_path_g)
self.checkpoint_dir_re = os.path.dirname(self.checkpoint_path_re)
self.checkpoint_dir_d1 = os.path.dirname(self.checkpoint_path_d1)
self.checkpoint_dir_d2 = os.path.dirname(self.checkpoint_path_d2)
self.checkpoint_dir_d3 = os.path.dirname(self.checkpoint_path_d3)
self.checkpoint_dir_g_op = os.path.dirname(self.checkpoint_path_g_op)
self.model_list = [self.gen, self.RE, self.disc1, self.disc2, self.disc3]
self.model_p_list= [self.checkpoint_path_g,
self.checkpoint_path_re,
self.checkpoint_path_d1,
self.checkpoint_path_d2,
self.checkpoint_path_d3]
self.model_d_list= [self.checkpoint_dir_g,
self.checkpoint_dir_re,
self.checkpoint_dir_d1,
self.checkpoint_dir_d2,
self.checkpoint_dir_d3]
# Log class for displaying the losses.
self.log = Logging(config)
self.csv_file = open(self.config.csv_file_name, mode='w')
#############################################################################
def inference(self, config):
'''the main inference entrance.'''
## setup the csv handler.
self.csv_writer = csv.writer(self.csv_file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
self.csv_writer.writerow(['Video name', 'Dataset', 'Depth', 'Region', \
'Content', 'Additive', 'Label', 'test_mode'])
## loading the target epoch weight.
self.log.epoch = self.config.epoch_eval
for model_, model_dir_ in zip(self.model_list, self.model_d_list):
current_checkpoint = model_dir_ + f"/cp-{self.config.epoch_eval:04d}.ckpt"
model_.load_weights(current_checkpoint).expect_partial()
print("*********************************************************")
print(f"loading weights from {current_checkpoint}.")
print("*********************************************************")
## inference.
start = time.time()
test_mode = 'test_A'
update_list = self.test_step(test_mode)
		assert len(update_list[-1]) == len(update_list[0]), "Their lengths should match."
print('\n*****Time for epoch {} is {} sec*****'.format(self.config.epoch_eval+1,
int(time.time()-start)))
self.csv_file.close()
self.SUMMARY_WRITER.close()
print("...Execution Over...")
	def test_step(self, test_mode, viz_mode=False):
		"""gathers results on the SiW-Mv2 dataset."""
		result_siwm = self.test_step_helper(self.config, test_mode, viz_mode, prefix_dataset='SIWM')
		return result_siwm
def test_step_helper(self, config_cur, test_mode, viz_mode=False, prefix_dataset=None):
tmp_bs = config_cur.BATCH_SIZE
config_cur.BATCH_SIZE = 64
dataset_test = Dataset(config_cur, test_mode+'_csv')
img_num = len(dataset_test.name_list)
print(f"The total inference image number is: {img_num}.")
num_step = int(img_num/config_cur.BATCH_SIZE)
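		# Note: the integer division floors, so frames beyond the last full batch are skipped.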
d_score, p_score, c_score, n_score, final_score, label_lst = [],[],[],[],[],[]
for step in tqdm(range(num_step)):
self.log.step = step
img, img_name = dataset_test.nextit()
img_name = img_name.numpy().tolist()
d, p, c, n, figs = self._test_graph(img)
d, p, c, n = d.numpy(), p.numpy(), c.numpy(), n.numpy()
d_score.extend(d)
p_score.extend(p)
c_score.extend(c)
n_score.extend(n)
self.log.save(figs, training=False)
for i in range(config_cur.BATCH_SIZE):
img_name_cur = img_name[i].decode('UTF-8')
if ("Live" in img_name_cur) or ("live" in img_name_cur):
label_cur = 0
else:
label_cur = 1
label_lst.append(label_cur)
final_score.append(d[i]+p[i])
self.csv_writer.writerow([img_name_cur, prefix_dataset, d[i], p[i],
c[i], n[i], label_cur, test_mode])
self.csv_file.flush()
config_cur.BATCH_SIZE = tmp_bs
return d_score, p_score, c_score, n_score, final_score, label_lst
@tf.function
def _test_graph(self, img):
"""
model outputs the result.
dmap_pred, p_area, c, n are depth, region, content, and additive traces.
"""
dmap_pred, p_area, c, n, x, region_map = self.gen(img, training=False)
dmap_pred = tf.concat([dmap_pred,
tf.zeros([dmap_pred.get_shape()[0], 32, 32, 1])],
axis=3)
converted_gray = tf.image.rgb_to_grayscale(img)
figs = [img, tf.abs(p_area), tf.abs(region_map), dmap_pred]
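		# Depth-based score: channel 1 of dmap_pred accumulates spoof depth evidence and
		# channel 0 live evidence (see get_dmap_and_stype in dataset.py), so the difference
		# of their means acts as a spoofness score.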
d = tf.reduce_mean(dmap_pred[:,:,:,1], axis=[1,2]) - \
tf.reduce_mean(dmap_pred[:,:,:,0], axis=[1,2])
p = tf.reduce_mean(p_area, axis=[1,2,3])
c = tf.reduce_mean(c, axis=[1,2,3])
n = tf.reduce_mean(n, axis=[1,2,3])
return d, p, c, n, figs
def main(args):
# Base Configuration Class
config = Config_siwm(args)
config.lr = args.lr
config.type = args.type
config.pretrain_folder = args.pretrain_folder
config.DECAY_STEP = args.decay_step
config.pretrain_folder = args.pretrain_folder
config.desc_str = f'_siwmv2_pro_{args.pro}_unknown_{args.unknown}'
config.root_dir = './log'+config.desc_str
config.exp_dir = '/exp'+config.desc_str
config.CHECKPOINT_DIR = config.root_dir+config.exp_dir
config.tb_dir = './tb_logs'+config.desc_str
config.save_model_dir = "./save_model"+config.desc_str
config.csv_file_name = config.root_dir+'/res_'+str(config.epoch_eval)+'.csv'
config.SUMMARY_WRITER = SummaryWriter(config.tb_dir)
print('**********************************************************')
config.compile()
print('**********************************************************')
srenet = SRENet(config)
srenet.inference(config)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## exp protocol setup.
parser.add_argument('--stage', type=str, default='ft', choices=['ft','pretrain','ub'])
parser.add_argument('--type', type=str, default='spoof', choices=['spoof','age','race','illu'])
parser.add_argument('--set', type=str, default='all', help='To choose from the predefined 14 types.')
parser.add_argument('--data', type=str, default='all', choices=['all','SiW','SiWM','oulu'])
parser.add_argument('--pretrain_folder', type=str, default='./pre_trained/', help='Pretrain weight.')
parser.add_argument('--pro', type=int, default=1, help='Protocol number.')
parser.add_argument('--unknown', type=str, default='Ob', help='The unknown spoof type.')
## train hyper-parameters.
parser.add_argument('--epoch', type=int, default=50, help='How many epochs to train the model.')
parser.add_argument('--lr', type=float, default=1e-4, help='The starting learning rate.')
parser.add_argument('--batch_size', type=int, default=6, help='Batch size.')
parser.add_argument('--decay_step', type=int, default=3, help='The learning rate decay step.')
parser.add_argument('--cuda', type=int, default=3, help='The gpu num to use.')
parser.add_argument('--debug_mode', type=str, default='True', choices=['True', "False"],
help='Deprecated function.')
## inference
parser.add_argument('--epoch_eval', type=int, default=49, help='Which epoch to eval.')
parser.add_argument('--dir', type=str, default=None, help='the inference image folder.')
parser.add_argument('--img', type=str, default='fake.png', help='the inference image.')
args = parser.parse_args()
	main(args)

===== Multi-domain-learning-FAS-main/source_SiW_Mv2/preprocessing.py =====
# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.s
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes not withstanding any copyright annotation thereon.
# ==============================================================================
import face_alignment
import glob
import os
import cv2
import numpy as np
from PIL import Image
from tqdm import tqdm
from skimage import io
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
def video_process(vlist, folder_dir='./test/'):
for vd in tqdm(vlist):
folder_name = vd.split('/')[-1].split('.')[0]
folder = os.path.join(folder_dir, folder_name)
if not os.path.exists(folder):
os.makedirs(folder)
cap = cv2.VideoCapture(vd)
fr = 1
while(cap.isOpened()):
ret, frame = cap.read()
if ret == True:
frame = Image.fromarray(frame)
width, height = frame.size
scale = 1.0
if max(width, height) > 800:
scale = 800.0 / max(width, height)
detect_frame = frame.resize((int(width*scale),int(height*scale)), Image.Resampling.BICUBIC)
detect_frame = np.array(detect_frame)
else:
detect_frame = np.array(frame)
scale = 1/scale
preds = fa.get_landmarks(detect_frame)
frame = np.array(frame)
if frame is None:
# This shouldn't happen as we should only be here when ret == True
raise Exception("Failed to read source video")
frame_shape = frame.shape
if preds is None:
print(f'No Face found at frame #{fr} of {vd} (frame # won\'t be incremented)')
continue
else:
pred = (preds[0] * [scale, scale]).astype(int)
if len(preds) > 1:
biggest_eye2eye_dis = -100
for test_pred in preds:
test_pred = (test_pred * [scale, scale]).astype(int)
eye2eye_dis = np.sqrt(np.sum(np.square(
np.abs(test_pred[36, :] - test_pred[45, :])
))) / 2
if eye2eye_dis > biggest_eye2eye_dis:
pred = test_pred
biggest_eye2eye_dis = eye2eye_dis
eye2eye_dis = np.sqrt(np.sum(np.square(
np.abs(pred[36, :] - pred[45, :])
))) / 2
nose_len = np.sqrt(np.sum(np.square(
np.abs(pred[27, :] - pred[30, :])
))) / 2
face_len = np.sqrt(np.sum(np.square(
np.abs(pred[27, :] - pred[8, :])
))) / 2
if face_len == 0.0:
nose_face_ratio = 1.0 # Chin is on nose, ie. spoof
else:
nose_face_ratio = nose_len / face_len
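					# The crop box below is anchored at the eye center and scaled by the half
					# inter-eye distance: 2.3x to each side, 1.6x above, and 3.0x below.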
eye_center = (pred[36, :] + pred[45, :]) / 2
xl = int(eye_center[0] - eye2eye_dis * 2.3)
xr = int(eye_center[0] + eye2eye_dis * 2.3)
yt = int(eye_center[1] - eye2eye_dis * 1.6)
yb = int(eye_center[1] + eye2eye_dis * 3.0)
if xl < 0 or yt < 0 or xr >= frame.shape[1] or yb >= frame.shape[0]:
(xl_pad, xr_pad, yt_pad, yb_pad) = (0,0,0,0)
if xl < 0:
xl_pad = abs(xl)
if yt < 0:
yt_pad = abs(yt)
if xr > (frame.shape[1] - 1):
xr_pad = xr - frame.shape[1] + 1
if yb > (frame.shape[0] - 1):
yb_pad = yb - frame.shape[0] + 1
large_fr = np.zeros((yt_pad + yb_pad + frame.shape[0],
xl_pad + xr_pad + frame.shape[1],
3))
large_fr[yt_pad:yt_pad + frame.shape[0],
xl_pad:xl_pad + frame.shape[1],
:] = frame
xl += xl_pad
xr += xl_pad
yt += yt_pad
yb += yt_pad
face = large_fr[yt:yb, xl:xr, :]
else:
face = frame[yt:yb, xl:xr, :]
x_scale = float(256) / float(xr-xl)
y_scale = float(256) / float(yb-yt)
pred[:, 0] = pred[:, 0] - int(eye_center[0] - eye2eye_dis * 2.3)
pred[:, 1] = pred[:, 1] - int(eye_center[1] - eye2eye_dis * 1.6)
face = Image.fromarray(face.astype(np.uint8)).resize((256, 256), Image.Resampling.BICUBIC)
pred = (pred * [x_scale,y_scale]).astype(int)
face_numpy_array = np.array(face)
img_rgb = face_numpy_array
fname = folder + '/' + str(fr) + '.png'
lmname = folder + '/' + str(fr) + '.npy'
cv2.imwrite(fname, img_rgb)
np.save(lmname, pred)
fr += 1
else:
break
if __name__ == "__main__":
file_path = './sample_video/live/*.mov'
video_list = glob.glob(file_path)
video_process(video_list, folder_dir='./demo/live/')
file_path = './sample_video/spoof/*.mov'
video_list = glob.glob(file_path)
	video_process(video_list, folder_dir='./demo/spoof/')

===== Multi-domain-learning-FAS-main/source_SiW_Mv2/run.sh =====
source ~/.bashrc
conda activate anti_spoofing
CUDA_NUM=0
python train.py --cuda=$CUDA_NUM --pro=1
python test.py --cuda=$CUDA_NUM --pro=1
python train.py --cuda=$CUDA_NUM --pro=2 --unknown=Co
python test.py --cuda=$CUDA_NUM --pro=2 --unknown=Co
python train.py --cuda=$CUDA_NUM --pro=2 --unknown=Eye
python test.py --cuda=$CUDA_NUM --pro=2 --unknown=Eye
python train.py --cuda=$CUDA_NUM --pro=2 --unknown=Funnyeye
python test.py --cuda=$CUDA_NUM --pro=2 --unknown=Funnyeye
python train.py --cuda=$CUDA_NUM --pro=2 --unknown=Half
python test.py --cuda=$CUDA_NUM --pro=2 --unknown=Half
python train.py --cuda=$CUDA_NUM --pro=2 --unknown=Im
python test.py --cuda=$CUDA_NUM --pro=2 --unknown=Im
python train.py --cuda=$CUDA_NUM --pro=2 --unknown=Mann
python test.py --cuda=$CUDA_NUM --pro=2 --unknown=Mann
python train.py --cuda=$CUDA_NUM --pro=2 --unknown=Mouth
python test.py --cuda=$CUDA_NUM --pro=2 --unknown=Mouth
python train.py --cuda=$CUDA_NUM --pro=3
python test.py --cuda=$CUDA_NUM --pro=3

===== Multi-domain-learning-FAS-main/source_SiW_Mv2/utils.py =====
# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.s
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes not withstanding any copyright annotation thereon.
# ==============================================================================
from skimage.draw import line_aa
import cv2
import tensorflow as tf
import sys
import glob
import random
import numpy as np
import math as m
import tensorflow.keras.layers as layers
import matplotlib.tri as mtri
from scipy import ndimage, misc
from PIL import Image, ImageDraw
import face_alignment
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
class Logging(object):
def __init__(self, config):
self.config = config
self.losses = {}
self.losses_val = {}
self.txt = ''
self.fig = []
self.fig_val = []
def update(self, losses, training):
if training:
for name in losses.keys():
if name in self.losses:
current_loss = self.losses[name]
self.losses[name] = [current_loss[0]+losses[name], current_loss[1]+1]
else:
self.losses[name] = [losses[name], 1]
else:
for name in losses.keys():
if name in self.losses_val:
current_loss = self.losses_val[name]
self.losses_val[name] = [current_loss[0]+losses[name].numpy(), current_loss[1]+1]
else:
self.losses_val[name] = [losses[name].numpy(), 1]
def display(self, losses, epoch, step, training, allstep):
self.update(losses, training)
if training:
text = 'Epoch (Train) '+str(epoch+1)+'-'+str(step+1)+'/'+str(allstep) + ': '
for _name in self.losses.keys():
value = self.losses[_name]
text += _name+':'+"{:.3g}".format(value[0]/value[1])+', '
else:
text = 'Epoch ( Val ) '+str(epoch+1)+'-'+str(step+1)+'/'+str(allstep) + ': '
for _name in self.losses_val.keys():
value = self.losses_val[_name]
text += _name+':'+"{:.3g}".format(value[0]/value[1])+', '
text = text[:-2]+' '
print(text, end='\r')
self.txt = text
self.epoch = epoch
self.step = step
def display_metric(self, message):
config = self.config
print(message, end='\r')
file_object = open(config.CHECKPOINT_DIR+'/log.txt', 'a')
file_object.write(message+'\n')
file_object.close()
def save(self, fig, training, idx=0):
config = self.config
step = self.step
if training:
if step % config.IMG_LOG_FR == 0:
fig = self.get_figures(fig)
fname = config.CHECKPOINT_DIR + '/epoch-' + str(self.epoch+1) + '-Train-' + str(self.step+1) + '.png'
cv2.imwrite(fname, fig.numpy())
if step % config.TXT_LOG_FR == 0:
file_object = open(config.CHECKPOINT_DIR+'/log.txt', 'a')
file_object.write(self.txt+'\n')
file_object.close()
else:
if step % (config.IMG_LOG_FR//100) == 0:
fig = self.get_figures(fig)
fname = config.CHECKPOINT_DIR + '/epoch-' + str(self.epoch+1) + '-Val-' + str(self.step+1) + '_' + str(idx) + '_' + '.png'
cv2.imwrite(fname, fig.numpy())
if step % (config.TXT_LOG_FR//100) == 0:
file_object = open(config.CHECKPOINT_DIR+'/log.txt', 'a')
file_object.write(self.txt+'\n')
file_object.close()
self.fig = []
self.fig_val = []
def save_img(self, fig, fname):
config = self.config
step = self.step
fig = self.get_imgs(fig,256)
fname = config.CHECKPOINT_DIR+'/test/'+fname.split('/')[-1].split('.')[0]+'-result.png'
cv2.imwrite(fname, fig.numpy())
self.fig = []
self.fig_val = []
	def reset(self):
		# Note: this relies on self.loss_names being populated elsewhere first;
		# it is never set in __init__.
		for _name in self.loss_names:
			self.losses[_name] = [0, 0]
			self.losses_val[_name] = [0, 0]
self.txt = ''
self.img = 0
def get_imgs(self, fig, size=None):
config = self.config
column = []
for _img in fig:
_img = tf.clip_by_value(_img, 0.0, 1.0)*255
if _img.shape[3] == 1:
_img = tf.concat([_img, _img, _img], axis=3)
else:
r, g, b = tf.split(_img[:,:,:,:3], 3, 3)
_img = tf.concat([b,g,r], 3)
if size is None:
_img = tf.image.resize(_img, [config.FIG_SIZE, config.FIG_SIZE])
else:
_img = tf.image.resize(_img, [config.IMG_SIZE, config.IMG_SIZE])
column.append(_img[0,:,:,:])
column = tf.concat(column, axis=0)
return column
def get_figures(self, fig, size=None):
config = self.config
column = []
for _img in fig:
_img = tf.clip_by_value(_img, 0.0, 1.0)*255
if _img.shape[3] == 1:
_img = tf.concat([_img, _img, _img], axis=3)
else:
r, g, b = tf.split(_img[:,:,:,:3], 3, 3)
_img = tf.concat([b,g,r], 3)
if size is None:
_img = tf.image.resize(_img, [config.FIG_SIZE, config.FIG_SIZE])
else:
_img = tf.image.resize(_img, [config.IMG_SIZE, config.IMG_SIZE])
_row = tf.split(_img, _img.shape[0])
_row = tf.concat(_row, axis=2)
column.append(_row[0,:,:,:])
column = tf.concat(column, axis=0)
return column
def l1_loss(x, y, mask=None):
xshape = x.shape
if mask is not None:
loss = tf.math.reduce_sum(tf.abs(x-y) * mask) / (tf.reduce_sum(mask) + 1e-6) / x.shape[3]
else:
loss = tf.math.reduce_mean(tf.abs(x-y))
return loss
def l2_loss(x, y, mask=None):
xshape = x.shape
if mask is not None:
loss = tf.math.reduce_sum(tf.square(tf.subtract(x, y)) * mask) / (tf.reduce_sum(mask) + 1e-6) / x.shape[3]
else:
loss = tf.math.reduce_mean(tf.square(tf.subtract(x, y)))
return loss
def hinge_loss(y_pred, y_true, mask=None):
return tf.math.reduce_mean(tf.math.maximum(0., 1. - y_true*y_pred))
def generate_face_region(source, img_size):
morelm = np.copy(source[0:17,:])
morelm[:,1] = morelm[0,1] - (morelm[:,1] - morelm[0,1]) * 0.8
source = np.concatenate([source,morelm],axis=0)
'''
img = Image.new('L', (img_size, img_size), 0)
ImageDraw.Draw(img).polygon(source, outline=1, fill=1)
mask = np.array(img)
mask = cv2.GaussianBlur(mask,(5,5),0).reshape([img_size,img_size,1])
'''
xi, yi = np.meshgrid(np.linspace(0, 1, img_size), np.linspace(0, 1, img_size))
# interp2d
_triang = mtri.Triangulation(source[:,0], source[:,1])
_interpx = mtri.LinearTriInterpolator(_triang, source[:,0])
_offsetmapx = _interpx(xi, yi)
offsetmap = np.stack([_offsetmapx], axis=2)
offsetmap = np.nan_to_num(offsetmap)
offsetmap = np.asarray(offsetmap>0,np.float32)
offsetmap = cv2.GaussianBlur(offsetmap,(5,5),0).reshape([img_size,img_size,1])
return offsetmap
def generate_landmark_map(landmark, img_size):
lmlist = [[1,2],[2,3],[3,4],[4,5],[5,6],[6,7],[7,8],[8,9],[9,10],[10,11],[11,12],[12,13],[13,14],[14,15],[15,16],[16,17],
[18,19],[19,20],[20,21],[21,22],[23,24],[24,25],[25,26],[26,27],
[37,38],[38,39],[39,40],[40,41],[41,42],[42,37],[43,44],[44,45],[45,46],[46,47],[47,48],[48,43],
[28,29],[29,30],[30,31],[32,33],[33,34],[34,35],[35,36],
[49,50],[50,51],[51,52],[52,53],[53,54],[54,55],[55,56],[56,57],[57,58],[58,59],[59,60],[60,49],
[61,62],[62,63],[63,64],[64,65],[65,66],[66,67],[67,68],[68,61]]
lm_map = []
img = np.zeros((img_size, img_size), dtype=np.uint8)
lm = landmark*img_size
for pr in lmlist:
lm_start = lm[pr[0]-1,:].astype(np.int32)
lm_end = lm[pr[1]-1,:].astype(np.int32)
rr, cc, val = line_aa(lm_start[0], lm_start[1], lm_end[0], lm_end[1])
templist = [t for t in range(len(rr)) if rr[t] < img_size and rr[t] > 0 ]
rr = rr[templist]
cc = cc[templist]
val = val[templist]
templist = [t for t in range(len(cc)) if cc[t] < img_size and cc[t] > 0 ]
rr = rr[templist]
cc = cc[templist]
val = val[templist]
img[cc, rr] = val * 255
blur = cv2.GaussianBlur(img,(3,3),0)
blur = blur / np.amax(blur) * 255
lm_map = np.reshape(blur, [blur.shape[0], blur.shape[1], 1])
return lm_map
def face_crop_and_resize(img, lm, fsize, box_perturb=[1.15, 1.25], aug=False):
## visualize this function.
img_shape = img.shape
lm_reverse_list = np.array([17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,
27,26,25,24,23,22,21,20,19,18,
28,29,30,31,36,35,34,33,32,
46,45,44,43,48,47,40,39,38,37,42,41,
55,54,53,52,51,50,49,60,59,58,57,56,65,64,63,62,61,68,67,66],np.int32) -1
if aug and lm.shape[0] == 68 and random.uniform(0,1)>0.5:
img = cv2.flip(img, 1) # horizontal reverse.
lm[:,0] = img_shape[1] - lm[:,0]
lm = lm[lm_reverse_list,:]
# center and length of the landmarks.
center = [(np.min(lm[:,0])+np.max(lm[:,0]))/2, (np.min(lm[:,1])+np.max(lm[:,1]))/2]
length = np.max([(np.max(lm[:,0])-np.min(lm[:,0]))/2, (np.max(lm[:,1])-np.min(lm[:,1]))/2]) * 1.1
if aug: # if aug, change the center and length.
center[0] = center[0] + random.uniform(-0.05,0.05)*length
center[1] = center[1] + random.uniform(-0.05,0.05)*length
length = length * random.uniform(0.93,1.07)
## cropping the image.
box = [int(center[0])-int(length),
int(center[1])-int(length*1.2),
int(center[0])+int(length),
int(center[1])+int(length)+int(length)-int(length*1.2)]
box_m = [img_shape[1] - box[2],
box[1],
img_shape[1] - box[0],
box[3]]
lm[:,0] = lm[:,0] - box[0]
lm[:,1] = lm[:,1] - box[1]
preset_x = 0
preset_y = 0
if box[0] < 0 or box[2] > img_shape[1]:
preset_x = max(-box[0], box[2] - img_shape[1])
if box[1] < 0 or box[3] > img_shape[0]:
preset_y = max(-box[1], box[3] - img_shape[0])
if preset_x > 0 or preset_y > 0:
img_large= np.zeros((img_shape[0]+preset_y+preset_y+2,img_shape[1]+preset_x+preset_x+2,img_shape[2]))
img_large[preset_y:preset_y+int(img_shape[0]),preset_x:preset_x+int(img_shape[1]),:] = img
img = img_large
box[0] = box[0] + preset_x
box[1] = box[1] + preset_y
box[2] = box[2] + preset_x
box[3] = box[3] + preset_y
img = img[box[1]:box[3],box[0]:box[2],:]
sz = img.shape[0]
if img.shape[0] == img.shape[1] and img.shape[0]>0:
img = cv2.resize(img, (fsize,fsize))
else:
img = np.zeros((fsize, fsize, img.shape[2]))
return img, lm/(length*2)
def file_reader(filename):
with open(filename, 'r') as f:
filenames_list = f.read().split('\n')
return filenames_list
def image_process(image_name):
'''
process the input image and generate .npy file for the landmarks.
'''
frame = cv2.imread(image_name)
frame = Image.fromarray(frame)
width, height = frame.size
scale = 1.0
if max(width, height) > 800:
scale = 800.0 / max(width, height)
detect_frame = frame.resize((int(width*scale),int(height*scale)), Image.BICUBIC)
detect_frame = np.array(detect_frame)
else:
detect_frame = np.array(frame)
scale = 1/scale
preds = fa.get_landmarks(detect_frame)
frame = np.array(frame)
frame_shape = frame.shape
if preds is None:
print('No Face!')
return np.ones((256, 256, 3)).astype(np.uint8), np.ones((68, 2)).astype(np.float32)
else:
pred = (preds[0] * [scale, scale]).astype(int)
if len(preds) > 1:
biggest_eye2eye_dis = -100
for test_pred in preds:
test_pred = (test_pred * [scale, scale]).astype(int)
eye2eye_dis = np.sqrt(np.sum(np.square(
np.abs(test_pred[36, :] - test_pred[45, :])
))) / 2
if eye2eye_dis > biggest_eye2eye_dis:
pred = test_pred
biggest_eye2eye_dis = eye2eye_dis
eye2eye_dis = np.sqrt(np.sum(np.square(
np.abs(pred[36, :] - pred[45, :])
))) / 2
nose_len = np.sqrt(np.sum(np.square(
np.abs(pred[27, :] - pred[30, :])
))) / 2
face_len = np.sqrt(np.sum(np.square(
np.abs(pred[27, :] - pred[8, :])
))) / 2
if face_len == 0.0:
nose_face_ratio = 1.0 # Chin is on nose, ie. spoof
else:
nose_face_ratio = nose_len / face_len
eye_center = (pred[36, :] + pred[45, :]) / 2
xl = int(eye_center[0] - eye2eye_dis * 2.3)
xr = int(eye_center[0] + eye2eye_dis * 2.3)
yt = int(eye_center[1] - eye2eye_dis * 1.6)
yb = int(eye_center[1] + eye2eye_dis * 3.0)
if xl < 0 or yt < 0 or xr >= frame.shape[1] or yb >= frame.shape[0]:
(xl_pad, xr_pad, yt_pad, yb_pad) = (0,0,0,0)
if xl < 0:
xl_pad = abs(xl)
if yt < 0:
yt_pad = abs(yt)
if xr > (frame.shape[1] - 1):
xr_pad = xr - frame.shape[1] + 1
if yb > (frame.shape[0] - 1):
yb_pad = yb - frame.shape[0] + 1
large_fr = np.zeros((yt_pad + yb_pad + frame.shape[0],
xl_pad + xr_pad + frame.shape[1],
3))
large_fr[yt_pad:yt_pad + frame.shape[0],
xl_pad:xl_pad + frame.shape[1],
:] = frame
xl += xl_pad
xr += xl_pad
yt += yt_pad
yb += yt_pad
face = large_fr[yt:yb, xl:xr, :]
else:
face = frame[yt:yb, xl:xr, :]
x_scale = float(256) / float(xr-xl)
y_scale = float(256) / float(yb-yt)
pred[:, 0] = pred[:, 0] - int(eye_center[0] - eye2eye_dis * 2.3)
pred[:, 1] = pred[:, 1] - int(eye_center[1] - eye2eye_dis * 1.6)
face = Image.fromarray(face.astype(np.uint8)).resize((256, 256), Image.BICUBIC)
pred = (pred * [x_scale,y_scale]).astype(int)
image = cv2.cvtColor(np.array(face), cv2.COLOR_BGR2RGB)
facial_landmark = np.array(pred)
return image, facial_landmark
def normalization_score(score, shift=0.6, scale=1.6, lower=-0.4, upper=0.8):
'''compute the normalization score.'''
nor_score = (score+shift)/scale
if nor_score < 0.32:
if nor_score < 0:
nor_score = 0
return nor_score, 'Live'
else:
if nor_score > 1:
nor_score = 1
		return nor_score, 'Spoof'

===== Multi-domain-learning-FAS-main/source_SiW_Mv2/model.py =====
# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.s
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes not withstanding any copyright annotation thereon.
# ==============================================================================
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import layers
from warp import tf_batch_map_offsets
class Conv(layers.Layer):
def __init__(self, ch=32, ksize=3, stride=1, norm='batch', nl=True, dropout=False, name=None):
super(Conv, self).__init__()
self.norm = norm
self.conv = layers.Conv2D(ch, (ksize, ksize), strides=(stride, stride), padding='same',name=name)
if norm == 'batch':
# conv + bn
self.bnorm = layers.BatchNormalization()
else:
self.bnorm = None
if norm == 'spec':
# conv + sn
self.conv = tfa.layers.SpectralNormalization(self.conv)
# relu
if nl:
self.relu = layers.LeakyReLU()
else:
self.relu = None
# dropout
if dropout:
self.drop = layers.Dropout(0.3)
else:
self.drop = None
def call(self, x, training):
x = self.conv(x)
if self.bnorm:
x = self.bnorm(x, training)
if self.relu:
x = self.relu(x)
if self.drop:
x = self.drop(x)
return x
class ConvT(layers.Layer):
def __init__(self, ch=32, ksize=3, stride=2, norm='batch', nl=True, dropout=False):
super(ConvT, self).__init__()
self.norm = norm
self.conv = layers.Conv2DTranspose(ch, (ksize, ksize), strides=(stride, stride), padding='same')
if norm == 'batch': # conv + bn
self.bnorm = layers.BatchNormalization()
else:
self.bnorm = None
if norm == 'spec': # conv + sn
self.conv = tfa.layers.SpectralNormalization(self.conv)
if nl: # relu
self.relu = layers.LeakyReLU()
else:
self.relu = None
if dropout: # dropout
self.drop = layers.Dropout(0.3)
else:
self.drop = None
def call(self, x, training):
x = self.conv(x)
if self.bnorm:
x = self.bnorm(x, training)
if self.relu:
x = self.relu(x)
if self.drop:
x = self.drop(x)
return x
class SA(layers.Layer):
def __init__(self, ksize=3):
super(SA, self).__init__()
self.conv1 = Conv(1, ksize=ksize, name='conv')
def call(self, x, training):
xmean = tf.reduce_mean(x, axis=3, keepdims=True)
xmax = tf.reduce_max(x, axis=3, keepdims=True)
xmeanmax = tf.concat([xmean, xmax], axis=3)
y = self.conv1(xmeanmax, training)
return x*tf.sigmoid(y)
class region_estimator(tf.keras.Model):
def __init__(self):
super(region_estimator, self).__init__()
self.up1 = ConvT(64)
self.up2 = ConvT(40)
self.up3 = ConvT(40)
self.up4 = ConvT(40)
self.conv_map1 = Conv(1, ksize=7, norm=False, nl=False)
def call(self, feature, training):
		x = self.up1(feature[3], training)
x = self.up2(tf.concat([x, feature[2]], axis=3), training)
x = self.up3(tf.concat([x, feature[1]], axis=3), training)
x = self.up4(tf.concat([x, feature[0]], axis=3), training)
		output_f = self.conv_map1(x, training)
x = tf.nn.sigmoid(output_f)
return x
class Generator(tf.keras.Model):
def __init__(self, Region_E=None):
super(Generator, self).__init__()
self.RE = Region_E
n_ch = [32,40,64,96,128,192,256]
self.n_ch = n_ch
self.conv0 = Conv(n_ch[0], ksize=7, name='conv0')
self.conv1 = Conv(n_ch[1], name='conv1')
self.conv2 = Conv(n_ch[1], name='conv2')
self.conv3 = Conv(n_ch[1], name='conv3')
self.conv4 = Conv(n_ch[1], name='conv4')
self.conv5 = Conv(n_ch[1], name='conv5')
self.conv6 = Conv(n_ch[1], name='conv6')
self.conv7 = Conv(n_ch[1], name='conv7')
self.conv8 = Conv(n_ch[1], name='conv8')
self.conv9 = Conv(1, ksize=7, norm=False, nl=False, name='conv9')
self.conv10 = Conv(3, ksize=7, norm=False, nl=False, name='conv10')
self.conv11 = Conv(n_ch[4], name='conv11')
self.conv12 = Conv(2, ksize=7, norm=False, nl=False, name='conv12')
self.conv13 = Conv(3, ksize=7, norm=False, nl=False, name='conv13')
self.up1 = ConvT(n_ch[2])
self.up2 = ConvT(n_ch[1])
self.up3 = ConvT(n_ch[1])
self.up4 = ConvT(n_ch[1])
self.up5 = ConvT(n_ch[2])
self.up6 = ConvT(n_ch[1])
self.up7 = ConvT(n_ch[1])
self.up8 = ConvT(n_ch[1])
self.up9 = ConvT(n_ch[2])
self.up10 = ConvT(n_ch[1])
self.up11 = ConvT(n_ch[1])
self.up12 = ConvT(n_ch[1])
self.down1 = Conv(n_ch[4], stride=2)
self.down2 = Conv(n_ch[4], stride=2)
self.down3 = Conv(n_ch[4], stride=2)
self.down4 = Conv(n_ch[4], stride=2)
self.sa1 = SA(7)
self.sa2 = SA(5)
self.sa3 = SA(3)
self.sa4 = SA(3)
self.sa5 = SA(7)
self.sa6 = SA(7)
self.pool2 = layers.AveragePooling2D(pool_size=(2, 2), strides=2)
self.pool4 = layers.AveragePooling2D(pool_size=(4, 4), strides=4)
self.pool8 = layers.AveragePooling2D(pool_size=(8, 8), strides=8)
def call(self, img, training):
## GX: image with different scales.
im_128 = tf.image.resize(self.pool2(img), [256, 256])
im_64 = tf.image.resize(self.pool4(img), [256, 256])
im_32 = tf.image.resize(self.pool8(img), [256, 256])
## GX: create some residual images.
imgd1 = img - im_128
imgd2 = im_128 - im_64
imgd3 = im_64 - im_32
imgd4 = im_32
inputs = tf.concat([imgd1*25, imgd2*15, imgd3*8, imgd4], axis=3)
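		# (Assumed intent) The residuals form a Laplacian-pyramid-style band split of
		# the input; the 25/15/8 gains appear to rebalance the weaker high-frequency bands.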
x0 = self.conv0(inputs, training)
x1_1 = tf.concat([x0, self.conv1(x0, training)], axis=3)
x1_2 = tf.concat([x0, x1_1, self.conv2(x1_1, training)],axis=3)
x1_3 = self.down1(x1_2, training)
x2_1 = tf.concat([x1_3, self.conv3(x1_3, training)], axis=3)
x2_2 = tf.concat([x1_3, x2_1, self.conv4(x2_1, training)],axis=3)
x2_3 = self.down2(x2_2, training)
x3_1 = tf.concat([x2_3, self.conv5(x2_3, training)], axis=3)
x3_2 = tf.concat([x2_3, x3_1, self.conv6(x3_1, training)],axis=3)
x3_3 = self.down3(x3_2, training)
x4_1 = tf.concat([x3_3, self.conv7(x3_3, training)], axis=3)
x4_2 = tf.concat([x3_3, x4_1, self.conv8(x4_1, training)],axis=3)
x4_3 = self.down4(x4_2, training)
region_map = self.RE([x1_3,x2_3,x3_3,x4_3], training=training)
x1_3 = 1e-5*x1_3*tf.image.resize(region_map, [128, 128]) + x1_3
x2_3 = 1e-5*x2_3*tf.image.resize(region_map, [64, 64]) + x2_3
x3_3 = 1e-5*x3_3*tf.image.resize(region_map, [32, 32]) + x3_3
x4_3 = 1e-5*x4_3*tf.image.resize(region_map, [16, 16]) + x4_3
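		# The predicted region map is injected into every encoder scale with a tiny
		# (1e-5) residual weight, keeping the region estimator on the gradient path
		# without noticeably perturbing the encoder features.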
# u, w, v are for the p, n, c respectively.
u1 = self.up1(x4_3, training)
u2 = self.up2(tf.concat([u1, x3_3], axis=3), training)
u3 = self.up3(tf.concat([u2, x2_3], axis=3), training)
u4 = self.up4(tf.concat([u3, x1_3], axis=3), training)
w1 = self.up5(x4_3, training)
w2 = self.up6(tf.concat([w1, x3_3], axis=3), training)
w3 = self.up7(tf.concat([w2, x2_3], axis=3), training)
w4 = self.up8(tf.concat([w3, x1_3], axis=3), training)
v1 = self.up9(x4_3, training)
v2 = self.up10(v1, training)
v3 = self.up11(v2, training)
v4 = self.up12(v3, training)
p = tf.nn.sigmoid(self.conv9(u4, training)) # region
n = tf.nn.tanh(self.conv10(w4, training)/3e2) # additive trace.
c = tf.nn.sigmoid(self.conv13(v4, training)) # content
# ShortCut
d1 = tf.image.resize(self.sa1(x1_3),[32,32])
d2 = tf.image.resize(self.sa2(x2_3),[32,32])
d3 = tf.image.resize(self.sa3(x3_3),[32,32])
d4 = tf.image.resize(self.sa4(x4_3),[32,32])
d5 = tf.image.resize(self.sa5(tf.stop_gradient(u4)),[32,32])
d6 = tf.image.resize(self.sa6(tf.stop_gradient(w4)),[32,32])
ds = tf.concat([d1, d2, d3, d4, d5, d6],3)
x4 = self.conv11(ds, training)
dmap = self.conv12(x4, training)
return dmap, p, c, n, [x1_3,x2_3,x3_3,x4_3], region_map
class Discriminator(tf.keras.Model):
def __init__(self, downsize=1, num_layers=3):
super(Discriminator, self).__init__()
n_ch = [32,64,96,128,128,256]
self.conv1 = Conv(n_ch[0], ksize=4, stride=2, norm=False)
self.conv_stack = []
for i in range(num_layers):
self.conv_stack.append(Conv(n_ch[i], ksize=4, stride=2, norm='batch'))
self.conv2 = Conv(n_ch[2], ksize=4, norm=False, nl=False)
self.downsize = downsize
self.num_layers = num_layers
def call(self, x, training):
if self.downsize > 1:
_,w,h,_ = x.shape
x=tf.image.resize(x,(w//self.downsize,h//self.downsize))
x = self.conv1(x, training)
for i in range(self.num_layers):
x = self.conv_stack[i](x, training)
x = self.conv2(x)
		return tf.split(x,2,axis=0)

===== Multi-domain-learning-FAS-main/source_SiW_Mv2/dataset.py =====
# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.s
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes not withstanding any copyright annotation thereon.
# ==============================================================================
import cv2
import tensorflow as tf
import glob
import random
import numpy as np
from natsort import natsorted, ns
from utils import face_crop_and_resize, image_process
from warp import generate_uv_map
from parameters import uv, lm_ref, RANDOM_SEED, REPEAT_TIME_LI, REPEAT_TIME_SP, SAMPLE_NUM_TRAIN, SAMPLE_NUM_TEST
autotune = tf.data.experimental.AUTOTUNE  # resolves to -1
uv = np.transpose(np.asarray(uv, dtype=np.float32))
lm_ref = np.transpose(np.asarray(lm_ref, dtype=np.float32))/256.
def get_dmap_and_stype(config, lm, dataset, stype):
dmap0 = generate_uv_map(lm, uv, config.IMG_SIZE)
dmap_up = np.copy(dmap0)
dmap_up[config.IMG_SIZE//2:,:,:]=0
dmap_bot = np.copy(dmap0)
dmap_bot[:config.IMG_SIZE//2,:,:]=0
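	# dmap_up / dmap_bot keep only the top / bottom half of the UV depth map; the
	# partial attacks below assign the covered half to the spoof channel.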
if stype == 'Live':
n_stype = [1,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
dmap = np.concatenate([dmap0, dmap0*0], axis=2)
elif stype == 'Makeup_Co':
n_stype = [0,1,0,0,0,0,0,0,0,0,0,0,0,0,0]
dmap = np.concatenate([np.zeros_like(dmap0), dmap0], axis=2)
elif stype == 'Makeup_Im':
n_stype = [0,0,1,0,0,0,0,0,0,0,0,0,0,0,0]
dmap = np.concatenate([np.zeros_like(dmap0), dmap0], axis=2)
elif stype == 'Makeup_Ob':
n_stype = [0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
dmap = np.concatenate([np.zeros_like(dmap0), dmap0], axis=2)
elif stype == 'Mask_Half':
n_stype = [0,0,0,0,1,0,0,0,0,0,0,0,0,0,0]
dmap = np.concatenate([np.zeros_like(dmap0), dmap0], axis=2)
elif stype == 'Mask_Silicone':
n_stype = [0,0,0,0,0,1,0,0,0,0,0,0,0,0,0]
dmap = np.concatenate([np.zeros_like(dmap0), dmap0], axis=2)
elif stype == 'Mask_Trans':
n_stype = [0,0,0,0,0,0,1,0,0,0,0,0,0,0,0]
dmap = np.concatenate([np.zeros_like(dmap0), dmap0], axis=2)
elif stype == 'Mask_Paper':
n_stype = [0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
dmap = np.concatenate([np.zeros_like(dmap0), dmap0], axis=2)
elif stype == 'Mask_Mann':
n_stype = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0]
dmap = np.concatenate([np.zeros_like(dmap0), dmap0], axis=2)
elif stype == 'Partial_Funnyeye':
n_stype = [0,0,0,0,0,0,0,0,0,1,0,0,0,0,0]
dmap = np.concatenate([dmap_bot, dmap_up], axis=2)
elif stype == 'Partial_Eye':
n_stype = [0,0,0,0,0,0,0,0,0,0,1,0,0,0,0]
dmap = np.concatenate([dmap_bot, dmap_up], axis=2)
elif stype == 'Partial_Mouth':
n_stype = [0,0,0,0,0,0,0,0,0,0,0,1,0,0,0]
dmap = np.concatenate([dmap_up, dmap_bot], axis=2)
elif stype == 'Partial_Paperglass':
n_stype = [0,0,0,0,0,0,0,0,0,0,0,0,1,0,0]
dmap = np.concatenate([dmap_bot, dmap_up], axis=2)
elif stype == 'Replay':
n_stype = [0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]
dmap = np.concatenate([np.zeros_like(dmap0), np.ones_like(dmap0)], axis=2)
elif stype == 'Paper':
n_stype = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
dmap = np.concatenate([np.zeros_like(dmap0), np.ones_like(dmap0)], axis=2)
else:
		assert False, f"{stype} is an invalid spoof type."
return dmap, n_stype
class Dataset():
def __init__(self, config, mode, dset=None):
self.config = config
self.mode = mode
self.dset = dset
if mode == 'inference':
self.input_tensors, self.name_list = self.inputs(self.config.inference_data_dir,
self.config.inference_data_img)
else:
if mode == 'train':
data_dir_li = self.config.LI_DATA_DIR
data_dir_sp = self.config.SP_DATA_DIR
elif mode == 'val':
data_dir_li = self.config.LI_DATA_DIR_VAL
data_dir_sp = self.config.SP_DATA_DIR_VAL
elif 'test_A' in mode:
data_dir_li = self.config.LI_DATA_DIR_TEST
data_dir_sp = self.config.SP_DATA_DIR_TEST
elif 'test_B' in mode:
data_dir_li = self.config.LI_DATA_DIR_TEST_B
data_dir_sp = self.config.SP_DATA_DIR_TEST_B
self.data_folders = None
self.data_samples = None
self.input_tensors, self.name_list = self.inputs(data_dir_li, data_dir_sp)
self.feed = iter(self.input_tensors)
def __len__(self):
return len(self.name_list)
def _info(self):
return len(self.data_samples)
def nextit(self):
return next(self.feed)
def _return_list(self, dir):
dir_list = []
for _ in dir:
_list = glob.glob(_)
dir_list += _list
return dir_list
def _extend_list(self, vd_list, mode):
		'''
		When mode contains 'csv' (e.g., test_A_csv), results over the entire dataset
		are dumped into the csv; otherwise, at most 20 images per subject are used
		for validation. test_A and test_B denote the source- and target-domain
		datasets, following the terminology of the ECCV 2022 paper.
		'''
new_list = []
for idx, _file in enumerate(vd_list):
meta = glob.glob(_file+'/*.png')
meta.sort()
			if 'csv' not in mode:
				random.seed(RANDOM_SEED)
				meta = random.sample(meta, min(20, len(meta)))
			else:
				meta = meta[::10]
			new_list += meta
return new_list
def inputs(self, data_dir_li, data_dir_sp):
mode = self.mode
protocol = self.config.SET
if mode == 'train' or mode == 'val':
li_data_samples = data_dir_li if self.config.dataset in ['oulu', 'custom'] else self._return_list(data_dir_li)
sp_data_samples = data_dir_sp if self.config.dataset in ['oulu', 'custom'] else self._return_list(data_dir_sp)
data_samples = [li_data_samples, sp_data_samples]
li_data_samples = REPEAT_TIME_LI * li_data_samples
sp_data_samples = REPEAT_TIME_SP * sp_data_samples
li_data_samples = li_data_samples[:SAMPLE_NUM_TRAIN] if mode == 'train' else li_data_samples[:SAMPLE_NUM_TEST]
sp_data_samples = sp_data_samples[:SAMPLE_NUM_TRAIN] if mode == 'train' else sp_data_samples[:SAMPLE_NUM_TEST]
shuffle_buffer_size = min(len(li_data_samples), len(sp_data_samples))
dataset = tf.data.Dataset.from_tensor_slices((li_data_samples, sp_data_samples))
dataset = dataset.shuffle(shuffle_buffer_size).repeat(-1)
if mode == 'train':
dataset = dataset.map(map_func=self.parse_fn, num_parallel_calls=autotune)
elif mode == 'val':
dataset = dataset.map(map_func=self.parse_fn_val, num_parallel_calls=autotune)
dataset = dataset.batch(batch_size=self.config.BATCH_SIZE).prefetch(buffer_size=autotune)
else:
if 'test_A' in mode or 'test_B' in mode:
li_data_samples = self._return_list(data_dir_li)
sp_data_samples = self._return_list(data_dir_sp)
## GX: we use the 50 samples in the validation.
## GX: we use the entire dataset for the test.
if 'csv' not in mode:
random.seed(RANDOM_SEED)
li_data_samples = random.sample(li_data_samples, 50) if len(li_data_samples) >= 50 else \
random.sample(li_data_samples, len(li_data_samples))
random.seed(RANDOM_SEED)
sp_data_samples = random.sample(sp_data_samples, 50) if len(sp_data_samples) >= 50 else \
random.sample(sp_data_samples, len(sp_data_samples))
self.data_folders = li_data_samples + sp_data_samples
## GX: we only take 20 images per image in the validation set.
## GX: we take the entire dataset for the test.
self.data_samples = self._extend_list(self.data_folders, mode)
dataset = tf.data.Dataset.from_tensor_slices(self.data_samples)
dataset = dataset.cache()
dataset = dataset.map(map_func=self.parse_fn_test)
dataset = dataset.batch(batch_size=self.config.BATCH_SIZE).prefetch(buffer_size=autotune)
elif mode == 'inference':
if data_dir_li != None:
self.data_samples = glob.glob(data_dir_li+'/*.png')
self.data_samples.sort()
elif data_dir_sp != None:
self.data_samples = [data_dir_sp]
else:
assert False, print("Please offer either valid inference image directory or images.")
dataset = tf.data.Dataset.from_tensor_slices(self.data_samples)
dataset = dataset.cache()
dataset = dataset.map(map_func=self.parse_fn_inference)
dataset = dataset.batch(batch_size=self.config.BATCH_SIZE).prefetch(buffer_size=autotune)
return dataset, self.data_samples
def _img_parse(self, file_name):
'''given the image name and dataset, we decide the spoof type or live images.'''
file_name = file_name.decode('UTF-8')
meta = glob.glob(file_name + '/*.png')
		if not meta:
			raise FileNotFoundError(f"No PNG frames were found under {file_name}.")
		im_name = meta[random.randint(0, len(meta) - 1)]
lm_name = im_name[:-3] + 'npy'
parts = file_name.split('/')
dataset = self.config.dataset
if dataset == 'SiWM-v2':
stype = parts[-1].split('_')[:-1]
stype = '_'.join(stype)
elif dataset == 'SiW':
spoof_id = int(parts[-1].split("-")[2])
if spoof_id == 1:
stype = 'Live'
elif spoof_id == 2:
stype = 'Paper'
else:
stype = 'Replay'
elif dataset == 'oulu':
# device_id, bg_id, sub_id, spoof_id
spoof_id = int(parts[-1].split('_')[-1])
if spoof_id == 1:
stype = "Live"
elif spoof_id in [2,3]:
stype = 'Paper'
elif spoof_id in [4,5]:
stype = 'Replay'
else:
assert False, print("Please offer the valid dataset...")
return im_name, lm_name, dataset, stype
def _img_preprocess(self, file_name, dataset=None):
while True:
im_name, lm_name, dataset_, stype = self._img_parse(file_name)
dataset = self.config.dataset
img = cv2.cvtColor(cv2.imread(im_name), cv2.COLOR_BGR2RGB) / 255.
lm = np.load(lm_name)
img, lm = face_crop_and_resize(img, lm, self.config.IMG_SIZE, aug=True)
try:
dmap, n_stype = get_dmap_and_stype(self.config, lm, dataset, stype)
n_stype = np.reshape(np.array([n_stype], np.float32), (-1))
except:
print(f"{file_name} cannot work on get_dmap_and_stype.")
continue
else:
break
return img, dmap, n_stype, lm, dataset
def _parse_function(self, _file1, _file2):
img1, dmap1, n_stype1, lm1, dataset = self._img_preprocess(_file1)
img2, dmap2, n_stype2, lm2, _ = self._img_preprocess(_file2, dataset)
reg = img1 # dummy code.
return img1.astype(np.float32), img2.astype(np.float32), \
dmap1.astype(np.float32), dmap2.astype(np.float32), \
n_stype1.astype(np.float32), n_stype2.astype(np.float32), \
reg.astype(np.float32)
def parse_fn_val(self, file1, file2):
config = self.config
_img1, _img2, _dmap1, _dmap2, _stype1, _stype2, _reg = \
tf.numpy_function(self._parse_function,
[file1, file2],
[tf.float32, tf.float32, tf.float32,
tf.float32, tf.float32, tf.float32,
tf.float32])
_img1 = tf.ensure_shape(_img1, [config.IMG_SIZE, config.IMG_SIZE, 3])
_img2 = tf.ensure_shape(_img2, [config.IMG_SIZE, config.IMG_SIZE, 3])
_dmap1 = tf.ensure_shape(_dmap1,[config.IMG_SIZE, config.IMG_SIZE, 2])
_dmap2 = tf.ensure_shape(_dmap2,[config.IMG_SIZE, config.IMG_SIZE, 2])
_stype1 = tf.ensure_shape(_stype1, [15])
_stype2 = tf.ensure_shape(_stype2, [15])
_reg = tf.ensure_shape(_reg,[config.IMG_SIZE, config.IMG_SIZE, 3])
return _img1, _img2, _dmap1, _dmap2, _stype1, _stype2, _reg
def parse_fn(self, file1, file2):
config = self.config
_img1, _img2, _dmap1, _dmap2, _stype1, _stype2, _reg = self.parse_fn_val(file1, file2)
# Data augmentation.
_img1a = tf.image.random_contrast(_img1, 0.9, 1.1)+ tf.random.uniform([1, 1, 3], minval=-0.03, maxval=0.03)
_img1a = tf.cond(tf.greater(tf.random.uniform([1], 0, 1)[0], 0.5),lambda: _img1a, lambda: _img1)
_img2a = tf.image.random_contrast(_img2, 0.9, 1.1)+ tf.random.uniform([1, 1, 3], minval=-0.03, maxval=0.03)
_img2a = tf.cond(tf.greater(tf.random.uniform([1],0,1)[0],0.5),lambda: _img2a, lambda: _img2)
return _img1a, _img2a, _dmap1, _dmap2, _stype1, _stype2, _reg
def parse_fn_test(self, file):
config = self.config
def _parse_function(_file):
_file = _file.decode('UTF-8')
im_name = _file
lm_name = im_name[:-3] + 'npy'
dataset = config.dataset
img = cv2.cvtColor(cv2.imread(im_name), cv2.COLOR_BGR2RGB) / 255.
lm = np.load(lm_name)
img, lm = face_crop_and_resize(img, lm, config.IMG_SIZE, aug=False)
return img.astype(np.float32), im_name
image, im_name = tf.numpy_function(_parse_function, [file], [tf.float32, tf.string])
image = tf.ensure_shape(image, [config.IMG_SIZE, config.IMG_SIZE, 3])
return image, im_name
def parse_fn_inference(self, file):
config = self.config
def _parse_function_inference(_file):
im_name = _file.decode('UTF-8')
dataset = config.dataset
img, lm = image_process(im_name)
img = img / 255.
img, lm = face_crop_and_resize(img, lm, config.IMG_SIZE, aug=False)
return img.astype(np.float32), im_name
image, im_name = tf.numpy_function(_parse_function_inference, [file], [tf.float32, tf.string])
image = tf.ensure_shape(image, [config.IMG_SIZE, config.IMG_SIZE, 3])
		return image, im_name

===== Multi-domain-learning-FAS-main/source_SiW_Mv2/inference.sh =====
source ~/.bashrc
conda activate anti_spoofing
CUDA_NUM=0
python inference.py --cuda=$CUDA_NUM --pro=1 --dir=./demo/live/ --overwrite --weight_dir=../saved_model
python inference.py --cuda=$CUDA_NUM --pro=1 --img=./demo/1.png --overwrite --weight_dir=../saved_model

===== Multi-domain-learning-FAS-main/source_SiW_Mv2/csv_parser.sh =====
source ~/.bashrc
conda activate anti_spoofing
CUDA_NUM=0
python csv_parser.py --pro=1 --log_dir=../train_log
python csv_parser.py --pro=2 --log_dir=../train_log
python csv_parser.py --pro=3 --log_dir=../train_log

===== Multi-domain-learning-FAS-main/source_SiW_Mv2/README.md =====
# SiW-Mv2 Dataset
<p align="center">
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_SiW_Mv2/figures/train_tb.png" alt="drawing" width="500"/>
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_SiW_Mv2/figures/intermediate_result.png" alt="drawing" width="300"/>
</p>
- We provide detailed dataset preprocessing steps as well as the training scripts.
- After following our instructions, users can generate a TensorBoard log similar to the left figure above, as well as intermediate results (right figure above) showing, from top to bottom, the original input image, the pseudo reconstructed live image, the spoof trace, and the ground-truth and predicted depth maps.
### 1. Setup the environment.
- A quick view of the code structure:
```bash
./source_SiW_Mv2
├── config_siwm.py
├── train.py
├── test.py
├── run.sh (call train.py and test.py)
├── inference.py
├── inference.sh (call inference.py for the custom data.)
├── csv_parser.py
├── csv_parser.sh (call csv_parser.py to reproduce the numerical baseline result.)
├── pro_3_text (partition of the three protocol.)
│ │ (Repetitive subject names are for balancing the number between live and spoof subjects.)
│ ├── trainlist_all.txt (spoof train subjects in protocol I)
│ ├── trainlist_live.txt (live train subjects in protocol I and II)
│ ├── testlist_all.txt (spoof test subjects in protocol I)
│ ├── testlist_live.txt (live test subjects in protocol I and II)
│ ├── train_A_pretrain.txt (protocol III source domain subject for training)
│ ├── test_B_spoof.txt (protocol III target domain B subject for testing)
│ ├── test_C_race.txt (protocol III target domain C subject for testing)
│ ├── test_D_age.txt (protocol III target domain D subject for testing)
│ └── test_E_illu.txt (protocol III target domain E subject for testing)
├── model.py (SRENet)
├── preprocessing.py (data preprocessing file.)
├── demo (the demo image and image dir for the quick usage)
│ └── ...
├── parameters.py
├── environment.yml
├── metrics.py
├── utils.py
├── warp.py
└── DRA_form_SIWMv2.pdf (Dataset Release Agreement)
```
- To create your own environment, run:
```
conda env create -f environment.yml
```
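- Then activate it before running any of the scripts (they all assume this environment name):
```bash
conda activate anti_spoofing
```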
### 2. Quick Usage
- The pre-trained weights for $3$ different protocols and the corresponding `.csv` result files can be found on this [page](https://drive.google.com/drive/folders/106TrDEeH-OOfPP4cWketphMJGXtE9sgW?usp=sharing).
- To reproduce the numerical results of the baseline, please run the following command. Results are printed to the screen.
```bash
bash csv_parser.sh
Compute the protocol I scores.
AP: ['2.3', '2.3', '0.4', '2.3', '0.0', '7.3', '5.4', '0.0', '10.7', '0.0', ...
...
```
- For inference on a single image or a directory of images, please run the following command; users can also try their own images.
- Results are printed to the screen and saved to a `.csv` file.
```bash
bash inference.sh
...
- Results written to ./result/result.csv
...
./demo/1.png is classified as Spoof with the score 0.52
```
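- If you prefer scoring programmatically, the sketch below maps a raw fused score to a decision with `normalization_score` from `utils.py`; the score value here is made up for illustration, and note that importing `utils` also initializes the face-alignment model.
```python
from utils import normalization_score

raw_score = 0.23  # hypothetical raw (depth + region) score from the model
prob, label = normalization_score(raw_score)  # defaults: shift=0.6, scale=1.6
print(f"normalized score {prob:.2f} -> {label}")  # -> 0.52, Spoof
```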
### 3. Train and Testing
#### 3.1. Data Preparation
- Please first sign the [DRA form](https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_SiW_Mv2/DRA_form_SIWMv2.pdf) before downloading the SiW-Mv2 dataset.
- After unzipping the dataset files, you obtain the following structure:
```bash
./SiW-Mv2
├── Spoof (contain 14 folders, each of which has raw videos).
│ ├── Makeup_Cosmetic
│ ├── Makeup_Impersonation
│ ├── Makeup_Obfuscation
│ ├── Mannequin
│ ├── Silicone
│ ├── Print
│ ├── Replay
│ ├── Partial_FunnyeyeGlasses
│ ├── Partial_PaperGlasses
│ ├── Partial_Eye
│ ├── Partial_Mouth
│ ├── Mask_HalfMask
│ ├── Mask_PaperMask
│ └── Mask_TransparentMask
├── Live (contain 785 raw video files)
└── DRA_form_SIWMv2.pdf (Dataset Release Agreement)
```
- To preprocess the videos for training and testing, you can adapt our `preprocessing.py` to your own data configuration:
```bash
python preprocessing.py
```
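- If your raw videos live elsewhere, `video_process` can also be driven directly; a minimal sketch (the paths below are placeholders for your own data):
```python
import glob
from preprocessing import video_process

live_videos = glob.glob('./my_videos/live/*.mov')  # hypothetical location
video_process(live_videos, folder_dir='./preprocessed_image_train/Train/live/')
```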
- After the preprocessing step completes, the program outputs extracted frames (`*.png`) and facial landmarks (`*.npy`) to the `data` folder. Specifically:
```bash
./preprocessed_image_train
├── Train
│ │── live
│ │ │── Live_0
│ │ │ │── 1.png
│ │ │ │── 1.npy
│ │ │ │── 2.png
│ │ │ │── 2.npy
│ │ │ └── ...
│ │ │── Live_1
│ │ │ │── 1.png
│ │ │ │── 1.npy
│ │ │ └── ...
│ │ ...
│ └── spoof
│ │── Spoof_0
│ │ │── 1.png
│ │ │── 1.npy
│ │ └── ...
│ │── Spoof_1
│ │ │── 1.png
│ │ │── 1.npy
│ │ └── ...
│ ...
└── Test (consistent with the training dataset configuration)
│── live
│ │── Live_0
│ │ └── ...
│ ...
└── spoof
│── Spoof_0
│ └── ...
...
```
#### 3.2. Train and Testing
- After setting up the dataset path, you can run the training code as shown below:
```
python train.py --pro=1 --cuda=0
```
- Use `--pro` and `--unknown` (e.g., `--pro=2 --unknown=Co`) to choose the protocol and the unknown spoof type.
- `--batch_size`, `--lr`, and `--decay_step` are training hyper-parameters.
- `--cuda=0` specifies the GPU usage.
- Run the testing code, which saves scores to a `.csv` file:
```
python test.py --pro=1 --cuda=0
```
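- Each csv row stores the per-frame traces under the header written by `test.py` (`Video name`, `Dataset`, `Depth`, `Region`, `Content`, `Additive`, `Label`, `test_mode`). A quick way to rebuild the fused score (depth + region, as in `test.py`) is sketched below, assuming `pandas` is installed; the csv path follows the default protocol-1 naming:
```python
import pandas as pd

df = pd.read_csv('./log_siwmv2_pro_1_unknown_Ob/res_49.csv')
df['Final'] = df['Depth'] + df['Region']  # the fused score used at test time
print(df[['Video name', 'Final', 'Label']].head())
```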
- To run the algorithm on all $3$ protocols, run the following script.
```
bash run.sh
```
#### 3.3 Pre-trained Weights.
- Pre-trained weights for $3$ different protocols can be found on this [page](https://drive.google.com/drive/folders/106TrDEeH-OOfPP4cWketphMJGXtE9sgW?usp=sharing).
| Protocol | Unknown | Download | Protocol | Unknown | Download | Protocol | Unknown | Download |
|:----:|:--------:|:----:|:----:|:--------:|:----:|:----:|:--------:|:----:|
|I|N/A|[link](https://drive.google.com/drive/folders/1fSoF-Xy1DajQvIdnO8LQtEi-waXr6OaW?usp=sharing)|II|Partial Eyes|[link](https://drive.google.com/drive/folders/1AS6J0aYIUNEv6wkEf_XLWlhqncxIptfi?usp=sharing)|II|Transparent|[link](https://drive.google.com/drive/folders/1S-Pm-iAtYdr2EBgl6qhvOmHKdwcdVw3s?usp=sharing)|
|II|Full Mask|[link](https://drive.google.com/drive/folders/1m2kvmlzOySLISlbuBe3izPazev-IO30J?usp=sharing)|II|Paper Mask|[link](https://drive.google.com/drive/folders/1ng5ax86y_Gvh_DYGJvScPW7bEzA7lY9e?usp=sharing)|II|Obfuscation|[link](https://drive.google.com/drive/folders/1PI_NdjzDsLelU8nyLRTrbYZrFA_X-k-p?usp=sharing)|
|II|Cosmetic|[link](https://drive.google.com/drive/folders/1ck0uDRvTFSzYJUwkMYZyu0KSv046-G6k?usp=sharing)|II|Paper glass|[link](https://drive.google.com/drive/folders/1nOvApxLV5t1IUSxboK0w4RtymHj6sMQ8?usp=sharing)|II|Print|[link](https://drive.google.com/drive/folders/1OlWB0MKjXrrx5Q6UkWVWkygjPNbZ_4ol?usp=sharing)|
|II|Impersonate|[link](https://drive.google.com/drive/folders/1Lt-_h3vqfVJ2f_vtOzr2oOKTVnyve2oz?usp=sharing)|II|Silicone|[link](https://drive.google.com/drive/folders/1bplxEU4G_qs5P9Udy3G3c12FmJC_6kkE?usp=share_link)|II|Replay|[link](https://drive.google.com/drive/folders/1Kkp5awJMvteEGe-9772ms3s3qxH_jj4N?usp=sharing)|
|II|FunnyEyes|[link](https://drive.google.com/drive/folders/1Fs4GxiUr3zMJhoUYb8jX-Raf1WST-o90?usp=sharing)|II|Partial Mouth|[link](https://drive.google.com/drive/folders/1Z-LcrLNv5g7NrgzuF4ba2g80mEpa14p0?usp=share_link)|II|Mannequin|[link](https://drive.google.com/drive/folders/1Lv3byEmeWtgJi23A5_6SC2mkhhLs8VHe?usp=sharing)|
|III|Cross Domain|[link](https://drive.google.com/drive/folders/1Nv2BePpjQgo2YD_CqxQ1Sv99UJn7esPB?usp=sharing)|
## Reference
If you would like to use our work, please cite:
```Bibtex
@inproceedings{xiaoguo2022MDFAS,
title={Multi-domain Learning for Updating Face Anti-spoofing Models},
author={Guo, Xiao and Liu, Yaojie and Jain, Anil and Liu, Xiaoming},
booktitle={ECCV},
year={2022}
}
```
This GitHub repository will continue to be updated in the near future. If you have any questions, please contact: [Xiao Guo](guoxia11@msu.edu)

===== Multi-domain-learning-FAS-main/source_SiW_Mv2/parameters.py =====
# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.s
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes not withstanding any copyright annotation thereon.
# ==============================================================================
uv =[[0.19029412,0.19795537 ,0.21318457 ,0.22828290 ,0.24970947 ,0.28816611 ,0.33394283 ,0.39239809 ,0.47876307 ,0.56515092 ,0.62323409 ,0.66867208 ,0.70676976 ,0.72820741 ,0.74272829 ,0.75663871 ,0.76398379 ,0.25338903 ,0.28589997 ,0.32738855 ,0.36722445 ,0.40321609 ,0.55088127 ,0.58705842 ,0.62712812 ,0.66933709 ,0.70184904 ,0.47813031 ,0.47830373 ,0.47872066 ,0.47870359 ,0.43102017 ,0.45095450 ,0.47804111 ,0.50489837 ,0.52461874 ,0.30827355 ,0.33330417 ,0.36890128 ,0.40203944 ,0.37214473 ,0.33496466 ,0.55122417 ,0.58458656 ,0.62106317 ,0.64688802 ,0.61956245 ,0.58191341 ,0.37796655 ,0.41338006 ,0.45562238 ,0.47811818 ,0.50052267 ,0.54254669 ,0.57570505 ,0.54044306 ,0.51024377 ,0.47821599 ,0.44642609 ,0.41657540 ,0.38790068 ,0.44901687 ,0.47766650 ,0.50653827 ,0.56918079 ,0.50583494 ,0.47757983 ,0.44971457],
[0.55190903,0.47428983 ,0.40360034 ,0.33980367 ,0.27118790 ,0.21624640 ,0.18327993 ,0.15577883 ,0.14014046 ,0.15676366 ,0.18313733 ,0.21531384 ,0.26951864 ,0.33780637 ,0.40212137 ,0.47324431 ,0.55168754 ,0.63735390 ,0.66241443 ,0.67068136 ,0.66713846 ,0.65712863 ,0.65805173 ,0.66828096 ,0.67205220 ,0.66368717 ,0.63796753 ,0.58252430 ,0.53523010 ,0.48812559 ,0.44775373 ,0.41256407 ,0.40846801 ,0.40317070 ,0.40854913 ,0.41281027 ,0.58095986 ,0.59604895 ,0.59652811 ,0.57966459 ,0.57139677 ,0.56953919 ,0.57967824 ,0.59695679 ,0.59599525 ,0.58050835 ,0.57008123 ,0.57134289 ,0.31730300 ,0.34064898 ,0.35593933 ,0.35154018 ,0.35593045 ,0.34062389 ,0.31715956 ,0.30086508 ,0.28950119 ,0.28752795 ,0.28963783 ,0.30076182 ,0.31932616 ,0.32959232 ,0.33032984 ,0.32936266 ,0.31900606 ,0.32014942 ,0.31873652 ,0.32043788],
[0.54887491,0.55835652 ,0.56531715 ,0.58029217 ,0.61638439 ,0.68007606 ,0.75769442 ,0.82921398 ,0.85709274 ,0.82894272 ,0.75751764 ,0.68032110 ,0.61664295 ,0.58068472 ,0.56520522 ,0.55785143 ,0.54947090 ,0.79504120 ,0.84203368 ,0.87477297 ,0.89484525 ,0.90437353 ,0.90412331 ,0.89423305 ,0.87385195 ,0.84139013 ,0.79445726 ,0.91648984 ,0.95176858 ,0.98838627 ,0.99706292 ,0.91018295 ,0.92791700 ,0.93613458 ,0.92778808 ,0.90999144 ,0.82165444 ,0.85368645 ,0.85440493 ,0.84463143 ,0.85324180 ,0.84432119 ,0.84337026 ,0.85280263 ,0.85272932 ,0.82140154 ,0.84402239 ,0.85248041 ,0.86857969 ,0.91266698 ,0.93638903 ,0.93873996 ,0.93629760 ,0.91227442 ,0.86774820 ,0.90530455 ,0.92216164 ,0.92610627 ,0.92281538 ,0.90596151 ,0.87151438 ,0.91635096 ,0.92336667 ,0.91626322 ,0.87006092 ,0.91713434 ,0.92056626 ,0.91682398]]
lm_ref = [[42.022587,44.278061,48.761536,53.206482,59.514465,70.836105,84.312767,101.52200,126.94785,152.38043,169.48012,182.85706,194.07301,200.38426,204.65921,208.75444,210.91682,60.597733,70.168953,82.383194,94.110878,104.70682,148.17944,158.83000,170.62653,183.05284,192.62436,126.76157,126.81262,126.93536,126.93034,112.89234,118.76100,126.73531,134.64207,140.44775,76.755737,84.124748,94.604538,104.36041,95.559410,84.613594,148.28040,158.10228,168.84100,176.44383,168.39919,157.31531,97.273354,107.69909,120.13522,126.75800,133.35388,145.72574,155.48756,145.10645,136.21576,126.78679,117.42784,108.63980,100.19796,118.19057,126.62502,135.12486,153.56682,134.91780,126.59950,118.39597],
[94.517975,117.36908,138.18005,156.96179,177.16229,193.33707,203.04239,211.13872,215.74265,210.84879,203.08437,193.61160,177.65372,157.54980,138.61548,117.67688,94.583191,69.363007,61.985199,59.551407,60.594437,63.541336,63.269577,60.258087,59.147827,61.610504,69.182358,85.504852,99.428253,113.29582,125.18130,135.54114,136.74701,138.30655,136.72314,135.46866,85.965424,81.523193,81.382126,86.346741,88.780792,89.327667,86.342728,81.255920,81.539001,86.098343,89.168091,88.796661,163.58600,156.71295,152.21146,153.50656,152.21408,156.72034,163.62823,168.42532,171.77084,172.35178,171.73062,168.45572,162.99039,159.96802,159.75090,160.03563,163.08463,162.74802,163.16397,162.66309]]
RANDOM_SEED = 123456789
REPEAT_TIME_LI = 3500
REPEAT_TIME_SP = 2000
SAMPLE_NUM_TRAIN = 5000
SAMPLE_NUM_TEST = 500
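# Minimal sanity-check sketch (added for illustration; not part of the original file).
# uv stores three rows of per-landmark reference coordinates and lm_ref stores two
# rows (x, y) of reference landmark positions; both are indexed as [axis][landmark].
if __name__ == "__main__":
	import numpy as np
	print(np.asarray(uv).shape)      # expected: (3, num_landmarks)
	print(np.asarray(lm_ref).shape)  # expected: (2, num_landmarks)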
# File: Multi-domain-learning-FAS-main/source_SiW_Mv2/metrics.py
# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
from sklearn import metrics
import numpy as np
def get_tpr_at_fpr(tpr_lst, fpr_lst, score_lst, fpr_value):
"""returns true postive rate and threshold given false positive rate value."""
abs_fpr = np.absolute(fpr_lst - fpr_value)
idx_min = np.argmin(abs_fpr)
fpr_value_target = fpr_lst[idx_min]
idx = np.max(np.where(fpr_lst == fpr_value_target))
return tpr_lst[idx], score_lst[idx]
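# Illustrative note (added; not part of the original file): with
# fpr_lst = [0.0, 0.005, 0.01, 1.0] and fpr_value = 0.01, idx_min selects index 2,
# so the function returns tpr_lst[2] together with its decision threshold score_lst[2].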
def my_metrics(label_list, pred_list, val_phase=False):
"""
computes FAS metrics.
Parameters:
val_phase (bool): flag for train and test stage.
"""
fpr, tpr, scores = metrics.roc_curve(label_list,pred_list,
drop_intermediate=True)
auc_score = metrics.auc(fpr,tpr)
fnr = 1 - tpr
tnr = 1 - fpr
EER0 = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
EER1 = fnr[np.nanargmin(np.absolute((fnr - fpr)))]
EER = min(EER0, EER1)
best_ACER, best_AP, best_BP = 100, 100, 100
best_threshold = 100
for idx_ in range(len(tpr)):
_tpr, _fpr = tpr[idx_], fpr[idx_]
_tnr, _fnr = tnr[idx_], fnr[idx_]
assert _tpr + _fnr == 1, print(_tpr, _fnr)
assert _tnr + _fpr == 1, print(_tnr, _fpr)
# https://chalearnlap.cvc.uab.cat/challenge/33/track/33/metrics/
APCER = _fpr/(_fpr+_tnr)
BPCER = _fnr/(_fnr+_tpr)
ACER = 0.5 * (APCER+BPCER)
if ACER < best_ACER:
best_ACER = ACER
best_AP = APCER
best_BP = BPCER
best_threshold = scores[idx_]
## fnr == 0.5% as the first PAMI paper version.
abs_fnr = np.absolute(fnr - 0.005)
idx = np.argmin(abs_fnr)
res_tpr = tpr[idx]
if not val_phase:
tpr_h, _ = get_tpr_at_fpr(tpr, fpr, scores, 0.005)
tpr_m, _ = get_tpr_at_fpr(tpr, fpr, scores, 0.01)
tpr_l, _ = get_tpr_at_fpr(tpr, fpr, scores, 0.02)
return best_AP, best_BP, best_ACER, EER, res_tpr, auc_score, [tpr_h, tpr_m, tpr_l]
else:
		return best_AP, best_BP, best_ACER, EER, res_tpr, auc_score
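# Minimal usage sketch (added for illustration; not part of the original file).
# Labels follow the convention used elsewhere in this repo: 0 = live, 1 = spoof,
# and a higher prediction score means more spoof-like.
if __name__ == "__main__":
	toy_labels = [0, 0, 0, 0, 1, 1, 1, 1]
	toy_scores = [0.05, 0.10, 0.20, 0.40, 0.35, 0.70, 0.80, 0.95]
	ap, bp, acer, eer, tpr, auc = my_metrics(toy_labels, toy_scores, val_phase=True)
	print(f"APCER={ap:.3f} BPCER={bp:.3f} ACER={acer:.3f} EER={eer:.3f} AUC={auc:.3f}")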
# File: Multi-domain-learning-FAS-main/source_SiW_Mv2/csv_parser.py
# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
from metrics import my_metrics
from glob import glob
import sys
import csv
import numpy as np
import argparse
def dump_to_screen(apcer_order, bpcer_order, acer_order, tpr_order,
apcer, bpcer, acer, tpr):
	'''prints the results to standard output.'''
print('AP: ', apcer_order, f"MEAN: {np.mean(apcer)*100:.1f}", f"STD: {np.std(apcer)*100:.1f}")
print('BP: ', bpcer_order, f"MEAN: {np.mean(bpcer)*100:.1f}", f"STD: {np.std(bpcer)*100:.1f}")
print('ACER: ', acer_order, f"MEAN: {np.mean(acer)*100:.1f}", f"STD: {np.std(acer)*100:.1f}")
print('TPR@FPR=1.0%: ', tpr_order, f"MEAN: {np.mean(tpr)*100:.1f}", f"STD: {np.std(tpr)*100:.1f}")
print("...over...")
sys.exit(0)
def compute_score(args, score_list, label_list, test_name, verbose=False):
'''the depth + region performance.'''
# print(test_name, len(score_list))
APCER, BPCER, ACER, EER, res_tpr_05, auc_score, [tpr_fpr_h, tpr_fpr_m, tpr_fpr_l] \
= my_metrics(label_list, score_list, val_phase=False)
message_cur = f"Test: {args.weight:.1f} depth score \n"
message_cur += f"ACER is {ACER*100:.1f}, AP: {APCER*100:.1f}, BP: {BPCER*100:.1f}, "
message_cur += f"tpr_fpr_1.0% is {tpr_fpr_m*100:.1f}"
if verbose:
print(message_cur)
print()
return APCER, BPCER, ACER, tpr_fpr_m
def compute_metric(args, score_dict):
'''parse the score dictionary here, based on the video-level evaluation.'''
score_list, label_list = [], []
live_sample_name, spoof_sample_name = [], []
for key, value in score_dict.items():
score_list_cur = score_dict[key]
score_list_cur.sort()
interval = int(len(score_list_cur)*args.interval)
score_list_compute = score_list_cur[interval:][:-1-interval]
if len(score_list_compute) == 0:
continue
# print(f'The key is: {key}, the score list length is {len(score_list_compute)}')
value = np.mean(score_list_compute)
score_list.append(np.mean(score_list_compute))
if np.mean(score_list_compute) > 0.1:
print(key)
if "Live_" in key:
label_list.append(0)
live_sample_name.append(key)
else:
label_list.append(1)
spoof_sample_name.append(key)
assert len(label_list) == len(score_list)
print(f'the total sample number is: {len(label_list)}.')
print(f"the live sample and spoof numbers are: {len(live_sample_name)}, {len(spoof_sample_name)}.")
return score_list, label_list
def video_level_compute(score_dict):
'''computing the video level results.'''
score_list, label_list = [], []
live_sample_name, spoof_sample_name = [], []
for key, value in score_dict.items():
score_list_cur = score_dict[key]
score_list_cur.sort()
interval = int(len(score_list_cur)*args.interval)
score_list_compute = score_list_cur[interval:][:-1-interval]
if len(score_list_compute) == 0:
continue
# print(f'The key is: {key}, the score list length is {len(score_list_compute)}')
value = np.mean(score_list_compute)
score_list.append(np.mean(score_list_compute))
if "Live_" in key:
label_list.append(0)
live_sample_name.append(key)
else:
label_list.append(1)
spoof_sample_name.append(key)
return score_list, label_list
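# Clarifying example (added; not part of the original file): with args.interval = 0.2
# and sorted frame scores [0.1, 0.2, 0.3, 0.4, 0.5], interval = int(5 * 0.2) = 1 and
# score_list_cur[1:][:-2] = [0.2, 0.3]; i.e., the lowest `interval` frames and the
# highest `interval + 1` frames are trimmed before averaging into one video score.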
def return_dictionary(args, test_file_lst):
'''
return the score dictionary for different spoof types.
'''
for csv_file_name in test_file_lst:
csv_file = open(csv_file_name)
csv_reader = csv.reader(csv_file, delimiter=',')
score_dict = dict() # key is video name, value is a list
score_list, label_list, exist_sample_list = [], [], []
score2_list = []
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
else:
line_count += 1
vid_name = row[0].split('/')[-2]
label = float(row[-2])
depth_score, region_score = float(row[2]), float(row[3])
final_score = depth_score + args.weight*region_score
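				# Clarifying note (added): each frame's final score fuses the two heads as
				# depth_score + weight * region_score (weight defaults to 0.1 via --weight);
				# frames are then pooled per video under vid_name for video-level evaluation.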
if vid_name not in score_dict:
score_dict[vid_name] = [final_score]
else:
score_dict[vid_name].append(final_score)
label_list.append(label)
score_list.append(final_score)
key_list = list(score_dict.keys())
assert len(label_list) == len(score_list), print(len(label_list), len(score_list))
if args.pro == 1:
parse_protocol_1_result(score_dict)
elif args.pro == 2:
score_list, label_list = video_level_compute(score_dict)
return score_list, label_list
elif args.pro == 3:
parse_protocol_3_result(score_dict)
def parse_protocol_1_result(score_dict):
'''
	output results for each spoof attack under protocol 1.
'''
print("Compute the protocol I scores.")
co_list, eye_list, fun_eye_list, half_list, im_list, man_list, mouth_list, ob_list, \
paper_list, paperglass_list, print_list, replay_list, sil_list, trans_list, live_list \
= [],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
spoof_type_dict = {
'Co': 'Makeup_Co', 'Eye': 'Partial_Eye', 'Funnyeye': 'Partial_Funnyeye',
'Half': 'Mask_Half', 'Im': 'Makeup_Im', 'Mann': 'Mask_Mann',
'Mouth': 'Partial_Mouth', 'Ob': 'Makeup_Ob', 'Paper': 'Mask_Paper',
'Paperglass': 'Partial_Paperglass', 'Print': 'Paper', 'Replay': 'Replay',
'Sil': 'Mask_Silicone', 'Trans': 'Mask_Trans'
}
vid_names = list(score_dict.keys())
for _ in vid_names:
if 'Live_' in _:
live_list.append(np.mean(score_dict[_]))
elif 'Makeup_Co' in _:
co_list.append(np.mean(score_dict[_]))
elif 'Partial_Eye' in _:
eye_list.append(np.mean(score_dict[_]))
elif 'Partial_Funnyeye' in _:
fun_eye_list.append(np.mean(score_dict[_]))
elif 'Mask_Half' in _:
half_list.append(np.mean(score_dict[_]))
elif 'Makeup_Im' in _:
im_list.append(np.mean(score_dict[_]))
elif 'Mask_Mann' in _:
man_list.append(np.mean(score_dict[_]))
elif 'Partial_Mouth' in _:
mouth_list.append(np.mean(score_dict[_]))
elif 'Makeup_Ob' in _:
ob_list.append(np.mean(score_dict[_]))
elif 'Mask_Paper' in _:
paper_list.append(np.mean(score_dict[_]))
elif 'Partial_Paperglass' in _:
paperglass_list.append(np.mean(score_dict[_]))
elif 'Paper' in _:
print_list.append(np.mean(score_dict[_]))
elif 'Replay' in _:
replay_list.append(np.mean(score_dict[_]))
elif 'Mask_Silicone' in _:
sil_list.append(np.mean(score_dict[_]))
elif 'Mask_Trans' in _:
trans_list.append(np.mean(score_dict[_]))
else:
raise ValueError
	## GX: gathering the result.
	apcer, bpcer, acer, tpr = [], [], [], []
	apcer_order, bpcer_order, acer_order, tpr_order = [], [], [], []
	# Evaluate each spoof type against the live set, in the same order as before.
	eval_order = [
		(fun_eye_list, 'fun_eye'), (eye_list, 'eye'), (mouth_list, 'mouth'),
		(paperglass_list, 'paperglass'), (im_list, 'im'), (ob_list, 'ob'),
		(co_list, 'co'), (half_list, 'half'), (trans_list, 'trans'),
		(paper_list, 'paper'), (sil_list, 'sil'), (man_list, 'man'),
		(replay_list, 'replay'), (print_list, 'print')]
	for spoof_list, test_name in eval_order:
		APCER, BPCER, ACER, tpr_fpr = compute_score(args, live_list + spoof_list,
									[0]*len(live_list) + [1]*len(spoof_list),
									test_name=test_name)
		apcer.append(APCER); bpcer.append(BPCER); acer.append(ACER); tpr.append(tpr_fpr)
		apcer_order.append(f"{APCER*100:.1f}"); bpcer_order.append(f"{BPCER*100:.1f}")
		acer_order.append(f"{ACER*100:.1f}"); tpr_order.append(f"{tpr_fpr*100:.1f}")
	dump_to_screen(apcer_order, bpcer_order, acer_order, tpr_order, apcer, bpcer, acer, tpr)
def parse_protocol_2_result(spoof_lst, apcer, bpcer, acer, tpr):
'''
	output results that can be easily copied to Overleaf.
'''
print("Compute the protocol II scores.")
overleaf_order = ['Funnyeye', 'Eye', 'Mouth', 'Paperglass', 'Im', 'Ob', 'Co', 'Half', 'Trans',
'Paper', 'Sil', 'Mann', 'Replay', 'Print'
]
apcer_order, bpcer_order, acer_order, tpr_order = [], [], [], []
for _ in overleaf_order:
spoof_idx = spoof_lst.index(_)
apcer_order.append(f"{apcer[spoof_idx]*100:.1f}")
bpcer_order.append(f"{bpcer[spoof_idx]*100:.1f}")
acer_order.append(f"{acer[spoof_idx]*100:.1f}")
tpr_order.append(f"{tpr[spoof_idx]*100:.1f}")
dump_to_screen(apcer_order, bpcer_order, acer_order, tpr_order, apcer, bpcer, acer, tpr)
def parse_protocol_3_result(score_dict):
'''
output results on protocol 3.
'''
print("Compute the protocol III scores.")
apcer, bpcer, acer, tpr = [], [], [], []
apcer_order, bpcer_order, acer_order, tpr_order = [], [], [], []
target_file_list = [
'./pro_3_text/test_A_pretrain.txt',
'./pro_3_text/test_B_spoof.txt',
'./pro_3_text/test_C_race.txt',
'./pro_3_text/test_D_age.txt',
'./pro_3_text/test_E_ill.txt'
]
for target_file_name in target_file_list:
f = open(target_file_name, 'r')
lines = f.readlines()
sub_live_list, sub_spoof_list = [], []
for line in lines:
line = line.strip()
if 'Live_' in line:
sub_live_list.append(np.mean(score_dict[line]))
else:
try:
sub_spoof_list.append(np.mean(score_dict[line]))
except:
continue
APCER, BPCER, ACER, tpr_fpr = compute_score(args, sub_live_list+sub_spoof_list,
[0]*len(sub_live_list)+[1]*len(sub_spoof_list),
target_file_name, False)
f.close()
apcer.append(APCER);bpcer.append(BPCER);acer.append(ACER);tpr.append(tpr_fpr)
apcer_order.append(f"{APCER*100:.1f}");bpcer_order.append(f"{BPCER*100:.1f}")
acer_order.append(f"{ACER*100:.1f}");tpr_order.append(f"{tpr_fpr*100:.1f}")
dump_to_screen(apcer_order, bpcer_order, acer_order, tpr_order, apcer, bpcer, acer, tpr)
def main(args):
	## The performance is evaluated at the video level.
folder_list = glob(f'{args.log_dir}/*/*.csv')
folder_list.sort()
spoof_lst, apcer, bpcer, acer, tpr = [], [], [], [], []
for folder_idx, folder_cur in enumerate(folder_list):
if args.pro == 1 and 'pro_1' not in folder_cur:
continue
elif args.pro == 2 and 'pro_2' not in folder_cur:
continue
elif args.pro == 3 and 'pro_3' not in folder_cur:
continue
elif args.pro not in [1,2,3]:
raise ValueError('Invalid Protocol.')
score_list, label_list = return_dictionary(args, [folder_cur])
APCER, BPCER, ACER, tpr_fpr = compute_score(args, score_list, label_list, folder_cur)
if args.pro == 2:
spoof_type = folder_cur.split('/')[-2].split('_')[-1]
spoof_lst.append(spoof_type)
apcer.append(APCER)
bpcer.append(BPCER)
acer.append(ACER)
tpr.append(tpr_fpr)
if args.pro == 2:
parse_protocol_2_result(spoof_lst, apcer, bpcer, acer, tpr)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--interval', type=float, default=0.0)
parser.add_argument('--lr', type=float, default=3e-4,
help='which learning rate to measure.')
parser.add_argument('--dataset', type=str, default='SiWM-v2', choices=['SiWM-v2', 'SiW', "OULU"],
help='which dataset to evaluate.')
parser.add_argument('--pro', type=int, default=1, choices=[1,2,3,4], help='which protocol to use.')
parser.add_argument('--weight', type=float, default=0.1, help='weight before the depth score.')
parser.add_argument('--log_dir', type=str, default='./',
help='the log directory that contains csv files.')
args = parser.parse_args()
	main(args)
# File: Multi-domain-learning-FAS-main/source_SiW_Mv2/train.py
# -*- coding: utf-8 -*-
# Copyright 2022
#
# Authors: Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu.
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
import tensorflow as tf
import argparse
import os
import time
import math
import numpy as np
from model import Generator, Discriminator, region_estimator
from utils import l1_loss, l2_loss, Logging
from dataset import Dataset
from config_siwm import Config_siwm
from tensorboardX import SummaryWriter
class SRENet(object):
"""
the SRENet class.
Attributes:
-----------
configurations: config, config_siw, and config_oulu.
modules: gen_pretrained, gen, RE, multi-disc and optimizers.
various directories for checkpoints.
log: log handler.
Methods:
-----------
basic functions: update_lr, _restore, _save.
optimization functions: train and train_step.
"""
def __init__(self, config):
self.config = config
self.lr = config.lr
self.bs = config.BATCH_SIZE
self.SUMMARY_WRITER = config.SUMMARY_WRITER
## The modules:
self.gen_pretrained = Generator()
self.RE = region_estimator()
self.gen = Generator(self.RE)
self.disc1 = Discriminator(1,config.n_layer_D)
self.disc2 = Discriminator(2,config.n_layer_D)
self.disc3 = Discriminator(4,config.n_layer_D)
self.gen_opt = tf.keras.optimizers.Adam(self.lr)
# Checkpoint initialization.
self.save_dir = config.save_model_dir
self.checkpoint_path_g = self.save_dir+"/gen/cp-{epoch:04d}.ckpt"
self.checkpoint_path_re = self.save_dir+"/ReE/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d1 = self.save_dir+"/dis1/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d2 = self.save_dir+"/dis2/cp-{epoch:04d}.ckpt"
self.checkpoint_path_d3 = self.save_dir+"/dis3/cp-{epoch:04d}.ckpt"
self.checkpoint_path_g_op = self.save_dir+"/g_opt/cp-{epoch:04d}.ckpt"
self.checkpoint_dir_g = os.path.dirname(self.checkpoint_path_g)
self.checkpoint_dir_re = os.path.dirname(self.checkpoint_path_re)
self.checkpoint_dir_d1 = os.path.dirname(self.checkpoint_path_d1)
self.checkpoint_dir_d2 = os.path.dirname(self.checkpoint_path_d2)
self.checkpoint_dir_d3 = os.path.dirname(self.checkpoint_path_d3)
self.checkpoint_dir_g_op = os.path.dirname(self.checkpoint_path_g_op)
self.model_list = [self.gen, self.RE, self.disc1, self.disc2, self.disc3]
self.model_p_list= [self.checkpoint_path_g,
self.checkpoint_path_re,
self.checkpoint_path_d1,
self.checkpoint_path_d2,
self.checkpoint_path_d3]
self.model_d_list= [self.checkpoint_dir_g,
self.checkpoint_dir_re,
self.checkpoint_dir_d1,
self.checkpoint_dir_d2,
self.checkpoint_dir_d3]
# Log class for displaying the losses.
self.log = Logging(config)
def update_lr(self, new_lr=0, restore=False, last_epoch=0):
if restore:
assert last_epoch != 0, print("Restoring LR should not start at 0 epoch.")
self.lr = self.lr * np.power(self.config.LEARNING_RATE_DECAY_FACTOR, last_epoch)
print(f"Restoring the previous learning rate {self.lr} at epoch {last_epoch}.")
self.gen_opt.learning_rate.assign(self.lr)
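		# Clarifying example (added): with the defaults lr = 1e-4 and
		# LEARNING_RATE_DECAY_FACTOR = 0.89, restoring at last_epoch = 10 gives
		# lr = 1e-4 * 0.89**10, roughly 3.1e-5, before training resumes.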
def _restore(self, model, checkpoint_dir, pretrain=False):
if not pretrain:
last_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
model.load_weights(last_checkpoint)
last_epoch = int((last_checkpoint.split('.')[1]).split('-')[-1])
return last_epoch
else:
model.load_weights(checkpoint_dir+'/cp-0179.ckpt')
def _save(self, model, checkpoint_path, epoch):
model.save_weights(checkpoint_path.format(epoch=epoch))
def train(self, dataset, config):
last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir_g)
if last_checkpoint:
for i, j in zip(self.model_list, self.model_d_list):
last_epoch = self._restore(i, j)
print('**********************************************************')
print('Restore from Epoch '+str(last_epoch))
self.update_lr(restore=True, last_epoch=last_epoch)
print('**********************************************************')
else:
print('**********************************************************')
			print('Training from scratch.')
print('**********************************************************')
last_epoch = 0
for epoch in range(last_epoch, self.config.MAX_EPOCH):
start = time.time()
training = True
for step in range(self.config.STEPS_PER_EPOCH):
img_batch = dataset.nextit()
losses, figs = self.train_step(img_batch, training, tf.constant(step))
# display message every TXT_LOG_FR steps; save fig every IMG_LOG_FR steps.
self.log.display(losses, epoch, step, training, self.config.STEPS_PER_EPOCH)
self.log.save(figs, training)
iter_num = self.config.STEPS_PER_EPOCH*epoch+step
for name_, loss_ in losses.items():
self.SUMMARY_WRITER.add_scalar(f'train/{name_}', loss_.numpy(), iter_num)
for i, j in zip(self.model_list, self.model_p_list):
self._save(i, j, epoch)
if epoch % config.DECAY_STEP == 0:
self.SUMMARY_WRITER.add_scalar(f'train/gen_lr', self.gen_opt.learning_rate.numpy(), epoch)
self.lr = self.lr * config.LEARNING_RATE_DECAY_FACTOR
self.update_lr(self.lr)
self.SUMMARY_WRITER.flush()
self.SUMMARY_WRITER.close()
#############################################################################
@tf.function
def correlation_matrix(self, region_map):
region_map_ = tf.image.resize(region_map, [8, 8])
b, w, h, _ = region_map_.shape
tsne_feature = tf.reshape(region_map_, [b, w*h])
tsne_featureT = tf.transpose(tsne_feature)
matrix_1 = tf.matmul(tsne_feature, tsne_featureT)
norm_value = tf.norm(tsne_feature+1e-3, axis=1)
norm_value = tf.reshape(norm_value, [-1, 1])
norm_valueT = tf.transpose(norm_value)
matrix_2 = tf.matmul(norm_value, norm_valueT)
res = matrix_1/matrix_2
return res
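	# Clarifying note (added; not in the original file): the method above flattens each
	# region map (resized to 8x8) into a 64-d vector f_i and returns the batch-wise
	# cosine-similarity matrix res[i, j] = <f_i, f_j> / (||f_i|| * ||f_j||); adding 1e-3
	# to every entry before taking the norm keeps the denominator away from zero for
	# all-zero (live) region maps.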
@tf.function
def train_step(self, data, training, step=0):
losses = {}
figs = []
bsize, imsize = self.bs, self.config.IMG_SIZE
# Get images and labels for CNN.
img_li, img_sp, dmap_li, dmap_sp, _, _, _ = data
img = tf.concat([img_li, img_sp], axis=0)
dmap = tf.concat([dmap_li, dmap_sp], axis=0)
dmap_size_32 = tf.image.resize(dmap, [32, 32])
###########################################################
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape, tf.GradientTape() as reg_tape:
dmap_pred, p, c, n, x, region_map = self.gen(img, training=training)
region_map = tf.reshape(region_map, [2*bsize,imsize,imsize,1])
# Live reconstruction.
recon = (1 - p) * (img - n) + p * c
trace = img - recon
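			# Clarifying note (added): p acts as the predicted spoof-region mask, n as the
			# additive spoof noise, and c as the content synthesized inside the masked
			# region, so a live input should yield recon close to img and a near-zero trace.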
d_img = tf.concat([img[:bsize, ...], recon[bsize:, ...]], axis=0)
d_output_1 = self.disc1(d_img, training=training)
d_output_2 = self.disc2(d_img, training=training)
d_output_3 = self.disc3(d_img, training=training)
# Semantic mask loss.
p_prior_knowledge = l1_loss(p[..., 0], dmap[..., 1])
real_change = tf.zeros([bsize,imsize,imsize])
siwm_change = tf.cast(tf.greater(tf.reduce_sum(tf.abs(trace[bsize:,:,:]), axis=3), 0.35),tf.float32)
p_significant_change = tf.stop_gradient(tf.concat([real_change, siwm_change], axis=0))
map_loss = l1_loss(tf.squeeze(region_map), p_significant_change)
p_post_constraint = tf.abs(tf.squeeze(p[bsize:, ...]) - p_significant_change[bsize:, ...])
p_post_constraint = tf.reduce_mean(p_post_constraint)
p_loss = p_prior_knowledge * 0.1 + p_post_constraint
# Trace constraint loss.
trace_loss = tf.reduce_mean(tf.abs(trace[:bsize, ...])) + \
tf.reduce_mean(tf.abs(trace[bsize:, ...])) * 1e-5
# Depth map loss.
dmap_loss = l1_loss(dmap_pred, dmap_size_32) * 100
# GAN loss for the generator.
gan_loss = l2_loss(d_output_1[1], 1) + l2_loss(d_output_2[1], 1) + l2_loss(d_output_3[1], 1)
# Overall loss for generator.
g_total_loss = dmap_loss + gan_loss + p_loss + trace_loss * 10
# Discriminators loss.
d_loss_r = l2_loss(d_output_1[0], 1) + l2_loss(d_output_2[0], 1) + l2_loss(d_output_3[0], 1)
d_loss_s = l2_loss(d_output_1[1], 0) + l2_loss(d_output_2[1], 0) + l2_loss(d_output_3[1], 0)
d_total_loss = (d_loss_r + d_loss_s) / 4
if training:
# Gather all the trainable variables
gen_trainable_vars = self.gen.trainable_variables
reg_trainable_vars = self.RE.trainable_variables
disc_trainable_vars = self.disc1.trainable_variables + \
self.disc2.trainable_variables + \
self.disc3.trainable_variables
# Generate gradients.
r_gradients = reg_tape.gradient(map_loss, reg_trainable_vars)
g_gradients = gen_tape.gradient(g_total_loss, gen_trainable_vars)
d_gradients = disc_tape.gradient(d_total_loss, disc_trainable_vars)
			# Backpropagate gradients.
self.gen_opt.apply_gradients(zip(g_gradients, gen_trainable_vars))
self.gen_opt.apply_gradients(zip(r_gradients, reg_trainable_vars))
if step % 2 == 0:
self.gen_opt.apply_gradients(zip(d_gradients, disc_trainable_vars))
		# Gather losses for display to track the training.
losses['dmap'] = dmap_loss
losses['gen'] = gan_loss
losses['map_loss'] = map_loss
losses['p_post_constraint'] = p_post_constraint
losses['disc_real'] = d_loss_r
losses['disc_fake'] = d_loss_s
losses['p_prior_knowledge'] = p_prior_knowledge * 0.1
losses['trace_loss'] = trace_loss
# Gather network output and intermediate results for visualization.
dmap = tf.concat([dmap, tf.zeros([bsize*2, 256, 256, 1])], axis=3)
dmap_pred = tf.concat([dmap_pred, tf.zeros([bsize*2, 32, 32, 1])], axis=3)
p_significant_change = tf.expand_dims(p_significant_change, axis=-1)
figs = [img, recon, tf.abs(p), tf.abs(p_significant_change),
tf.abs(region_map), tf.abs(c), tf.abs(n), dmap, dmap_pred]
return losses, figs
def main(args):
# Base Configuration Class
config = Config_siwm(args)
config.lr = args.lr
config.type = args.type
config.DECAY_STEP = args.decay_step
config.pretrain_folder = args.pretrain_folder
config.desc_str = f'_siwmv2_pro_{args.pro}_unknown_{args.unknown}'
config.root_dir = './log'+config.desc_str
config.exp_dir = '/exp'+config.desc_str
config.CHECKPOINT_DIR = config.root_dir+config.exp_dir
config.tb_dir = './tb_logs'+config.desc_str
config.save_model_dir = "./save_model"+config.desc_str
config.SUMMARY_WRITER = SummaryWriter(config.tb_dir)
os.makedirs(config.root_dir, exist_ok=True)
os.makedirs(config.save_model_dir, exist_ok=True)
os.makedirs(config.CHECKPOINT_DIR, exist_ok=True)
os.makedirs(config.CHECKPOINT_DIR+'/test', exist_ok=True)
print('**********************************************************')
print(f"Making root folder: {config.root_dir}")
print(f"Current exp saved into folder: {config.CHECKPOINT_DIR}")
print(f"The tensorboard results are saved into: {config.tb_dir}")
print(f"The trained weights saved into folder: {config.save_model_dir}")
print('**********************************************************')
config.compile()
print('**********************************************************')
srenet = SRENet(config)
dataset_train = Dataset(config, 'train')
srenet.train(dataset_train, config)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## exp protocol setup.
parser.add_argument('--stage', type=str, default='ft', choices=['ft','pretrain','ub'])
parser.add_argument('--type', type=str, default='spoof', choices=['spoof','age','race','illu'])
parser.add_argument('--set', type=str, default='all', help='To choose from the predefined 14 types.')
parser.add_argument('--data', type=str, default='all', choices=['all','SiW','SiWM','oulu'])
parser.add_argument('--pretrain_folder', type=str, default='./pre_trained/', help='Pretrain weight.')
parser.add_argument('--pro', type=int, default=1, help='Protocol number.')
parser.add_argument('--unknown', type=str, default='Ob', help='The unknown spoof type.')
## train hyper-parameters.
parser.add_argument('--epoch', type=int, default=50, help='How many epochs to train the model.')
parser.add_argument('--lr', type=float, default=1e-4, help='The starting learning rate.')
parser.add_argument('--batch_size', type=int, default=6, help='Batch size.')
parser.add_argument('--decay_step', type=int, default=3, help='The learning rate decay step.')
parser.add_argument('--cuda', type=int, default=3, help='The gpu num to use.')
parser.add_argument('--debug_mode', type=str, default='True', choices=['True', "False"],
help='Deprecated function.')
## inference
parser.add_argument('--epoch_eval', type=int, default=49, help='Which epoch to eval.')
parser.add_argument('--dir', type=str, default=None, help='the inference image folder.')
parser.add_argument('--img', type=str, default='fake.png', help='the inference image.')
args = parser.parse_args()
	main(args)
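# Example invocation (added for illustration; values and paths are placeholders):
# python train.py --pro 1 --cuda 0 --epoch 50 --lr 1e-4 --batch_size 6 \
# --decay_step 3 --pretrain_folder ./pre_trained/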
# File: Multi-domain-learning-FAS-main/source_SiW_Mv2/config_siwm.py
# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
import tensorflow as tf
import os
import abc
import csv
from glob import glob
from utils import file_reader
# Configuration class.
class Config(object):
"""
the meta configuration class.
Attributes:
-----------
configurations: config, config_siw, and config_oulu.
modules: gen_pretrained, gen, RE, multi-disc and optimizers.
various directories for checkpoints.
log: log handler.
Methods:
-----------
basic functions: update_lr, _restore, _save.
optimization functions: train and train_step.
"""
# Config.
LOG_DEVICE_PLACEMENT = False
IMG_SIZE = 256
MAP_SIZE = 32
FIG_SIZE = 128
# Training meta.
STEPS_PER_EPOCH = 1000
IMG_LOG_FR = 100
TXT_LOG_FR = 1000
# Initial learning rate.
lr = 1e-4
LEARNING_RATE_DECAY_FACTOR = 0.89 # The decay to use for the moving average.
LEARNING_MOMENTUM = 0.999
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
GAN = 'ls' # 'hinge', 'ls'
DECAY_STEP = 3
n_layer_D = 4
# Spoof type dictionary.
spoof_type_dict = {
'Co': 'Makeup_Co', 'Eye': 'Partial_Eye', 'Funnyeye': 'Partial_Funnyeye',
'Half': 'Mask_Half', 'Im': 'Makeup_Im', 'Mann': 'Mask_Mann', 'Mouth': 'Partial_Mouth',
'Ob': 'Makeup_Ob', 'Paper': 'Mask_Paper', 'Paperglass': 'Partial_Paperglass',
'Print': 'Paper', 'Replay': 'Replay',
'Sil': 'Mask_Silicone', 'Trans': 'Mask_Trans'
}
def __init__(self, args):
self.MAX_EPOCH = args.epoch
self.GPU_INDEX = args.cuda
self.phase = args.stage
assert self.phase in ['pretrain', 'ft', 'ub'], print("Please offer the valid phase!")
self.type = args.type
self.SET = args.set
self.illu_dict = dict()
self.spoof_type_list = list(self.spoof_type_dict.keys())
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
tf.config.experimental.set_memory_growth(gpus[self.GPU_INDEX], True)
tf.config.experimental.set_visible_devices(gpus[self.GPU_INDEX], 'GPU')
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
except RuntimeError as e:
print(e) # Virtual devices must be set before GPUs have been initialized
@abc.abstractmethod
def search_folder(self, root_dir, sub_id, stype):
pass
@abc.abstractmethod
def search_folder_wrapper(self, root_dir, filenames):
pass
class Config_siwm(Config):
"""
the configuration class for siw-mv2 dataset.
protocol I:
the live subjects for training and testing are from trainlist_live.txt and testlist_live.txt, respectively.
the spoof subjects for training and testing are from testlist_live.txt and testlist_all.txt, respectively.
protocol II:
the live subjects for training and testing are from trainlist_live.txt and testlist_live.txt, respectively.
13 spoof attacks in the training and 1 spoof attack for the testing.
protocol III:
both live and spoof subjects are in train_A_pretrain.txt.
the test subjects are in the test_A_pretrain.txt, test_B_spoof.txt, test_C_race.txt, test_D_age.txt and test_E_ill.txt.
"""
LI_DATA_DIR = []
SP_DATA_DIR = []
LI_DATA_DIR_TEST = []
SP_DATA_DIR_TEST = []
def __init__(self, args):
super().__init__(args)
self.dataset = "SiWM-v2"
self.BATCH_SIZE = args.batch_size
self.epoch_eval = args.epoch_eval
self.spoof_img_root = '/user/guoxia11/cvlshare/cvl-guoxia11/Spoof/'
self.live_img_root = '/user/guoxia11/cvlshare/cvl-guoxia11/Live/'
self.protocol = args.pro
if self.protocol in [1, 3]:
self.unknown = 'None'
elif self.protocol == 2:
self.unknown = args.unknown
assert self.unknown in self.spoof_type_list, print("Please offer a valid spoof type.")
root_dir_id = "/user/guoxia11/cvl/anti_spoofing_2022/PROTOCOL/SIW-Mv2/"
if self.protocol in [1, 2]:
self.spoof_train_fname = file_reader(root_dir_id + 'trainlist_all.txt')
self.spoof_test_fname = file_reader(root_dir_id + 'testlist_all.txt')
self.live_train_fname = file_reader(root_dir_id + 'trainlist_live.txt')
self.live_test_fname = file_reader(root_dir_id + 'testlist_live.txt')
elif self.protocol == 3:
total_list_train = file_reader(root_dir_id + 'train_A_pretrain.txt')
total_list_test = file_reader(root_dir_id + 'test_A_pretrain.txt')
total_list_test += file_reader(root_dir_id + 'test_B_spoof.txt')
total_list_test += file_reader(root_dir_id + 'test_C_race.txt')
total_list_test += file_reader(root_dir_id + 'test_D_age.txt')
total_list_test += file_reader(root_dir_id + 'test_E_ill.txt')
self.spoof_train_fname = []
self.spoof_test_fname = []
self.live_train_fname = []
self.live_test_fname = []
for _ in total_list_train:
if 'Live' in _:
self.live_train_fname.append(_)
else:
self.spoof_train_fname.append(_)
for _ in total_list_test:
if 'Live' in _:
self.live_test_fname.append(_)
else:
self.spoof_test_fname.append(_)
# overriding the compile method.
def compile(self, dataset_name='SiWM-v2'):
'''generates train and test list for SIW-Mv2.'''
# Train data.
for x in self.live_train_fname:
if x != '':
self.LI_DATA_DIR.append(self.live_img_root + x)
for x in self.spoof_train_fname:
if x != '':
if self.protocol in [1, 3]:
self.SP_DATA_DIR.append(self.spoof_img_root + x)
elif self.protocol == 2:
if self.spoof_type_dict[self.unknown] not in x:
self.SP_DATA_DIR.append(self.spoof_img_root + x)
for x in self.live_test_fname:
if x != '':
self.LI_DATA_DIR_TEST.append(self.live_img_root + x)
if self.protocol in [1, 3]:
for x in self.spoof_test_fname:
if x != '':
self.SP_DATA_DIR_TEST.append(self.spoof_img_root + x)
elif self.protocol == 2:
self.SP_DATA_DIR_TEST = glob(self.spoof_img_root + self.spoof_type_dict[self.unknown] + '_*')
class Config_custom(Config):
def __init__(self, args):
super().__init__(args)
self.dataset = 'custom'
self.BATCH_SIZE = 4
self.root_dir = './preprocessed_image_train'
self.phase = 'ft'
# self.partition_folder = args.partition
self.SP_DATA = glob(self.root_dir + '/spoof/*')
self.LI_DATA = glob(self.root_dir + '/live/*')
self.inference_data_dir = args.dir
self.inference_data_img = args.img
		if self.inference_data_dir is None and self.inference_data_img is None:
			assert False, print("Please offer either the inference image or inference image folder.")
		else:
			if self.inference_data_dir is not None:
				if not os.path.isdir(self.inference_data_dir):
					assert False, print("Please offer a valid directory.")
			elif self.inference_data_img is not None:
				image_suffix = self.inference_data_img.lower()
				if not image_suffix.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
					assert False, print("Please offer a valid image file.")
# File: Multi-domain-learning-FAS-main/source_SiW_Mv2/warp.py
# -*- coding: utf-8 -*-
# Copyright 2022
#
# Multi-domain Learning for Updating Face Anti-spoofing Models (ECCV 2022)
# Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
import tensorflow as tf
import cv2
import numpy as np
from scipy.ndimage.interpolation import map_coordinates as sp_map_coordinates
import matplotlib.tri as mtri
def tf_flatten(a):
"""Flatten tensor"""
return tf.reshape(a, [-1])
def tf_repeat(a, repeats, axis=0):
"""TensorFlow version of np.repeat for 1D"""
# https://github.com/tensorflow/tensorflow/issues/8521
assert len(a.get_shape()) == 1
a = tf.expand_dims(a, -1)
a = tf.tile(a, [1, repeats])
a = tf_flatten(a)
return a
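# Illustrative note (added): tf_repeat(tf.constant([1, 2, 3]), 2) yields
# [1, 1, 2, 2, 3, 3], matching np.repeat([1, 2, 3], 2).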
def tf_repeat_2d(a, repeats):
"""Tensorflow version of np.repeat for 2D"""
assert len(a.get_shape()) == 2
a = tf.expand_dims(a, 0)
a = tf.tile(a, [repeats, 1, 1])
return a
def tf_map_coordinates(input, coords, order=1):
"""Tensorflow verion of scipy.ndimage.map_coordinates
Note that coords is transposed and only 2D is supported
Parameters
----------
input : tf.Tensor. shape = (s, s)
coords : tf.Tensor. shape = (n_points, 2)
"""
assert order == 1
	coords_lt = tf.cast(tf.math.floor(coords), 'int32')
	coords_rb = tf.cast(tf.math.ceil(coords), 'int32')
coords_lb = tf.stack([coords_lt[:, 0], coords_rb[:, 1]], axis=1)
coords_rt = tf.stack([coords_rb[:, 0], coords_lt[:, 1]], axis=1)
vals_lt = tf.gather_nd(input, coords_lt)
vals_rb = tf.gather_nd(input, coords_rb)
vals_lb = tf.gather_nd(input, coords_lb)
vals_rt = tf.gather_nd(input, coords_rt)
coords_offset_lt = coords - tf.cast(coords_lt, 'float32')
vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, 0]
vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, 0]
mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, 1]
return mapped_vals
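# Clarifying note (added; not in the original file): the four gathers above fetch the
# values at the surrounding integer corners (left-top, right-bottom, left-bottom,
# right-top), and the two linear blends combine them, i.e., standard bilinear
# interpolation at the fractional coordinates.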
def sp_batch_map_coordinates(inputs, coords):
"""Reference implementation for batch_map_coordinates"""
coords = coords.clip(0, inputs.shape[1] - 1)
mapped_vals = np.array([
sp_map_coordinates(input, coord.T, mode='nearest', order=1)
for input, coord in zip(inputs, coords)
])
return mapped_vals
def tf_batch_map_coordinates(_input, coords, order=1):
"""Batch version of tf_map_coordinates
Only supports 2D feature maps
Parameters
----------
input : tf.Tensor. shape = (b, s, s)
coords : tf.Tensor. shape = (b, n_points, 2)
"""
input_shape = tf.shape(_input)
batch_size = input_shape[0]
input_size = input_shape[1]
n_coords = tf.shape(coords)[1]
coords = tf.clip_by_value(coords, 0, tf.cast(input_size, 'float32') - 1)
coords_lt = tf.cast(tf.math.floor(coords), 'int32')
coords_rb = tf.cast(tf.math.ceil(coords), 'int32')
coords_lb = tf.stack([coords_lt[..., 0], coords_rb[..., 1]], axis=-1)
coords_rt = tf.stack([coords_rb[..., 0], coords_lt[..., 1]], axis=-1)
idx = tf_repeat(tf.range(batch_size), n_coords)
def _get_vals_by_coords(__input, coords):
indices = tf.stack([
idx, tf_flatten(coords[..., 0]), tf_flatten(coords[..., 1])
], axis=-1)
vals = tf.gather_nd(__input, indices)
vals = tf.reshape(vals, (batch_size, n_coords, __input.shape[3]))
return vals
vals_lt = _get_vals_by_coords(_input, coords_lt)
vals_rb = _get_vals_by_coords(_input, coords_rb)
vals_lb = _get_vals_by_coords(_input, coords_lb)
vals_rt = _get_vals_by_coords(_input, coords_rt)
coords_offset_lt = coords - tf.cast(coords_lt, 'float32')
offset_0 =coords_offset_lt[..., 0]
offset_1 =coords_offset_lt[..., 1]
offset_0 = tf.reshape(offset_0, [offset_0.shape[0], offset_0.shape[1], 1])
offset_1 = tf.reshape(offset_1, [offset_1.shape[0], offset_1.shape[1], 1])
vals_t = vals_lt + (vals_rt - vals_lt) * offset_0
vals_b = vals_lb + (vals_rb - vals_lb) * offset_0
mapped_vals = vals_t + (vals_b - vals_t) * offset_1
return mapped_vals
def sp_batch_map_offsets(input, offsets):
"""Reference implementation for tf_batch_map_offsets"""
batch_size = input.shape[0]
input_size = input.shape[1]
offsets = offsets.reshape(batch_size, -1, 2)
grid = np.stack(np.mgrid[:input_size, :input_size], -1).reshape(-1, 2)
grid = np.repeat([grid], batch_size, axis=0)
coords = offsets + grid
coords = coords.clip(0, input_size - 1)
mapped_vals = sp_batch_map_coordinates(input, coords)
return mapped_vals
def tf_batch_map_offsets(_input, offsets, order=1):
	"""Batch map offsets into input
	Parameters
	---------
	input : tf.Tensor. shape = (b, s, s)
	offsets: tf.Tensor. shape = (b, s, s, 2)
	"""
	input_size = _input.shape[1]
	offsets = tf.image.resize(offsets, [input_size, input_size]) * input_size
	offsets = offsets[:,:,:,0:2]
input_shape = tf.shape(_input)
batch_size = input_shape[0]
input_size = input_shape[1]
offsets = tf.reshape(offsets, (batch_size, -1, 2))
grid = tf.meshgrid(
tf.range(input_size), tf.range(input_size), indexing='ij'
)
grid = tf.stack(grid, axis=-1)
grid = tf.cast(grid, 'float32')
grid = tf.reshape(grid, (-1, 2))
grid = tf_repeat_2d(grid, batch_size)
coords = offsets + grid
mapped_vals = tf_batch_map_coordinates(_input, coords)
mapped_vals = tf.reshape(mapped_vals, (batch_size, input_size, input_size, -1))
return mapped_vals
def generate_offset_map_batch(source, target, img_size):
offsetmap_batch = []
for _source, _target in zip(tf.unstack(source), tf.unstack(target)):
offsetmap = generate_offset_map(_source, _target, img_size)
offsetmap_batch.append(offsetmap)
return tf.stack(offsetmap_batch, axis=0)
def generate_offset_map(source, target, img_size):
anchor_pts = [[0,0],[0,255],[255,0],[255,255],
[0,127],[127,0],[255,127],[127,255],
[0,63],[0,191],[255,63],[255,191],
[63,0],[191,0],[63,255],[191,255]]
anchor_pts = np.asarray(anchor_pts)/ 255
xi, yi = np.meshgrid(np.linspace(0, 1, img_size), np.linspace(0, 1, img_size))
_source = np.concatenate([source, anchor_pts], axis=0).astype(np.float32)
_target = np.concatenate([target, anchor_pts], axis=0).astype(np.float32)
_offset = _source - _target
# interp2d
_triang = mtri.Triangulation(_target[:,0], _target[:,1])
_interpx = mtri.LinearTriInterpolator(_triang, _offset[:,0])
_interpy = mtri.LinearTriInterpolator(_triang, _offset[:,1])
_offsetmapx = _interpx(xi, yi)
_offsetmapy = _interpy(xi, yi)
offsetmap = np.stack([_offsetmapy, _offsetmapx, _offsetmapx*0], axis=2)
return offsetmap
def generate_uv_map(source, uv, img_size):
xi, yi = np.meshgrid(np.linspace(0, 1, img_size), np.linspace(0, 1, img_size))
# interp2d
_triang = mtri.Triangulation(source[:,0], source[:,1])
_interpz = mtri.LinearTriInterpolator(_triang, uv[:,2])
_offsetmapz = _interpz(xi, yi)
offsetmap = np.reshape(_offsetmapz,(img_size,img_size,1))
offsetmap = np.nan_to_num(offsetmap)
return offsetmap
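# Minimal usage sketch (added for illustration; not part of the original file).
if __name__ == "__main__":
	a = tf.constant([1, 2, 3])
	print(tf_repeat(a, 2).numpy())   # expected: [1 1 2 2 3 3]
	b = tf.reshape(tf.range(4, dtype=tf.float32), [2, 2])
	print(tf_repeat_2d(b, 3).shape)  # expected: (3, 2, 2)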
<!-- File: DA-Transformer-main/README.md -->
# DA-Transformer
Directed Acyclic Transformer (DA-Transformer) is a non-autoregressive sequence-to-sequence model designed for parallel text generation. This repository contains the implementation of DA-Transformer, as well as pre-trained checkpoints.
**Abstract**: Unlike traditional sequence-to-sequence models that generate output tokens one at a time, DA-Transformer predicts a Directed Acyclic Graph (DAG) that represents all possible outputs simultaneously. Each path in the DAG corresponds to a specific output sequence, which enables fast and diverse text generation in a non-autoregressive fashion.

**Practical Advantages**:
* **Fast Generation**: With the fairseq implementation, DA-Transformer offers faster inference than autoregressive Transformers, reducing latency by 7~14x and increasing throughput by ~20x.
* **High Quality**: DA-Transformer performs competitively with autoregressive Transformers, even with pre-trained models like BART, in a variety of text generation tasks.
* **Easy Training**: DA-Transformer can be trained end-to-end without requiring knowledge distillation, making it simple and straightforward to train.
<details>
<summary>Click Here for Performance on Machine Translation</summary>

</details>
<details>
<summary>Click Here for Performance on Close-Ended Text Generation</summary>

</details>
<details>
<summary>Click Here for Performance on Open-Ended Text Generation</summary>

</details>
**News(2022-5)**: We released the DA-Transformer code for machine translation. Update: This [version](https://github.com/thu-coai/DA-Transformer/tree/v1.0) is archived.
**News(2023-4)**: We are excited to announce a new framework to train DA-Transformer and a pre-trained checkpoint on Wikipedia and BookCorpus. After fine-tuning, **DA-Transformer achieves outstanding results on various generation tasks**, including question generation, summarization, paraphrasing, dialog generation, and story generation, **surpassing the performance of some pre-trained autoregressive models**, such as [MASS](https://github.com/microsoft/MASS), [BART](https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md), and [ProphetNet](https://github.com/microsoft/ProphetNet). Our paper is released at [Arxiv](https://arxiv.org/pdf/2304.11791.pdf).
**News(2023-5)**: We release a live [demo](https://huggingface.co/spaces/thu-coai/DA-Transformer) on Huggingface Space. You can interact with our model and see the predicted DAG structure. Try it now!

## Table of Contents
* [Overview](#overview)
* [Requirements & Installation](#requirements--installation)
* [Preparing Data](#preparing-data)
* [Training](#training)
* [Examples](#examples)
* [Up-sampling Strategies](#up-sampling-strategies)
* [Speed Up with LightSeq](#speed-up-with-lightseq)
* [Decoding](#decoding)
* [Averaging Checkpoints](#averaging-checkpoints)
* [Greedy/Lookahead Decoding](#greedylookahead-decoding)
* [Viterbi Decoding](#viterbi-decoding)
* [Sampling](#sampling)
* [BeamSearch](#beamsearch)
* [Evaluation Scripts](#evaluation-scripts)
* [Other Scripts](#other-scripts)
* [Released Checkpoints](#released-checkpoints)
* [FAQs](#faqs)
* [Contact Us](#contact-us)
* [How to Cite](#how-to-cite)
## Overview
This repository is constructed using the codebase from [``fairseq:5175fd``](https://github.com/pytorch/fairseq/tree/5175fd5c267adceec9445bf067597686e159e7e7). If you require information on the basic usage of fairseq, please refer to the [fairseq documentation](https://fairseq.readthedocs.io/en/latest/).
Here are some features of our implementation:
* Our implementation includes CUDA implementations (enabled by default) for training, which includes a dynamic programming algorithm and several other operations that improve training speed and reduce GPU memory usage. If you prefer not to use CUDA, we also provide modules implemented in PyTorch native operations.
* We support LightSeq, which can further boost training speed. (Note that the reported speedup in the paper does not use LightSeq.)
* We offer a multi-threaded C++ implementation for BeamSearch.
* We have modified the fairseq training script to allow more detailed batch manipulation to avoid OOM problems in training DA-Transformer. See ``--batch-split-by-src`` and ``--max-tokens-after-upsampling`` in the descriptions below.
* We have also modified the fairseq generation script to support **overlapped decoding**, which significantly speeds up decoding throughput by reducing GPU idle time through conducting beam search algorithm on multiple CPU processes. See ``fairseq-fastgenerate`` in the descriptions below.
### DA-Transformer files (fs_plugins)
```
fs_plugins
├── criterions
│ └── nat_dag_loss.py # DA-Transformer loss
├── custom_ops # Operations implementations and cuda kernels
│ ├── dag_best_alignment.cu
│ ├── logsoftmax_gather.cu
│ ├── dag_loss.cu
│ ├── dag_loss.py
│ └── dag_loss.cpp
├── models
│ ├── glat_decomposed_with_link.py # A PyTorch implementation of DA-Transformer
│ ├── ls_glat_decomposed_with_link.py # A lightseq implementation of DA-Transformer
│ └── ls_* # Other files for lightseq
├── tasks
│ ├── translation_dat_dict.py # Customized dictionary implementation (add some special tokens)
│ ├── translation_dat_dataset.py # Customized dataset (useful in pre-training)
│ ├── translation_dat_generator.py # Customized generator
| └── translation_dat.py # Customized task
├── optimizer
│ └── ls_adam.py # Lightseq Adam
└── scripts
├── average_checkpoints.py # Averaging checkpoints tricks
    ├── convert_fs_to_ls.py              # Converting fairseq checkpoint to lightseq checkpoint
├── convert_ls_to_fs.py # Converting lightseq checkpoint to fairseq checkpoint
└── extract_model_state.py # Extracting model weights from a checkpoint
```
### Customized LightSeq for NAT
Our code repository incorporates [a customized version of LightSeq](https://github.com/thu-coai/lightseq-nat/), with the following modifications:
* Implementation of a non-autoregressive decoder using the LightSeq autoregressive decoder as a base.
* Increased support for maximum length (currently set at 1024)
* Aligned parameters and model architectures with the Fairseq implementation, providing with a script for checkpoint conversion.
### BeamSearch on DAG
We have incorporated [dag_search](https://github.com/thu-coai/DAG-Search) into this repository, which implements the Beam Search algorithm on the DAG.
## Requirements & Installation
* Python >= 3.7
* Pytorch == 1.10.1 (tested with cuda == 10.2 or 11.3)
* gcc >= 7.0.0 (for compiling cuda operations. See FAQs if you want to use a lower gcc version)
* ``git clone --recurse-submodules https://github.com/thu-coai/DA-Transformer.git; cd DA-Transformer; pip install -e .``
* (Optional) Customized LightSeq for NAT (``cd lightseq && pip install -e .``)
* (Optional) BeamSearch algorithm for DA-Transformer (``cd dag_search && bash install.sh``)
## Preparing Data
We provide the datasets used in our papers.
| Dataset | Task | Data | Source |
|---------|------|------|--------|
| WMT14 En<->De | Machine Translation | [[Link]](https://cloud.tsinghua.edu.cn/d/e64621ab19c44e55a774/): including raw data and distilled data. | The cleaned raw data is from [Fairseq](https://github.com/facebookresearch/fairseq/tree/main/examples/nonautoregressive_translation). The distilled corpora are generated by a Transformer-big model. |
| WMT17 Zh<->En | Machine Translation | [[Link]](https://cloud.tsinghua.edu.cn/d/3aa30749bd03423ba315/): including raw data and distilled data. | The distilled corpora are generated by a Transformer-big model. |
| SQuAD1.1 | Question Generation | [[Training]](https://microsoft.github.io/glge/) [[Test]](https://drive.google.com/file/d/11lDXIG87dChIfukq3x2Wx4r5_duCRm_J/view?usp=sharing) [[Pre-processing script]](./examples/DA-Transformer/process_bert_uncased.py) [[Vocab]](https://cloud.tsinghua.edu.cn/f/32ab06d8f7c546569c87/?dl=1) | Provided by [GLGE](https://microsoft.github.io/glge/). |
| XSUM | Summarization | [[Training]](https://microsoft.github.io/glge/) [[Test]](https://drive.google.com/file/d/11lDXIG87dChIfukq3x2Wx4r5_duCRm_J/view?usp=sharing) [[Pre-processing script]](./examples/DA-Transformer/process_bert_uncased.py) [[Vocab]](https://cloud.tsinghua.edu.cn/f/32ab06d8f7c546569c87/?dl=1) | Provided by [GLGE](https://microsoft.github.io/glge/). |
| Quora | Paraphrase Generation | [[Pre-processed Data]](https://cloud.tsinghua.edu.cn/f/de33381a2fdc4d16b1ec/?dl=1) [[Vocab]](https://cloud.tsinghua.edu.cn/f/32ab06d8f7c546569c87/?dl=1) | Provided by [Quora](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) and [MIST](https://github.com/kongds/MIST). |
| PersonaChat | Dialog Generation | [[Training]](https://microsoft.github.io/glge/) [[Test]](https://drive.google.com/file/d/11lDXIG87dChIfukq3x2Wx4r5_duCRm_J/view?usp=sharing) [[Pre-processing script]](./examples/DA-Transformer/process_bert_uncased.py) [[Vocab]](https://cloud.tsinghua.edu.cn/f/32ab06d8f7c546569c87/?dl=1) | Provided by [GLGE](https://microsoft.github.io/glge/). |
| ROCStory | Story Generation | [[Pre-processed Data]](https://cloud.tsinghua.edu.cn/f/46388d8239c248b1a13c/?dl=1) [[Vocab]](https://cloud.tsinghua.edu.cn/f/32ab06d8f7c546569c87/?dl=1) | Provided by [[Link]](https://www.cs.rochester.edu/nlp/rocstories/). |
As the pre-training data size is too large, we only provide the [pre-processing script](./examples/DA-Transformer/process_pretrain.py) and [pre-processed examples](./examples/DA-Transformer/pretrain_data_example). The script can be applied to any unlabelled corpora to construct the pre-training data.
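For intuition, the construction replaces several spans of the input with placeholder tokens ``[P0]``, ``[P1]``, ... and moves the masked spans to the target, each prefixed by its placeholder and followed by a final closing placeholder (see ``process_pretrain.py``). A toy example with invented tokens:
```
source: the quick [P0] fox jumps [P1] the lazy dog
target: [P0] brown [P1] over [P2]
```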
Then, to generate the binarized data required for fairseq training, run the following script (note that you should rename the downloaded files first).
```bash
input_dir=path/to/raw_data # directory of pre-processed text data
data_dir=path/to/binarized_data # directory of the generated binarized data
src=src # source suffix
tgt=tgt # target suffix
# The following command requires these files:
# train.${src} train.${tgt} valid.${src} valid.${tgt} test.${src} test.${tgt}
# dict.${src}.txt dict.${tgt}.txt
fairseq-datpreprocess --source-lang ${src} --target-lang ${tgt} \
--trainpref ${input_dir}/train --validpref ${input_dir}/valid --testpref ${input_dir}/test \
    --srcdict ${input_dir}/dict.${src}.txt --tgtdict ${input_dir}/dict.${tgt}.txt \
--destdir ${data_dir} --workers 32 \
--user-dir fs_plugins --task translation_dat_task [--seg-tokens 32]
# [--seg-tokens 32] is optional; set it when you use pre-trained models, and remove it otherwise.
```
## Training
You can use ``fairseq-train`` to train a DA-Transformer. A basic example is shown as follows:
```bash
data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
pretrained_model=/path/to/model.bin
fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base source_old --upsample-scale 8 \
[--seg-tokens 32] [--filter-max-length 512:128] [--filter-ratio 2] \
\
`# DA-Transformer Architecture Configs` \
--arch glat_decomposed_link_base \
--links-feature feature:position [--segment-embedding] \
--max-source-positions 128 --max-target-positions 1024 [--truncate-source] \
--encoder-learned-pos --decoder-learned-pos \
--share-all-embeddings --activation-fn gelu --apply-bert-init \
[--load-pretrained-model ${pretrained_model}] \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 8.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0 --max-transition-length 99999 \
--glat-p 0.5:0.1@200k --glance-strategy number-random \
[--use-pretrain-loss] [--no-force-emit] \
[--torch-dag-loss] [--torch-best-alignment-loss] [--torch-dag-logsoftmax-gather] \
\
`# Optimizer & Regularizer Configs` \
--optimizer adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.1 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 0.0005 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
--max-tokens 4096 --max-tokens-valid 4096 --update-freq 2 \
[--max-tokens-after-upsample] [--batch-split-by-src 32767] \
[--max-encoder-batch-tokens 20000] [--max-decoder-batch-tokens 20000] \
--max-update 300000 --grouped-shuffling \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 1 --validate-interval-updates 10000 \
--eval-bleu --eval-bleu-detok space --eval-bleu-remove-bpe --eval-bleu-print-samples [--eval-bleu-order 4] \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
--save-interval 1 --save-interval-updates 10000 \
--keep-best-checkpoints 5 --save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
--log-format 'simple' --log-interval 100
```
**In Fairseq, the number of tokens in a batch = GPU number * max_tokens * update_freq**. If you have 8 GPUs, the above script yields approximately 64k tokens per batch (8 × 4096 × 2 = 65,536).
For more details of the above arguments, please refer to [the explanation of the training configurations](./training.md).
### Examples
We also provide training script examples including:
* Training machine translation model from scratch on [WMT14 En-De](./examples/DA-Transformer/wmt14_ende.sh), [WMT14 De-En](./examples/DA-Transformer/wmt14_deen.sh), [WMT17 Zh-En](./examples/DA-Transformer/wmt17_zhen.sh), [WMT17 En-Zh](./examples/DA-Transformer/wmt17_enzh.sh).
* Fine-tuning pre-trained DA-Transformer on [SQuAD1.1](./examples/DA-Transformer/squad.sh), [XSUM](./examples/DA-Transformer/xsum.sh), [Quora](./examples/DA-Transformer/quora.sh), [PersonaChat](./examples/DA-Transformer/personachat.sh), [ROCStory](./examples/DA-Transformer/rocstory.sh).
* Pre-training DA-Transformer on [Wikipedia + BookCorpus](./examples/DA-Transformer/pretrain.sh).
### Up-sampling Strategies
DA-Transformer currently supports two up-sampling strategies to determine the DAG size (see the sketch after this list):
- ``--upsample-base source_old``: Recommended for machine translation or tasks whose inputs and outputs have similar lengths. The DAG size is determined by the source length during both training and inference. In this case, ``--upsample-scale`` is usually set to a fixed number, indicating that the DAG size is a fixed multiple of the input length. You do not need to train a length predictor and can disable it by setting ``--length-loss-factor`` to 0. (``--upsample-base source`` is similar but gives a slightly smaller DAG size, because it does not count the <bos> and <eos> tokens when measuring the input length.)
- ``--upsample-base predict``: Recommended for other tasks. The DAG size is determined by the golden target length during training and the predicted length during inference. In this case, ``--upsample-scale`` is usually set to a range, such as 4~8, indicating that the DAG size is between 4 and 8 times the target length. This diversifies the DAG structures to promote model generalization. You need to train a length predictor by setting ``--length-loss-factor`` to a value greater than 0 (usually 0.1).
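The two settings differ in only a few options; a minimal sketch (values follow the recommendations above):
```bash
# fixed DAG size derived from the source length (e.g. machine translation); no length predictor needed
fixed_opts="--upsample-base source_old --upsample-scale 8 --length-loss-factor 0"
# DAG size derived from the golden (training) or predicted (inference) target length; requires a length predictor
predict_opts="--upsample-base predict --upsample-scale 4~8 --length-loss-factor 0.1"
# append either ${fixed_opts} or ${predict_opts} to the fairseq-train command above
```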
### Speed up with Lightseq
To optimize your training with Lightseq, you only need to modify two options as follows:
* Change ``--arch glat_decomposed_link_base`` to ``--arch ls_glat_decomposed_link_base``
* Change ``--optimizer adam`` to ``--optimizer ls_adam``
By making these simple changes, you can expect to see a 1.5x speed improvement in training.
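Expressed concretely (a minimal sketch; the shell variables are only for illustration):
```bash
# LightSeq-accelerated variants of the two options
arch_opt="--arch ls_glat_decomposed_link_base"   # instead of: --arch glat_decomposed_link_base
optim_opt="--optimizer ls_adam"                  # instead of: --optimizer adam
```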
However, it's important to keep in mind that **Lightseq does not support all Transformer variants found in Fairseq**. If you wish to modify the model architecture, exercise caution and carefully review the code to avoid unexpected behavior. **The code will NOT emit any warnings.**
## Decoding
DA-Transformer offers five decoding strategies to suit different needs:
- **Greedy**: The fastest option, which takes the argmax of both token and transition predictions.
- **Lookahead**: A higher-quality option that is similar in speed to Greedy. It jointly considers the next transition and token probabilities when making choices.
- **Viterbi**: Slightly slower than Lookahead but higher quality. It also supports a length penalty to control the output length.
- **Sampling**: Facilitates diverse generation but sacrifices quality; the tradeoff can be tuned by the decoding temperature.
- **BeamSearch**: The slowest but highest-quality option, which can be combined with an n-gram language model.
**About ``decode_upsample_scale``**: This parameter specifies the up-sampling scale that determines the DAG size during inference. If ``--upsample-scale`` used in training is a fixed number, this parameter should be the same value. If ``--upsample-scale`` used in training is a range, this parameter can be set to the average of the range or tuned on the validation set.
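Since the best value can depend on the task, here is a minimal sketch of tuning it on the validation set (the candidate grid and output file names are placeholders; the command mirrors the Lookahead example below):
```bash
for scale in 4.0 5.0 6.0 7.0 8.0; do
    fairseq-generate ${data_dir} \
        --gen-subset valid --user-dir fs_plugins --task translation_dat_task \
        --remove-bpe --max-tokens 4096 --seed 0 \
        --decode-strategy lookahead --decode-upsample-scale ${scale} \
        --path ${average_checkpoint_path} > valid_scale_${scale}.out
done
# pick the scale with the best BLEU reported at the end of each output file
```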
**About fp16**: Decoding can be accelerated by specifying ``--fp16`` to enable half-precision computation.
### Averaging Checkpoints
To enhance generation performance in NAT, averaging the five checkpoints with the highest BLEU score is a widely used technique.
```bash
checkpoint_dir=/path/to/checkpoint/dir
average_checkpoint_path=/path/to/checkpoint/average.pt
python3 ./fs_plugins/scripts/average_checkpoints.py \
--inputs ${checkpoint_dir} \
--max-metric \
--best-checkpoints-metric bleu \
--num-best-checkpoints-metric 5 \
--output ${average_checkpoint_path}
```
### Greedy/Lookahead Decoding
```bash
data_dir=/path/to/binarized/data/dir
average_checkpoint_path=/path/to/checkpoint/average.pt
# Greedy Decoding
fairseq-generate ${data_dir} \
--gen-subset test --user-dir fs_plugins --task translation_dat_task \
--remove-bpe --max-tokens 4096 --seed 0 \
--decode-strategy greedy --decode-upsample-scale 8 \
--path ${average_checkpoint_path}
# Lookahead Decoding
# ``decode_beta`` scales the score of logits. Specifically: y_i, a_i = argmax [ log P(y_i|a_i) + beta * log P(a_i|a_{i-1}) ]
fairseq-generate ${data_dir} \
--gen-subset test --user-dir fs_plugins --task translation_dat_task \
--remove-bpe --max-tokens 4096 --seed 0 \
--decode-strategy lookahead --decode-upsample-scale 8 --decode-beta 1 \
--path ${average_checkpoint_path}
# Lookahead Decoding with N-gram Prevention
# ``decode_no_consecutive_repeated_ngram`` prevents consecutive repeated k-grams (k <= n) in the generated text. Use 0 to disable this feature.
# ``decode_no_repeated_ngram`` prevents repeated k-grams (not necessarily consecutive) with order n or higher in the generated text. Use 0 to disable this feature.
# ``decode_top_cand_n`` specifies the number of top candidates to consider during transition.
# ``decode_top_p`` specifies the maximum probability of top candidates to consider during transition.
# If all transitions fail (because of n-gram prevention), the algorithm removes the constraints and chooses the most likely transition.
fairseq-generate ${data_dir} \
--gen-subset test --user-dir fs_plugins --task translation_dat_task \
--remove-bpe --max-tokens 4096 --seed 0 \
--decode-strategy lookahead --decode-upsample-scale 8 --decode-beta 1 \
--decode-no-consecutive-repeated-ngram 3 --decode-no-repeated-ngram 2 --decode-top-cand-n 20 --decode-top-p 0.9 \
--path ${average_checkpoint_path}
```
### Viterbi Decoding
The Viterbi decoding algorithms were proposed in "**Viterbi Decoding of Directed Acyclic Transformer for Non-Autoregressive Machine Translation**".
``decode_viterbibeta`` is the length penalty that controls the output length. Viterbi decoding finds the path that maximizes $P(A|X) / |Y|^{\beta}$. Joint-Viterbi finds the output that maximizes $P(A,Y|X) / |Y|^{\beta}$.
You can set ``decode_strategy`` to ``viterbi`` or ``jointviterbi`` to enable Viterbi decoding. ``jointviterbi`` is usually recommended because it jointly considers the transition and token probabilities, similar to lookahead decoding.
```bash
data_dir=/path/to/binarized/data/dir
average_checkpoint_path=/path/to/checkpoint/average.pt
# Viterbi
fairseq-generate ${data_dir} \
--gen-subset test --user-dir fs_plugins --task translation_dat_task \
--iter-decode-max-iter 0 --iter-decode-eos-penalty 0 --beam 1 \
--remove-bpe --max-tokens 4096 --seed 0 \
--decode-strategy viterbi --decode-upsample-scale 8 --decode-viterbibeta 1 \
--path ${average_checkpoint_path}
# Joint-Viterbi
fairseq-generate ${data_dir} \
--gen-subset test --user-dir fs_plugins --task translation_dat_task \
--remove-bpe --max-tokens 4096 --seed 0 \
--decode-strategy jointviterbi --decode-upsample-scale 8 --decode-viterbibeta 1 \
--path ${average_checkpoint_path}
```
### Sampling
```bash
data_dir=/path/to/binarized/data/dir
average_checkpoint_path=/path/to/checkpoint/average.pt
# Sampling
# ``decode_top_cand_n`` specifies the number of top candidates to consider during transition.
# ``decode_top_p`` specifies the maximum probability of top candidates to consider during transition.
# ``decode_temperature`` specifies the temperature. A higher temperature brings more diverse outputs.
# ``decode_no_consecutive_repeated_ngram`` prevents consecutive repeated k-grams (k <= n) in the generated text. Use 0 to disable this feature.
# ``decode_no_repeated_ngram`` prevents repeated k-grams (not necessarily consecutive) with order n or higher in the generated text. Use 0 to disable this feature.
fairseq-generate ${data_dir} \
--gen-subset test --user-dir fs_plugins --task translation_dat_task \
--remove-bpe --max-tokens 4096 --seed 0 \
--decode-strategy sample --decode-upsample-scale 8 \
--decode-no-consecutive-repeated-ngram 3 --decode-no-repeated-ngram 2 --decode-top-cand-n 5 --decode-top-p 0.9 --decode-temperature 1 \
--path ${average_checkpoint_path}
```
### BeamSearch
Please install ``dag_search`` first; see ``./dag_search/install.sh`` for requirements.
If you want to use an n-gram LM in BeamSearch, see [this guide](dag_search/README.md) to build one before generation.
```bash
data_dir=/path/to/binarized/data/dir
average_checkpoint_path=/path/to/checkpoint/average.pt
# The algorithm finds the sentence maximizing: 1 / |Y|^{alpha} * [ log P(Y) + gamma * log P_{n-gram}(Y) ]
# ``decode_beta`` scales the score of logits. Specifically: log P(Y, A) := sum log P(y_i|a_i) + beta * sum log P(a_i|a_{i-1})
# ``decode_alpha`` controls the length penalty and ``decode_gamma`` weights the n-gram language model score, as in the objective above.
# ``decode_lm_path`` is the path to the language model. Set to None to disable n-gram LM.
# ``decode_beamsize`` is the beam size; ``decode_top_cand_n`` sets the number of top candidates considered for transitions.
# ``decode_top_p`` sets the maximum probability of top candidates considered for transitions.
# ``decode_max_beam_per_length`` specifies the maximum number of beams with the same length in each step during beamsearch decoding.
# ``decode_max_batchsize`` specifies the maximum batch size to use. Should not be smaller than the actual batch size, as it is used for memory allocation.
# ``decode_max_workers`` specifies the number of multiprocess workers to use during beamsearch decoding. More workers consume more memory. It does not affect decoding latency, only decoding throughput, so you must use ``fairseq-fastgenerate`` to enable overlapped decoding to see the difference.
# ``decode_threads_per_worker`` specifies the number of threads per worker to use during beamsearch decoding. This setting applies to both vanilla decoding and overlapped decoding. A value between 2 and 8 is typically optimal.
# ``decode_dedup`` enables token deduplication.
# ``decode_no_consecutive_repeated_ngram`` prevents consecutive repeated k-grams (k <= n) in the generated text. Use 0 to disable this feature.
# ``decode_no_repeated_ngram`` prevents repeated k-grams (not necessarily consecutive) with order n or higher in the generated text. Use 0 to disable this feature.
# BeamSearch without LM
fairseq-generate ${data_dir} \
--gen-subset test --user-dir fs_plugins --task translation_dat_task \
--remove-bpe --batch-size 32 --seed 0 \
--decode-strategy beamsearch --decode-upsample-scale 8 \
--decode-beta 1 --decode-alpha 1.1 --decode-gamma 0 \
--decode-beamsize 200 --decode-top-cand-n 5 --decode-top-p 0.9 \
--decode-max-beam-per-length 10 --decode-max-batchsize 32 --decode-max-workers 0 --decode-threads-per-worker 6 --decode-dedup \
--path ${average_checkpoint_path}
# BeamSearch with LM
# You should first build the n-gram language model and save it to /path/to/ngram_lm.arpa
fairseq-generate ${data_dir} \
--gen-subset test --user-dir fs_plugins --task translation_dat_task \
--remove-bpe --batch-size 32 --seed 0 \
--decode-strategy beamsearch --decode-upsample-scale 8 \
    --decode-beta 1 --decode-alpha 1.1 --decode-gamma 0.1 \
    --decode-lm-path /path/to/ngram_lm.arpa \
--decode-beamsize 200 --decode-top-cand-n 5 --decode-top-p 0.9 \
--decode-max-beam-per-length 10 --decode-max-batchsize 32 --decode-max-workers 0 --decode-threads-per-worker 6 --decode-dedup \
--path ${average_checkpoint_path}
# BeamSearch with Overlapped Decoding
# Enabled by using ``fairseq-fastgenerate`` and setting ``decode_max_workers`` > 0
# ``fairseq-fastgenerate`` will measure the time of processing the whole test set. It removes all time-consuming operations irrelevant to decoding (such as calculating BLEU scores).
fairseq-fastgenerate ${data_dir} \
--gen-subset test --user-dir fs_plugins --task translation_dat_task \
--remove-bpe --batch-size 32 --seed 0 \
--decode-strategy beamsearch --decode-upsample-scale 8 \
--decode-beta 1 --decode-alpha 1.1 --decode-gamma 0.1 \
--decode-lm-path /path/to/ngram_lm.arpa \
--decode-beamsize 200 --decode-top-cand-n 5 --decode-top-p 0.9 \
    --decode-max-beam-per-length 10 --decode-max-batchsize 32 --decode-max-workers 2 --decode-threads-per-worker 6 --decode-dedup \
--path ${average_checkpoint_path}
```
**Note: ``decode_alpha`` can control the output length, which should be tuned on the validation set.**
**Note: Both ``decode_no_consecutive_repeated_ngram`` and ``decode_no_repeated_ngram`` options can also be used with BeamSearch. Simply include them in your command.**
## Evaluation Scripts
### Quality Evaluation
* For machine translation, we use tokenized BLEU. You can find the BLEU scores in the output files, or use ``fairseq-score -s /path/to/output -r /path/to/reference -o 4`` (see the sketch after this list). For WMT17 En-Zh, we use sacreBLEU and add ``--source-lang en --target-lang zh --tokenizer moses --scoring sacrebleu --sacrebleu-tokenizer zh`` in decoding.
* For the tasks presented in the PDAT paper, we provide the evaluation scripts [here](./examples/DA-Transformer/evaluation).
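A minimal sketch of the scoring workflow, assuming the generation output was saved to ``gen.out`` (the file name is a placeholder; ``H-``/``T-`` are fairseq's hypothesis and reference line prefixes):
```bash
grep ^H- gen.out | cut -f3- > gen.out.sys
grep ^T- gen.out | cut -f2- > gen.out.ref
fairseq-score -s gen.out.sys -r gen.out.ref -o 4
```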
### Speed Evaluation
- **Latency**: The decoding outputs report the latency; we use a batch size of 1 in our paper. To replicate this, replace ``--max-tokens 4096`` with ``--batch-size 1`` in the decoding scripts (see the sketch after this list).
- **Throughput**: We measure the time taken to process the entire test set using ``fairseq-fastgenerate``. To use this, replace ``fairseq-generate`` with ``fairseq-fastgenerate`` in the decoding scripts. If you are using BeamSearch, do not forget to specify a larger number of workers.
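To make the latency setting concrete, a minimal sketch derived from the Lookahead command above (paths are placeholders):
```bash
fairseq-generate ${data_dir} \
    --gen-subset test --user-dir fs_plugins --task translation_dat_task \
    --remove-bpe --batch-size 1 --seed 0 \
    --decode-strategy lookahead --decode-upsample-scale 8 --decode-beta 1 \
    --path ${average_checkpoint_path}
```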
**Note**: Optimal performance for BeamSearch is heavily dependent on CPU and memory usage. Ensure that you are not running other computationally intensive programs and have enough memory (potentially tens or hundreds of GBs depending on your worker numbers and batch size).
## Other Scripts
### Lightseq Conversion Scripts
We provide scripts for converting a LightSeq checkpoint to a Fairseq checkpoint and vice versa:
```bash
python3 ./fs_plugins/scripts/convert_ls_to_fs.py --input path/to/ls_checkpoint.pt --output path/to/fs_checkpoint.pt
python3 ./fs_plugins/scripts/convert_fs_to_ls.py --input path/to/fs_checkpoint.pt --output path/to/ls_checkpoint.pt
```
**Note:** There may be slight differences between the outputs of LightSeq and Fairseq checkpoints due to floating-point precision.
## Released Checkpoints
We have released the following checkpoints for pre-trained models described in our paper:
* PDAT (uncased, 127M, trained on 16GB Wikipedia + BookCorpus, 500k steps): [[Weights]](https://cloud.tsinghua.edu.cn/f/b048a7eddd204e2098a7/?dl=1) [[Vocab]](https://cloud.tsinghua.edu.cn/f/32ab06d8f7c546569c87/?dl=1)
## FAQs
1. **CUDA compilation failed: error: invalid static_cast from type ...**
If you encounter this error message, first check your gcc version. It's recommended to use gcc 7 or higher since PyTorch no longer supports older versions.
If upgrading is not an option, you can use this workaround (https://zhuanlan.zhihu.com/p/468605263, in Chinese):
* Locate the header file ``/PATH/TO/PYTHONLIB/torch/include/torch/csrc/api/include/torch/nn/cloneable.h``.
* Modify lines 46, 58, and 70. The original code is:
```
copy->parameters_.size() == parameters_.size()
copy->buffers_.size() == buffers_.size()
copy->children_.size() == children_.size()
```
Replace them with:
```
copy->parameters_.size() == this->parameters_.size()
copy->buffers_.size() == this->buffers_.size()
copy->children_.size() == this->children_.size()
```
* Rerun your script
## Contact Us
If there are any problems, you are welcome to contact us by posting issues in this repository or sending emails to ``huangfei382@163.com``.
## How to Cite
Please kindly cite us if you find our papers, code, or pre-trained checkpoints useful.
DA-Transformer:
```
@inproceedings{huang2022DATransformer,
author = {Fei Huang and Hao Zhou and Yang Liu and Hang Li and Minlie Huang},
title = {Directed Acyclic Transformer for Non-Autoregressive Machine Translation},
booktitle = {Proceedings of the 39th International Conference on Machine Learning, {ICML} 2022},
year = {2022}
}
```
Viterbi Decoding:
```
@inproceedings{shao2022viterbi,
author = {Chenze Shao and Zhengrui Ma and Yang Feng},
title = {Viterbi Decoding of Directed Acyclic Transformer for Non-Autoregressive Machine Translation},
booktitle = {Findings of EMNLP 2022},
year = {2022}
}
```
Pretrained DA-Transformer:
```
@article{huang2022PDAT,
author = {Fei Huang and Pei Ke and Minlie Huang},
title = {Directed Acyclic Transformer Pre-training for High-quality Non-Autoregressive Text Generation},
  journal = {Transactions of the Association for Computational Linguistics},
year = {2023}
}
```
DA-Transformer-main/hubconf.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import functools
import importlib
dependencies = [
"dataclasses",
"hydra",
"numpy",
"omegaconf",
"regex",
"requests",
"torch",
]
# Check for required dependencies and raise a RuntimeError if any are missing.
missing_deps = []
for dep in dependencies:
try:
importlib.import_module(dep)
except ImportError:
# Hack: the hydra package is provided under the "hydra-core" name in
# pypi. We don't want the user mistakenly calling `pip install hydra`
# since that will install an unrelated package.
if dep == "hydra":
dep = "hydra-core"
missing_deps.append(dep)
if len(missing_deps) > 0:
raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps)))
# only do fairseq imports after checking for dependencies
from fairseq.hub_utils import ( # noqa; noqa
BPEHubInterface as bpe,
TokenizerHubInterface as tokenizer,
)
from fairseq.models import MODEL_REGISTRY # noqa
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
import fairseq.data.token_block_utils_fast # noqa
except ImportError:
try:
import cython # noqa
import os
from setuptools import sandbox
sandbox.run_setup(
os.path.join(os.path.dirname(__file__), "setup.py"),
["build_ext", "--inplace"],
)
except ImportError:
print(
"Unable to build Cython components. Please make sure Cython is "
"installed if the torch.hub model you are loading depends on it."
)
# automatically expose models defined in FairseqModel::hub_models
for _model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name,
)
DA-Transformer-main/setup.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import site
import sys
site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
from setuptools import Extension, find_packages, setup
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
with open(os.path.join("fairseq", "version.txt")) as f:
version = f.read().strip()
# append latest commit hash to version string
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except Exception:
pass
# write version info to fairseq/version.py
with open(os.path.join("fairseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
"fairseq.libbleu",
sources=[
"fairseq/clib/libbleu/libbleu.cpp",
"fairseq/clib/libbleu/module.cpp",
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.data_utils_fast",
sources=["fairseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.token_block_utils_fast",
sources=["fairseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
# extensions.extend(
# [
# cpp_extension.CppExtension(
# "fairseq.libbase",
# sources=[
# "fairseq/clib/libbase/balanced_assignment.cpp",
# ],
# )
# ]
# )
# extensions.extend(
# [
# cpp_extension.CppExtension(
# "fairseq.libnat",
# sources=[
# "fairseq/clib/libnat/edit_dist.cpp",
# ],
# ),
# cpp_extension.CppExtension(
# "alignment_train_cpu_binding",
# sources=[
# "examples/operators/alignment_train_cpu.cpp",
# ],
# ),
# ]
# )
if "CUDA_HOME" in os.environ:
pass
# extensions.extend(
# [
# cpp_extension.CppExtension(
# "fairseq.libnat_cuda",
# sources=[
# "fairseq/clib/libnat_cuda/edit_dist.cu",
# "fairseq/clib/libnat_cuda/binding.cpp",
# ],
# ),
# cpp_extension.CppExtension(
# "fairseq.ngram_repeat_block_cuda",
# sources=[
# "fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
# "fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
# ],
# ),
# cpp_extension.CppExtension(
# "alignment_train_cuda_binding",
# sources=[
# "examples/operators/alignment_train_kernel.cu",
# "examples/operators/alignment_train_cuda.cpp",
# ],
# ),
# ]
# )
cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
pass
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(
["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
shell=True,
)
extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
setup(
name="fairseq",
version=version,
description="Facebook AI Research Sequence-to-Sequence Toolkit",
url="https://github.com/pytorch/fairseq",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"cython",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"setuptools>=18.0",
],
install_requires=[
"cffi",
"cython",
'dataclasses; python_version<"3.7"',
"hydra-core>=1.0.7,<1.1",
"omegaconf<2.1",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"regex",
"sacrebleu==1.5.1",
"sacrebleu[ja]",
"tqdm",
"bitarray",
"ninja"
],
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"examples",
"examples.*",
"scripts",
"scripts.*",
"tests",
"tests.*",
]
)
+ extra_packages,
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
"fairseq-generate = fairseq_cli.generate:cli_main",
"fairseq-fastgenerate = fairseq_cli.fastgenerate:cli_main",
"fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
"fairseq-interactive = fairseq_cli.interactive:cli_main",
"fairseq-preprocess = fairseq_cli.preprocess:cli_main",
"fairseq-datpreprocess = fairseq_cli.datpreprocess:cli_main",
"fairseq-score = fairseq_cli.score:cli_main",
"fairseq-train = fairseq_cli.train:cli_main",
"fairseq-validate = fairseq_cli.validate:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="fairseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
if __name__ == "__main__":
try:
# symlink examples into fairseq package so package_data accepts them
fairseq_examples = os.path.join("fairseq", "examples")
if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
os.symlink(os.path.join("..", "examples"), fairseq_examples)
package_data = {
"fairseq": (
get_files(fairseq_examples)
+ get_files(os.path.join("fairseq", "config"))
)
}
do_setup(package_data)
finally:
if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples):
os.unlink(fairseq_examples)
DA-Transformer-main/train.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
"""
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main()
DA-Transformer-main/training.md
# Training Configs
### Task Configs
```bash
--task translation_dat_task # Task for DA-Transformer
--upsample-base predict # Possible values are: ["predict", "source", "source_old"].
# If set to "predict", the DAG size will be determined by the golden target length during training and the predicted length during inference.
# Note that --length-loss-factor must be greater than 0 during training.
# If set to "source", the DAG size will be determined by the source length during both training and inference. You can disable the length
# predictor during training by setting --length-loss-factor to 0.
# If set to "source_old", the DAG size is determined similarly to "source" but several token longer. This option is only used for
# compatibility with the upsampling method in version 1.0.
--upsample-scale 4~8 # Specifies the upsample scale for the decoder input length in training.
# For instance, "4~8" indicates that the upsampling scale will be uniformly sampled from the range [4, 8];
# "4" indicates fixed upsampling scale.
--seg-tokens 32 # This parameter specifies the number of special tokens that will be used for segment id.
# If you are using pre-trained checkpoints, please set this value to 32.
--filter-max-length 512:128 # Filters samples that exceed the maximum lengths. For example, "128:256" indicates a maximum source length of 128 and a maximum target length of 256.
# The default value of None filters according to max-source-positions and max-target-positions.
--filter-ratio 8 # Filters out samples that do not satisfy the specified len(target)/len(source) ratio constraints.
# For example, if the ratio is set to "8", samples where len(target)/len(source) > 8 or len(target)/len(source) < 1/8 will be removed.
# If set to "0.5~2", samples where len(target)/len(source) < 0.5 or len(target)/len(source) > 2 will be removed.
# Default: None (disabled).
--do-not-load-task-args # Do not load task arguments from the restored checkpoints.
```
### Model Configs
```bash
--arch glat_decomposed_link_base # The Model Architecture. You can use "ls_glat_decomposed_link_base" to enable LightSeq's Transformer
--links-feature feature:position # Specifies the features used to predict transitions, separated by a colon.
# For example, "feature:position" represents the concatenation of decoder features and learnable positional embeddings.
--segment-embedding                   # Adds an additional embedding representing the segment id for the decoder input.
--max-source-positions 128 # Max length of encoder.
--max-target-positions 1024 # Max length of decoder. If the length of a sample exceeds this limit after up-sampling, the sample will be discarded.
--load-pretrained-model ${data_dir}   # Path to a file containing a pre-trained model. It also supports a checkpoint file and will automatically convert between the LightSeq and Fairseq architectures.
```
### Decoding Configs
This configs used in the validation. See more configs related to decoding [here](./README.md#decoding).
```bash
--decode-strategy lookahead # Decoding Strategy. Possible values: greedy, lookahead, beamsearch.
--decode-upsample-scale 8 # Upsampling scale to determine the DAG size during inference.
# If --upsample-scale used in training is a fixed number, this parameter should be the same value.
                                      # If --upsample-scale used in training is a range, this parameter can be the average of the range and optionally tuned after training.
```
### Criterion Configs
```bash
--criterion nat_dag_loss # The Criterion for DA-Transformer.
--length-loss-factor 0.1              # Weight on the length prediction loss. Must be greater than 0 if --upsample-base "predict" is set.
--max-transition-length 99999 # Specifies the maximum transition distance. A value of -1 indicates no limit, but this cannot be used with CUDA custom operations.
# To use CUDA operations with no limit, specify a very large number such as 99999.
--glat-p 0.5:0.1@200k                 # Sets the glancing probability and its annealing schedule (see the sketch after this block). For example, '0.5:0.1@200k' anneals the probability from 0.5 to 0.1 over 200k steps.
--glance-strategy number-random       # Sets the glancing strategy. Possible values: "number-random" or "None" or "CMLM"
--use-pretrain-loss # If true, use the pre-training loss, i.e. the position of segment id will be fixed.
--no-force-emit # If true, the position of glanced tokens in the second forward pass will not be fixed.
--torch-dag-loss                      # Use torch native implementation for dag-loss. It may be slower and consume more GPU memory.
--torch-dag-best-alignment            # Use torch native implementation for dag-best-alignment. It may be slower and consume more GPU memory.
--torch-dag-logsoftmax-gather         # Use torch native implementation for logsoftmax-gather. It may be slower and consume more GPU memory.
```
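For intuition, the ``--glat-p`` schedule string can be read as follows; a minimal sketch assuming linear annealing (the schedule shape is an assumption here, not taken from the code):
```bash
step=100000   # current training step (placeholder)
awk -v s=0.5 -v e=0.1 -v total=200000 -v step="${step}" \
    'BEGIN { f = step / total; if (f > 1) f = 1; print s + (e - s) * f }'   # prints 0.3
```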
### Optimizer Configs
```bash
--optimizer adam # The optimizer. You can use "ls_adam" instead to enable LightSeq's Optimizer
```
### Training Configs
```bash
--max-tokens 4096 # Specifies the maximum number of tokens (either source or target) allowed in a single batch during training.
# This number does not include any padding tokens.
--max-tokens-valid 4096 # Specifies the maximum number of tokens (either source or target) allowed in a single batch during validation.
# This number does not include any padding tokens.
--update-freq 2 # Specifies the number of steps of gradient accumulation before updating the model.
# The actual batch size is: GPU number * max_tokens * update_freq.
--max-tokens-after-upsample # If enabled, the maximum number of tokens (--max-tokens) considered during generation
# will take into account the upsampling ratio. In other words, the length of the generated sequence will be capped at
# max(source_length, decoder_length * upsample_scale). Default: False.
--batch-split-by-src 32767 # If this value is greater than 0, it splits a batch into multiple smaller batches.
# The split is based on the number of source tokens in each batch (considering padding tokens),
# ensuring that no batch has more source tokens than the specified value.
# This is different from --update-freq because it works on each GPU separately. It's useful when out-of-memory (OOM) errors occur rarely
# and you do not want to set a smaller batch size.
--max-encoder-batch-tokens 20000 # Specifies the maximum number of tokens for the encoder input to avoid running out of memory. The default value of None indicates no limit.
--max-decoder-batch-tokens 20000 # Specifies the maximum number of tokens for the decoder input to avoid running out of memory. The default value of None indicates no limit.
```
### Validation Configs
```bash
--eval-bleu # Evaluate BLEU scores during validation
--eval-bleu-detok space # Detokenizer used in BLEU evaluation
--eval-bleu-remove-bpe # Whether remove bpe in BLEU evaluation
--eval-bleu-print-samples # Print several samples in BLEU evaluation
--eval-bleu-order 4 # The order of n-gram in BLEU evaluation
```
DA-Transformer-main/examples/DA-Transformer/personachat.sh
data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
pretrained_model=/path/to/model.bin
log_txt=/path/to/logfile
CUDA_VISIBLE_DEVICES=0,1 fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base predict --upsample-scale 4~8 \
--seg-tokens 32 --filter-max-length 512:128 \
\
`# DA-Transformer Architecture Configs` \
--arch glat_decomposed_link_pretrain \
--links-feature feature:position --segment-embedding \
--max-source-positions 512 --max-target-positions 1024 --truncate-source \
--encoder-learned-pos --decoder-learned-pos \
--share-all-embeddings --activation-fn gelu --apply-bert-init \
--load-pretrained-model ${pretrained_model} \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 6.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0.1 --max-transition-length 99999 \
--glat-p 0.3 --glance-strategy number-random \
--no-force-emit \
\
`# Optimizer & Regularizer Configs` \
--optimizer adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.3 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 1e-4 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
    `# these args lead to about 4k target tokens in a batch` \
--max-tokens 23000 --max-tokens-valid 16000 --update-freq 1 \
--max-tokens-after-upsample \
--max-encoder-batch-tokens 20000 --max-decoder-batch-tokens 20000 \
--max-update 60000 --grouped-shuffling \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 9999999 --validate-interval-updates 2000 \
--eval-bleu --eval-bleu-detok space --eval-bleu-remove-bpe bert --eval-bleu-print-samples --eval-bleu-order 2 \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
--save-interval 9999999 --save-interval-updates 2000 \
--keep-best-checkpoints 5 --save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
    --log-format 'simple' --log-interval 100 2> >(tee -a ${log_txt}) | tee -a ${log_txt}
DA-Transformer-main/examples/DA-Transformer/pretrain.sh
data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
log_txt=/path/to/logfile
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base predict --upsample-scale 4~8 \
--seg-tokens 32 --filter-max-length 512:128 \
\
`# DA-Transformer Architecture Configs` \
--arch ls_glat_decomposed_link_pretrain \
--links-feature feature:position --segment-embedding \
--max-source-positions 512 --max-target-positions 1024 \
--encoder-learned-pos --decoder-learned-pos \
--share-all-embeddings --activation-fn gelu --apply-bert-init \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 6.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0 --max-transition-length 99999 \
--glat-p 0.1 --glance-strategy fix \
--use-pretrain-loss \
\
`# Optimizer & Regularizer Configs` \
--optimizer ls_adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.1 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 0.0002 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
--max-sentences 16 --max-sentences-valid 8 --update-freq 2 \
--max-encoder-batch-tokens 22000 --max-decoder-batch-tokens 22000 \
--max-update 500000 --grouped-shuffling \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 1 --validate-interval-updates 10000 \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--save-interval 1 --save-interval-updates 10000 \
--save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
    --log-format 'simple' --log-interval 100 2> >(tee -a ${log_txt}) | tee -a ${log_txt}
DA-Transformer-main/examples/DA-Transformer/process_bert_uncased.py
from transformers import BertTokenizer
def bert_uncased_tokenize(fin, fout):
    """Tokenize each line of `fin` into BERT word pieces and write them to `fout`."""
    tok = BertTokenizer.from_pretrained('bert-base-uncased')
    with open(fin, 'r', encoding='utf-8') as fin, open(fout, 'w', encoding='utf-8') as fout:
        for line in fin:
            word_pieces = tok.tokenize(line.strip())
            fout.write('{}\n'.format(" ".join(word_pieces)))
bert_uncased_tokenize('train.src', 'tokenized_train.src')
bert_uncased_tokenize('train.tgt', 'tokenized_train.tgt')
bert_uncased_tokenize('dev.src', 'tokenized_dev.src')
bert_uncased_tokenize('dev.tgt', 'tokenized_dev.tgt')
bert_uncased_tokenize('test.src', 'tokenized_test.src')
bert_uncased_tokenize('test.tgt', 'tokenized_test.tgt')
DA-Transformer-main/examples/DA-Transformer/process_pretrain.py
import argparse
import numpy as np
import random
import math
# import numba
from transformers import BertTokenizer
tok = BertTokenizer.from_pretrained('bert-base-uncased')
parser = argparse.ArgumentParser()
# fmt: off
parser.add_argument('file')
parser.add_argument('--max-seq-length', type=int, default=600)
parser.add_argument('--mask-ratio', type=float, default=0.15)
parser.add_argument('--mask-max-seg', type=int, default=6)
parser.add_argument('--mask-strategy', type=str, default="segment")
parser.add_argument('--mask-min-token-per-seg', type=int, default=4)
parser.add_argument('--mask-min-interval', type=int, default=8)
parser.add_argument('--duplicate', type=int, default=2)
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--out', type=str)
args = parser.parse_args()
np.random.seed(args.seed)
random.seed(args.seed)
srcfile = open(f"{args.out}.src", 'w')
tgtfile = open(f"{args.out}.tgt", 'w')
# @numba.jit(nopython=True)
def numba_process(buffer, mask_ratio, mask_max_seg, mask_min_token_per_seg, mask_min_interval):
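    # Splits `buffer` (a list of sub-word tokens) into masked segments: the source
    # keeps the unmasked tokens with [P0]..[Pk] placeholders at the masked positions,
    # while the target holds the masked spans, each prefixed by its placeholder and
    # terminated by a final placeholder. Returns (src, tgt) as token lists.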
mask_num = int(len(buffer) * mask_ratio)
if args.mask_strategy == "iid":
iid_id = np.sort(np.argsort(np.random.uniform(0, 1, len(buffer)), 0)[:mask_num], 0)
mask_length = []
mask_interval = []
for i, x in enumerate(iid_id):
if i == 0:
mask_interval.append(x)
mask_length.append(1)
elif iid_id[i-1] + 1 != x:
mask_interval.append(x - iid_id[i-1] - 1)
mask_length.append(1)
else:
mask_length[-1] += 1
mask_interval.append(len(buffer) - iid_id[-1] - 1)
mask_interval = np.array(mask_interval)
mask_length = np.array(mask_length)
seg_num = len(mask_length)
else:
max_seg = min(mask_max_seg + 1, mask_num // mask_min_token_per_seg, (len(buffer) - mask_num) // mask_min_interval + 1)
seg_num = max_seg - 1
mask_length = np.array([int(math.floor(i * mask_num / seg_num)) for i in range(seg_num + 1)])
mask_length = mask_length[1:] - mask_length[:-1]
mask_interval = np.array([0] + [mask_min_interval] * (seg_num - 1) + [0]) + \
np.random.multinomial(len(buffer) - mask_num - mask_min_interval * (seg_num - 1), [1. / (seg_num + 1)] * (seg_num + 1))
assert mask_length.sum() + mask_interval.sum() == len(buffer)
assert mask_length.sum() == mask_num
src = ["" for _ in range(0)]
tgt = ["" for _ in range(0)]
nowpos = 0
for i in range(seg_num):
nextpos = nowpos + mask_interval[i]
src += buffer[nowpos:nextpos] + [f"[P{i}]"]
nowpos = nextpos
nextpos = nowpos + mask_length[i]
tgt += [f"[P{i}]"] + buffer[nowpos:nextpos]
nowpos = nextpos
nextpos = nowpos + mask_interval[-1]
src += buffer[nowpos:nextpos]
tgt += [f"[P{seg_num}]"]
return src, tgt
def process_and_write(buffer, srcfile, tgtfile):
# import time
# starttime = time.time()
src, tgt = numba_process(buffer, args.mask_ratio, args.mask_max_seg, args.mask_min_token_per_seg, args.mask_min_interval)
# secondtime = time.time()
srcfile.write(" ".join(src) + "\n")
tgtfile.write(" ".join(tgt) + "\n")
# print(secondtime - starttime, time.time() - secondtime)
for dup in range(args.duplicate):
buffer = []
for i, line in enumerate(open(args.file)):
if i % 10000 == 0:
print(dup, i, flush=True)
if len(buffer) > args.max_seq_length:
buffer = []
if not line or line.isspace():
continue
line = line.strip()
# import time
# starttime = time.time()
if not buffer:
nowline = tok.tokenize(" " + line)
else:
nowline = tok.tokenize(line)
# print(time.time() - starttime)
buffer += nowline
if len(buffer) > args.max_seq_length:
process_and_write(buffer[:args.max_seq_length], srcfile, tgtfile)
skip_num = int((len(buffer) - args.max_seq_length) * random.random())
buffer = buffer[args.max_seq_length + skip_num:]
srcfile.close()
tgtfile.close()
DA-Transformer-main/examples/DA-Transformer/quora.sh
data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
pretrained_model=/path/to/model.bin
log_txt=/path/to/logfile
CUDA_VISIBLE_DEVICES=0,1 fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base predict --upsample-scale 4~8 \
--seg-tokens 32 --filter-max-length 512:128 \
\
`# DA-Transformer Architecture Configs` \
--arch glat_decomposed_link_pretrain \
--links-feature feature:position --segment-embedding \
--max-source-positions 512 --max-target-positions 1024 --truncate-source \
--encoder-learned-pos --decoder-learned-pos \
--share-all-embeddings --activation-fn gelu --apply-bert-init \
--load-pretrained-model ${pretrained_model} \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 6.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0.1 --max-transition-length 99999 \
--glat-p 0.3 --glance-strategy number-random \
--no-force-emit \
\
`# Optimizer & Regularizer Configs` \
--optimizer adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.3 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 1e-5 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
    `# these args lead to about 8k target tokens in a batch` \
--max-tokens 4500 --max-tokens-valid 2048 --update-freq 1 \
--max-encoder-batch-tokens 20000 --max-decoder-batch-tokens 20000 \
--max-update 60000 --grouped-shuffling \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 9999999 --validate-interval-updates 2000 \
--eval-bleu --eval-bleu-detok space --eval-bleu-remove-bpe bert --eval-bleu-print-samples --eval-bleu-order 4 \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
--save-interval 9999999 --save-interval-updates 2000 \
--keep-best-checkpoints 5 --save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
    --log-format 'simple' --log-interval 100 2> >(tee -a ${log_txt}) | tee -a ${log_txt}
DA-Transformer-main/examples/DA-Transformer/rocstory.sh
data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
pretrained_model=/path/to/model.bin
log_txt=/path/to/logfile
CUDA_VISIBLE_DEVICES=0,1 fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base predict --upsample-scale 4~8 \
--seg-tokens 32 --filter-max-length 512:128 \
\
`# DA-Transformer Architecture Configs` \
--arch glat_decomposed_link_pretrain \
--links-feature feature:position --segment-embedding \
--max-source-positions 512 --max-target-positions 1024 --truncate-source \
--encoder-learned-pos --decoder-learned-pos \
--share-all-embeddings --activation-fn gelu --apply-bert-init \
--load-pretrained-model ${pretrained_model} \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 6.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0.1 --max-transition-length 99999 \
--glat-p 0.5 --glance-strategy number-random \
--no-force-emit \
\
`# Optimizer & Regularizer Configs` \
--optimizer adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.3 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 5e-5 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
    `# these args lead to about 8k target tokens in a batch` \
--max-tokens 4096 --max-tokens-valid 2048 --update-freq 1 \
--max-encoder-batch-tokens 20000 --max-decoder-batch-tokens 20000 \
--max-update 100000 --grouped-shuffling \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 9999999 --validate-interval-updates 5000 \
--eval-bleu --eval-bleu-detok space --eval-bleu-remove-bpe bert --eval-bleu-print-samples --eval-bleu-order 2 \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
--save-interval 9999999 --save-interval-updates 5000 \
--keep-best-checkpoints 5 --save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
    --log-format 'simple' --log-interval 100 2> >(tee -a ${log_txt}) | tee -a ${log_txt}
DA-Transformer-main/examples/DA-Transformer/squad.sh
data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
pretrained_model=/path/to/model.bin
log_txt=/path/to/logfile
CUDA_VISIBLE_DEVICES=0,1,2,3 fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base predict --upsample-scale 4~8 \
--seg-tokens 32 --filter-max-length 512:128 \
\
`# DA-Transformer Architecture Configs` \
--arch glat_decomposed_link_pretrain \
--links-feature feature:position --segment-embedding \
--max-source-positions 512 --max-target-positions 1024 --truncate-source \
--encoder-learned-pos --decoder-learned-pos \
--share-all-embeddings --activation-fn gelu --apply-bert-init \
--load-pretrained-model ${pretrained_model} \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 6.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0.1 --max-transition-length 99999 \
--glat-p 0.5 --glance-strategy number-random \
--no-force-emit \
\
`# Optimizer & Regularizer Configs` \
--optimizer adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.3 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 5e-5 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
    `# these args lead to about 4k target tokens in a batch` \
--max-tokens 14000 --max-tokens-valid 8000 --update-freq 1 \
--max-tokens-after-upsample \
--max-encoder-batch-tokens 20000 --max-decoder-batch-tokens 20000 \
--max-update 60000 --grouped-shuffling \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 9999999 --validate-interval-updates 2000 \
--eval-bleu --eval-bleu-detok space --eval-bleu-remove-bpe bert --eval-bleu-print-samples --eval-bleu-order 4 \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
--save-interval 9999999 --save-interval-updates 2000 \
--keep-best-checkpoints 5 --save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
    --log-format 'simple' --log-interval 100 2> >(tee -a ${log_txt}) | tee -a ${log_txt}
DA-Transformer-main/examples/DA-Transformer/wmt14_deen.sh
data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
log_txt=/path/to/logfile
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base source_old --upsample-scale 8 \
--filter-max-length 136:1088 --filter-ratio 2 \
\
`# DA-Transformer Architecture Configs` \
--arch ls_glat_decomposed_link_base \
--links-feature feature:position \
--max-source-positions 136 --max-target-positions 1088 \
--encoder-learned-pos --decoder-learned-pos \
--share-all-embeddings --activation-fn gelu --apply-bert-init \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 8.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0 --max-transition-length 99999 \
--glat-p 0.5:0.1@200k --glance-strategy number-random \
--no-force-emit \
\
`# Optimizer & Regularizer Configs` \
--optimizer ls_adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.1 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 0.0005 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
--max-tokens 4096 --max-tokens-valid 4096 --update-freq 2 \
--max-update 300000 --grouped-shuffling \
--max-encoder-batch-tokens 8000 --max-decoder-batch-tokens 34000 \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 1 --validate-interval-updates 10000 \
--eval-bleu --eval-bleu-detok space --eval-bleu-remove-bpe --eval-bleu-print-samples --eval-tokenized-bleu \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
--save-interval 1 --save-interval-updates 10000 \
--keep-best-checkpoints 5 --save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
    --log-format 'simple' --log-interval 100 2> >(tee -a ${log_txt}) | tee -a ${log_txt}
DA-Transformer-main/examples/DA-Transformer/wmt14_ende.sh
data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
log_txt=/path/to/logfile
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base source_old --upsample-scale 8 \
--filter-max-length 128:1024 --filter-ratio 2 \
\
`# DA-Transformer Architecture Configs` \
--arch ls_glat_decomposed_link_base \
--links-feature feature:position \
--max-source-positions 128 --max-target-positions 1024 \
--encoder-learned-pos --decoder-learned-pos \
--share-all-embeddings --activation-fn gelu --apply-bert-init \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 8.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0 --max-transition-length 99999 \
--glat-p 0.5:0.1@200k --glance-strategy number-random \
--no-force-emit \
\
`# Optimizer & Regularizer Configs` \
--optimizer ls_adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.1 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 0.0005 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
--max-tokens 4096 --max-tokens-valid 4096 --update-freq 2 \
--max-update 300000 --grouped-shuffling \
--max-encoder-batch-tokens 8000 --max-decoder-batch-tokens 34000 \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 1 --validate-interval-updates 10000 \
--eval-bleu --eval-bleu-detok space --eval-bleu-remove-bpe --eval-bleu-print-samples --eval-tokenized-bleu \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
--save-interval 1 --save-interval-updates 10000 \
--keep-best-checkpoints 5 --save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
    --log-format 'simple' --log-interval 100 2> >(tee -a ${log_txt}) | tee -a ${log_txt}

DA-Transformer-main/examples/DA-Transformer/wmt17_enzh.sh

data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
log_txt=/path/to/logfile
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base source_old --upsample-scale 8 \
--filter-max-length 115:920 --filter-ratio 2 \
\
`# DA-Transformer Architecture Configs` \
--arch ls_glat_decomposed_link_base \
--links-feature feature:position \
--max-source-positions 115 --max-target-positions 920 \
--encoder-learned-pos --decoder-learned-pos \
--activation-fn gelu --apply-bert-init \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 8.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0 --max-transition-length 99999 \
--glat-p 0.5:0.1@200k --glance-strategy number-random \
--no-force-emit \
\
`# Optimizer & Regularizer Configs` \
--optimizer ls_adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.1 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 0.0005 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
--max-tokens 4096 --max-tokens-valid 4096 --update-freq 2 \
--max-update 300000 --grouped-shuffling \
--max-encoder-batch-tokens 8000 --max-decoder-batch-tokens 34000 \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 1 --validate-interval-updates 10000 \
--eval-bleu --eval-bleu-detok moses --eval-bleu-remove-bpe --eval-bleu-print-samples \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
--save-interval 1 --save-interval-updates 10000 \
--keep-best-checkpoints 5 --save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
    --log-format 'simple' --log-interval 100 2> >(tee -a ${log_txt}) | tee -a ${log_txt}

DA-Transformer-main/examples/DA-Transformer/wmt17_zhen.sh

data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
log_txt=/path/to/logfile
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base source_old --upsample-scale 8 \
--filter-max-length 115:920 --filter-ratio 2 \
\
`# DA-Transformer Architecture Configs` \
--arch ls_glat_decomposed_link_base \
--links-feature feature:position \
--max-source-positions 115 --max-target-positions 920 \
--encoder-learned-pos --decoder-learned-pos \
--activation-fn gelu --apply-bert-init \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 8.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0 --max-transition-length 99999 \
--glat-p 0.5:0.1@200k --glance-strategy number-random \
--no-force-emit \
\
`# Optimizer & Regularizer Configs` \
--optimizer ls_adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.1 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 0.0005 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
--max-tokens 4096 --max-tokens-valid 4096 --update-freq 2 \
--max-update 300000 --grouped-shuffling \
--max-encoder-batch-tokens 8000 --max-decoder-batch-tokens 34000 \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 1 --validate-interval-updates 10000 \
--eval-bleu --eval-bleu-detok space --eval-bleu-remove-bpe --eval-bleu-print-samples --eval-tokenized-bleu \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
--save-interval 1 --save-interval-updates 10000 \
--keep-best-checkpoints 5 --save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
    --log-format 'simple' --log-interval 100 2> >(tee -a ${log_txt}) | tee -a ${log_txt}

DA-Transformer-main/examples/DA-Transformer/xsum.sh

data_dir=/path/to/binarized/data/dir
checkpoint_dir=/path/to/checkpoint/dir
tensorboard_dir=/path/to/tensorboard/dir
pretrained_model=/path/to/model.bin
log_txt=/path/to/logfile
CUDA_VISIBLE_DEVICES=0,1,2,3 fairseq-train ${data_dir} \
\
`# loading DA-Transformer plugins` \
--user-dir fs_plugins \
\
`# DA-Transformer Task Configs` \
--task translation_dat_task \
--upsample-base predict --upsample-scale 4~8 \
--seg-tokens 32 --filter-max-length 512:128 \
\
`# DA-Transformer Architecture Configs` \
--arch glat_decomposed_link_pretrain \
--links-feature feature:position --segment-embedding \
--max-source-positions 512 --max-target-positions 1024 --truncate-source \
--encoder-learned-pos --decoder-learned-pos \
--share-all-embeddings --activation-fn gelu --apply-bert-init \
--load-pretrained-model ${pretrained_model} \
\
`# DA-Transformer Decoding Configs (See more in the decoding section)` \
--decode-strategy lookahead --decode-upsample-scale 6.0 \
\
`# DA-Transformer Criterion Configs` \
--criterion nat_dag_loss \
--length-loss-factor 0.1 --max-transition-length 99999 \
--glat-p 0.5 --glance-strategy number-random \
--no-force-emit \
\
`# Optimizer & Regularizer Configs` \
--optimizer adam --adam-betas '(0.9,0.999)' --fp16 \
--label-smoothing 0.0 --weight-decay 0.01 --dropout 0.3 \
--lr-scheduler inverse_sqrt --warmup-updates 10000 \
--clip-norm 0.1 --lr 2e-4 --warmup-init-lr '1e-07' --stop-min-lr '1e-09' \
\
`# Training Configs` \
    `# these args lead to about 4k target tokens in a batch` \
--max-tokens 15000 --max-tokens-valid 10000 --update-freq 1 \
--max-tokens-after-upsample \
--max-encoder-batch-tokens 20000 --max-decoder-batch-tokens 20000 \
--max-update 100000 --grouped-shuffling \
--seed 0 --ddp-backend c10d --required-batch-size-multiple 1 \
\
`# Validation Configs` \
--valid-subset valid \
--validate-interval 9999999 --validate-interval-updates 5000 \
--eval-bleu --eval-bleu-detok space --eval-bleu-remove-bpe bert --eval-bleu-print-samples --eval-bleu-order 4 \
--fixed-validation-seed 7 \
\
`# Checkpoint Configs` \
--best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
--save-interval 9999999 --save-interval-updates 5000 \
--keep-best-checkpoints 5 --save-dir ${checkpoint_dir} \
\
`# Logging Configs` \
--tensorboard-logdir ${tensorboard_dir} \
    --log-format 'simple' --log-interval 100 2> >(tee -a ${log_txt}) | tee -a ${log_txt}

DA-Transformer-main/examples/DA-Transformer/evaluation/evaluate_personachat.py

# pip install pycocoevalcap
# pip install nltk
from collections import Counter
import numpy as np
from argparse import ArgumentParser
from pycocoevalcap.bleu.bleu import Bleu
def distinct(seqs):
""" Calculate intra/inter distinct 1/2. """
batch_size = len(seqs)
intra_dist1, intra_dist2 = [], []
unigrams_all, bigrams_all = Counter(), Counter()
for seq in seqs:
unigrams = Counter(seq)
bigrams = Counter(zip(seq, seq[1:]))
intra_dist1.append((len(unigrams)+1e-12) / (len(seq)+1e-5))
intra_dist2.append((len(bigrams)+1e-12) / (max(0, len(seq)-1)+1e-5))
unigrams_all.update(unigrams)
bigrams_all.update(bigrams)
inter_dist1 = (len(unigrams_all)+1e-12) / (sum(unigrams_all.values())+1e-5)
inter_dist2 = (len(bigrams_all)+1e-12) / (sum(bigrams_all.values())+1e-5)
intra_dist1 = np.average(intra_dist1)
intra_dist2 = np.average(intra_dist2)
return intra_dist1, intra_dist2, inter_dist1, inter_dist2
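# A hedged worked example (not from the original repo): for seqs = [["a", "b", "a", "b"]],
# the sequence has 2 unique unigrams over 4 tokens and 2 unique bigrams over 3, so
# intra_dist1 ~= 0.5 and intra_dist2 ~= 0.67; with a single sequence, the inter-level
# distinct scores coincide with the intra-level ones.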
def bleu(hyps, refs):
""" Calculate bleu 1/2. """
ref_len = 0
hyp_len = 0
gts = {}
res = {}
for i, (hyp, ref) in enumerate(zip(hyps, refs)):
ref_len += len(ref)
hyp_len += len(hyp)
gts[i] = [" ".join(ref)]
res[i] = [" ".join(hyp)]
score, scores = Bleu(4).compute_score(gts, res)
return score[0], score[1], hyp_len / ref_len
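# Note: pycocoevalcap's Bleu expects {id: [string]} dicts for both references and
# hypotheses; compute_score returns corpus-level BLEU-1..4 plus per-sentence scores.
# Only BLEU-1/2 and the hypothesis/reference length ratio are reported here.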
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--golden-file', dest="golden_file", help='Input data file, one golden per line.')
parser.add_argument('--pred-file', dest="pred_file", help='Model predictions.')
args = parser.parse_args()
with open(args.pred_file, encoding='utf-8') as fin:
preds = fin.readlines()
preds = [line.strip().split(" ") for line in preds]
with open(args.golden_file, encoding='utf-8') as fin:
golds = fin.readlines()
golds = [line.strip().split(" ") for line in golds]
bleu1, bleu2, ratio = bleu(preds, golds)
intra_dist1, intra_dist2, inter_dist1, inter_dist2 = distinct(preds)
print(bleu1 * 100., bleu2 * 100., ratio, inter_dist1, inter_dist2)
DA-Transformer-main/examples/DA-Transformer/evaluation/evaluate_quora.py

# pip install pycocoevalcap
#!/usr/bin/env python
from __future__ import print_function
__author__ = 'xinya'
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.cider.cider import Cider
from collections import defaultdict
from argparse import ArgumentParser
import string
import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
_tok_dict = {
# "(": "-lrb-", ")": "-rrb-",
# "[": "-lsb-", "]": "-rsb-",
# "{": "-lcb-", "}": "-rcb-",
# "[UNK]": "UNK", '&': '&', '<': '<', '>': '>'
}
def _is_digit(w, comma=','):
for ch in w:
if not(ch.isdigit() or ch == comma):
return False
return True
def detokenize(tk_list):
r_list = []
for tk in tk_list:
if tk.startswith('##') and len(r_list) > 0:
r_list[-1] = r_list[-1] + tk[2:]
else:
r_list.append(tk)
return r_list
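# Illustrative example: detokenize(["quest", "##ion", "mark"]) -> ["question", "mark"];
# WordPiece continuation pieces ("##...") are merged into the preceding token.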
def fix_tokenization(text):
input_tokens = text.split()
output_tokens = []
has_left_quote = False
has_left_single_quote = False
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok in _tok_dict.keys():
output_tokens.append(_tok_dict[tok])
i += 1
elif tok == "\"":
if has_left_quote:
output_tokens.append("''")
else:
output_tokens.append("``")
has_left_quote = not has_left_quote
i += 1
elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens.append("n't")
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "'":
output_tokens.append("''")
i += 1
elif tok == "`":
output_tokens.append("``")
i += 1
# if has_left_single_quote:
# output_tokens.append("''")
# else:
# output_tokens.append("``")
# has_left_single_quote = not has_left_single_quote
# i += 1
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and _is_digit(output_tokens[-1], '.') and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
# 3 . 03 -> $ 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
#elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i += 2
elif tok == "-":
if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
output_tokens.append("--")
i += 2
elif i == len(input_tokens) - 1 or i == 0:
output_tokens.append("-")
i += 1
elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
output_tokens[-1] += "-"
i += 1
flag_prev_dash = True
else:
output_tokens.append("-")
i += 1
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens)
class QGEvalCap:
def __init__(self, gts, res):
self.gts = gts
self.res = res
def evaluate(self):
output = []
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
(Meteor(), "METEOR"),
(Rouge(), "ROUGE_L"),
# (Cider(), "CIDEr")
]
# =================================================
# Compute scores
# =================================================
for scorer, method in scorers:
# print 'computing %s score...'%(scorer.method())
score, scores = scorer.compute_score(self.gts, self.res)
if type(method) == list:
for sc, scs, m in zip(score, scores, method):
print("%s: %0.5f" % (m, sc))
output.append((m, sc))
else:
print("%s: %0.5f" % (method, score))
output.append((method, score))
return output
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500,
dataset=None, dataset_split='validation', src_key=None, tgt_key=None, fix_token=True):
"""
Given a filename, calculate the metric scores for that prediction file
    isDIn: boolean flag indicating whether the input file is DirectIn.txt
"""
pairs = []
if dataset:
import datasets
for i in datasets.load_dataset(dataset, cache_dir='./data')[dataset_split]:
pairs.append({
'tokenized_sentence': i[src_key].strip().lower(),
'tokenized_question': i[tgt_key].strip().lower(),
})
else:
with open(src_file, 'r') as infile:
for line in infile:
pair = {}
pair['tokenized_sentence'] = line[:-1].strip().lower()
pairs.append(pair)
with open(tgt_file, "r") as infile:
cnt = 0
for line in infile:
pairs[cnt]['tokenized_question'] = line[:-1].strip().lower()
cnt += 1
output = []
with open(out_file, 'r') as infile:
for line in infile:
if fix_token:
line = fix_tokenization(line[:-1].strip()).lower()
else:
line = line[:-1].strip().lower()
output.append(line)
for idx, pair in enumerate(pairs):
pair['prediction'] = output[idx]
# eval
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.4f')
res = defaultdict(lambda: [])
gts = defaultdict(lambda: [])
for pair in pairs[:]:
key = pair['tokenized_sentence']#.encode('utf-8')
res[key] = [pair['prediction']]#.encode('utf-8')]
# gts
gts[key].append(pair['tokenized_question'])#.encode('utf-8'))
QGEval = QGEvalCap(gts, res)
return QGEval.evaluate()
#python eval.py --out '/Users/royokong/Downloads/ckpt-32000.dev' \
#--src '/Users/royokong/nlp/unilm-nat/data/squadqg_data/org_data/dev.src'\
#--tgt '/Users/royokong/nlp/unilm-nat/data/squadqg_data/org_data/dev.tgt'
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-out", "--out_file", dest="out_file",
default="./output/pred.txt", help="output file to compare")
parser.add_argument("-src", "--src_file", dest="src_file",
default="./qg_data/test/test.pa.txt", help="src file")
parser.add_argument("-tgt", "--tgt_file", dest="tgt_file",
default="./qg_data/nqg_processed_data/tgt-test.txt", help="target file")
args = parser.parse_args()
print("scores: \n")
    eval(args.out_file, args.src_file, args.tgt_file)

DA-Transformer-main/examples/DA-Transformer/evaluation/evaluate_rocstory.py

# pip install pycocoevalcap
# pip install nltk
from collections import Counter
from nltk import ngrams
import numpy as np
from argparse import ArgumentParser
import string
from pycocoevalcap.bleu.bleu import Bleu
_tok_dict = {"(": "-lrb-", ")": "-rrb-",
"[": "-lsb-", "]": "-rsb-",
"{": "-lcb-", "}": "-rcb-",
"[UNK]": "UNK", '&': '&', '<': '<', '>': '>'}
def repetition_distinct(cands):
result = {}
for i in range(1, 5):
num, all_ngram, all_ngram_num = 0, {}, 0.
for k, cand in enumerate(cands):
ngs = ["_".join(c) for c in ngrams(cand, i)]
all_ngram_num += len(ngs)
for s in ngs:
if s in all_ngram:
all_ngram[s] += 1
else:
all_ngram[s] = 1
for s in set(ngs):
if ngs.count(s) > 1:
# if i >= 3:
# print(s)
# print(" ".join(cand))
# err()
num += 1
break
result["repetition-%d"%i] = num / float(len(cands))
result["distinct-%d"%i] = len(all_ngram) / float(all_ngram_num)
return result
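# A hedged worked example: for cands = [["a", "b", "a", "b"]] and n = 2, the bigrams are
# ["a_b", "b_a", "a_b"]; "a_b" occurs twice within the candidate, so repetition-2 = 1.0,
# while distinct-2 = 2 unique bigrams / 3 total ~= 0.67.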
def bleu(hyps, refs):
""" Calculate bleu 1/2. """
ref_len = 0
hyp_len = 0
gts = {}
res = {}
for i, (hyp, ref) in enumerate(zip(hyps, refs)):
ref_len += len(ref)
hyp_len += len(hyp)
gts[i] = [" ".join(ref)]
res[i] = [" ".join(hyp)]
score, scores = Bleu(4).compute_score(gts, res)
return score[0], score[1], hyp_len / ref_len
def _is_digit(w):
for ch in w:
if not(ch.isdigit() or ch == ','):
return False
return True
def fix_tokenization(text):
input_tokens = text.split()
output_tokens = []
has_left_quote = False
has_left_single_quote = False
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok in _tok_dict.keys():
output_tokens.append(_tok_dict[tok])
i += 1
elif tok == "\"":
if has_left_quote:
output_tokens.append("''")
else:
output_tokens.append("``")
has_left_quote = not has_left_quote
i += 1
elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens.append("n't")
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "'":
if has_left_single_quote:
output_tokens.append("'")
else:
output_tokens.append("`")
has_left_single_quote = not has_left_single_quote
i += 1
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
# 3 . 03 -> $ 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i += 2
elif tok == "-":
if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
output_tokens.append("--")
i += 2
elif i == len(input_tokens) - 1 or i == 0:
output_tokens.append("-")
i += 1
elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
output_tokens[-1] += "-"
i += 1
flag_prev_dash = True
else:
output_tokens.append("-")
i += 1
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--golden-file', dest="golden_file", help='Input data file, one golden per line.')
parser.add_argument('--pred-file', dest="pred_file", help='Model predictions.')
args = parser.parse_args()
with open(args.pred_file, encoding='utf-8') as fin:
preds = fin.readlines()
preds = [fix_tokenization(line.strip()).split(" ") for line in preds]
with open(args.golden_file, encoding='utf-8') as fin:
golds = fin.readlines()
golds = [line.strip().split(" ") for line in golds]
bleu1, bleu2, ratio = bleu(preds, golds)
result = repetition_distinct(preds)
print(bleu1 * 100., bleu2 * 100., ratio, result['distinct-4'] * 100)
DA-Transformer-main/examples/DA-Transformer/evaluation/evaluate_squad1.1.py

# pip install pycocoevalcap
#!/usr/bin/env python
from __future__ import print_function
__author__ = 'xinya'
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.cider.cider import Cider
from collections import defaultdict
from argparse import ArgumentParser
import string
import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
_tok_dict = {"(": "-lrb-", ")": "-rrb-",
"[": "-lsb-", "]": "-rsb-",
"{": "-lcb-", "}": "-rcb-",
"[UNK]": "UNK", '&': '&', '<': '<', '>': '>'}
def _is_digit(w):
for ch in w:
if not(ch.isdigit() or ch == ','):
return False
return True
def detokenize(tk_list):
r_list = []
for tk in tk_list:
if tk.startswith('##') and len(r_list) > 0:
r_list[-1] = r_list[-1] + tk[2:]
else:
r_list.append(tk)
return r_list
def fix_tokenization(text):
input_tokens = text.split()
output_tokens = []
has_left_quote = False
has_left_single_quote = False
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok in _tok_dict.keys():
output_tokens.append(_tok_dict[tok])
i += 1
elif tok == "\"":
if has_left_quote:
output_tokens.append("''")
else:
output_tokens.append("``")
has_left_quote = not has_left_quote
i += 1
elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens.append("n't")
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "'":
if has_left_single_quote:
output_tokens.append("'")
else:
output_tokens.append("`")
has_left_single_quote = not has_left_single_quote
i += 1
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
# 3 . 03 -> $ 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i += 2
elif tok == "-":
if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
output_tokens.append("--")
i += 2
elif i == len(input_tokens) - 1 or i == 0:
output_tokens.append("-")
i += 1
elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
output_tokens[-1] += "-"
i += 1
flag_prev_dash = True
else:
output_tokens.append("-")
i += 1
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens)
class QGEvalCap:
def __init__(self, gts, res):
self.gts = gts
self.res = res
def evaluate(self):
output = []
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
(Meteor(), "METEOR"),
(Rouge(), "ROUGE_L"),
# (Cider(), "CIDEr")
]
# =================================================
# Compute scores
# =================================================
for scorer, method in scorers:
# print 'computing %s score...'%(scorer.method())
score, scores = scorer.compute_score(self.gts, self.res)
if type(method) == list:
for sc, scs, m in zip(score, scores, method):
print("%s: %0.5f" % (m, sc))
output.append((m, sc))
else:
print("%s: %0.5f" % (method, score))
output.append((method, score))
return output
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500,
dataset=None, dataset_split='validation', src_key=None, tgt_key=None, fix_token=False):
"""
Given a filename, calculate the metric scores for that prediction file
    isDIn: boolean flag indicating whether the input file is DirectIn.txt
"""
pairs = []
if dataset:
import datasets
for i in datasets.load_dataset(dataset, cache_dir='./data')[dataset_split]:
pairs.append({
'tokenized_sentence': i[src_key].strip().lower(),
'tokenized_question': i[tgt_key].strip().lower(),
})
else:
with open(src_file, 'r') as infile:
for line in infile:
pair = {}
pair['tokenized_sentence'] = line[:-1].strip().lower()
pairs.append(pair)
with open(tgt_file, "r") as infile:
cnt = 0
for line in infile:
pairs[cnt]['tokenized_question'] = line[:-1].strip().lower()
cnt += 1
output = []
with open(out_file, 'r') as infile:
for line in infile:
if fix_token:
line = fix_tokenization(line[:-1].strip()).lower()
else:
line = line[:-1].strip().lower()
output.append(line)
for idx, pair in enumerate(pairs):
pair['prediction'] = output[idx]
# eval
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.4f')
res = defaultdict(lambda: [])
gts = defaultdict(lambda: [])
for pair in pairs[:]:
key = pair['tokenized_sentence']#.encode('utf-8')
res[key] = [pair['prediction']]#.encode('utf-8')]
# gts
gts[key].append(pair['tokenized_question'])#.encode('utf-8'))
QGEval = QGEvalCap(gts, res)
return QGEval.evaluate()
#python eval.py --out '/Users/royokong/Downloads/ckpt-32000.dev' \
#--src '/Users/royokong/nlp/unilm-nat/data/squadqg_data/org_data/dev.src'\
#--tgt '/Users/royokong/nlp/unilm-nat/data/squadqg_data/org_data/dev.tgt'
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-out", "--out_file", dest="out_file", help="output file to compare")
parser.add_argument("-src", "--src_file", dest="src_file",
default="./dataset/squad/test.src", help="src file")
parser.add_argument("-tgt", "--tgt_file", dest="tgt_file",
default="./dataset/squad/test.tgt", help="target file")
args = parser.parse_args()
print("scores: \n")
    eval(args.out_file, args.src_file, args.tgt_file)

DA-Transformer-main/examples/DA-Transformer/evaluation/evaluate_xsum.py

# pip install pycocoevalcap
# check https://github.com/pltrdy/files2rouge to install files2rouge
#!/usr/bin/env python
from __future__ import print_function
__author__ = 'xinya'
from collections import defaultdict
from argparse import ArgumentParser
import string
import os
import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
_tok_dict = {"(": "-lrb-", ")": "-rrb-",
"[": "-lsb-", "]": "-rsb-",
"{": "-lcb-", "}": "-rcb-",
"[UNK]": "UNK", '&': '&', '<': '<', '>': '>'}
def _is_digit(w):
for ch in w:
if not(ch.isdigit() or ch == ','):
return False
return True
def detokenize(tk_list):
r_list = []
for tk in tk_list:
if tk.startswith('##') and len(r_list) > 0:
r_list[-1] = r_list[-1] + tk[2:]
else:
r_list.append(tk)
return r_list
def fix_tokenization(text):
input_tokens = text.split()
output_tokens = []
has_left_quote = False
has_left_single_quote = False
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok in _tok_dict.keys():
output_tokens.append(_tok_dict[tok])
i += 1
elif tok == "\"":
if has_left_quote:
output_tokens.append("''")
else:
output_tokens.append("``")
has_left_quote = not has_left_quote
i += 1
elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens.append("n't")
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "'":
if has_left_single_quote:
output_tokens.append("'")
else:
output_tokens.append("`")
has_left_single_quote = not has_left_single_quote
i += 1
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
# 3 . 03 -> $ 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i += 2
elif tok == "-":
if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
output_tokens.append("--")
i += 2
elif i == len(input_tokens) - 1 or i == 0:
output_tokens.append("-")
i += 1
elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
output_tokens[-1] += "-"
i += 1
flag_prev_dash = True
else:
output_tokens.append("-")
i += 1
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens)
def fix_tokenization_xsum(text):
input_tokens = text.replace(" ##", "").strip().split()
output_tokens = []
has_left_quote = False
has_left_single_quote = False
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens[-1] += "n't"
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll", "re"):
if len(output_tokens) > 0:
output_tokens[-1] += "'"+input_tokens[i + 1]
else:
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
# 3 . 03 -> $ 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i += 2
elif tok == "-":
if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
output_tokens.append("--")
i += 2
elif i == len(input_tokens) - 1 or i == 0:
output_tokens.append("-")
i += 1
elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
output_tokens[-1] += "-"
i += 1
flag_prev_dash = True
else:
output_tokens.append("-")
i += 1
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens)
def eval(out_file, src_file, tgt_file):
"""
    Given a filename, calculate the metric scores for that prediction file
"""
pairs = []
with open(src_file, 'r') as infile:
for line in infile:
pair = {}
pair['tokenized_sentence'] = line[:-1].strip().lower()
pairs.append(pair)
reflen = 0
with open(tgt_file, "r") as infile:
cnt = 0
for line in infile:
pairs[cnt]['tokenized_question'] = line[:-1].strip()
reflen += len(pairs[cnt]['tokenized_question'].split())
cnt += 1
genlen = 0
output = []
with open(out_file, 'r') as infile:
for line in infile:
# if fix_token:
line = fix_tokenization(line[:-1].strip()).lower()
genlen += len(line.split())
# else:
# line = line[:-1].strip().lower()
output.append(line)
# with open(".xsum_output.txt", "w") as f:
# for line in output:
# f.write(line + "\n")
# return os.system(f"files2rouge .xsum_output.txt {tgt_file}")
print(f"genlen: {genlen}, reflen: {reflen}, ratio: {genlen / reflen}")
#python eval.py --out '/Users/royokong/Downloads/ckpt-32000.dev' \
#--src '/Users/royokong/nlp/unilm-nat/data/squadqg_data/org_data/dev.src'\
#--tgt '/Users/royokong/nlp/unilm-nat/data/squadqg_data/org_data/dev.tgt'
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-out", "--out_file", dest="out_file", help="output file to compare")
parser.add_argument("-src", "--src_file", dest="src_file",
default="./xsum/test.src", help="src file")
parser.add_argument("-tgt", "--tgt_file", dest="tgt_file",
default="./xsum/test.tgt", help="target file")
parser.add_argument("--fast", action="store_true")
args = parser.parse_args()
eval(args.out_file, args.src_file, args.tgt_file)
print("scores: \n")
if args.fast:
os.system(f"files2rouge {args.out_file} {args.tgt_file} -a \"-c 95 -r 10 -n 2 -a\"")
else:
os.system(f"files2rouge {args.out_file} {args.tgt_file}") | 9,622 | 38.277551 | 244 | py |
null | DA-Transformer-main/examples/DA-Transformer/evaluation/extract_log.py | import sys
import re
import argparse
res = {}
for line in sys.stdin.readlines():
m = re.search(r"H-([0-9]+):?\s+(?:[\-0-9.infe]*)\s+(\S.*)$", line)
if m:
        res[int(m.group(1))] = m.group(2).replace("## ", "")
for i in range(len(res)):
print(res[i])
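# Typical usage (assumption: the input is fairseq-generate output, whose hypothesis
# lines look like "H-<id>\t<score>\t<tokens>"):
#   fairseq-generate ... | python extract_log.py > hyp.txt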
DA-Transformer-main/examples/mass/README.md

The code is modified from https://github.com/microsoft/MASS/tree/master/MASS-summarization
DA-Transformer-main/examples/mass/__init__.py

from . import masked_s2s
from . import s2s_model
from . import translation
DA-Transformer-main/examples/mass/bert_dictionary.py

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from multiprocessing import Pool
import os
import torch
from fairseq.tokenizer import tokenize_line
# from fairseq.binarizer import safe_readline
from fairseq.data import data_utils, Dictionary
class BertDictionary(Dictionary):
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
pad='<pad>',
eos='</s>',
unk='<unk>',
bos='<s>',
extra_special_symbols=None,
):
super().__init__(pad=pad, eos=eos, unk=unk,
bos=bos, extra_special_symbols=extra_special_symbols)
@classmethod
def load_from_file(cls, filename, extra_symbols=None):
d = cls()
d.symbols = []
d.count = []
d.indices = {}
with open(filename, 'r', encoding='utf-8', errors='ignore') as input_file:
for line in input_file:
k, v = line.split()
d.add_symbol(k)
d.unk_word = '[UNK]'
d.pad_word = '[PAD]'
d.eos_word = '[SEP]'
d.bos_word = '[CLS]'
d.bos_index = d.add_symbol('[CLS]')
d.pad_index = d.add_symbol('[PAD]')
d.eos_index = d.add_symbol('[SEP]')
d.unk_index = d.add_symbol('[UNK]')
d.nspecial = 999
if extra_symbols is not None:
for w in extra_symbols:
d.add_symbol(w)
return d
def save(self, f):
"""Stores dictionary into a text file"""
ex_keys, ex_vals = self._get_meta()
self._save(f, zip(ex_keys + self.symbols, ex_vals + self.count))
DA-Transformer-main/examples/mass/hub_interface.py

##########################################################################
# Copyright (C) 2022 COAI @ Tsinghua University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
# This file is modified from https://github.com/facebookresearch/fairseq/blob/main/fairseq/models/bart/hub_interface.py
import logging
from typing import Dict, List
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.hub_utils import GeneratorHubInterface
logger = logging.getLogger(__name__)
class MASSHubInterface(GeneratorHubInterface):
def __init__(self, cfg, task, model):
super().__init__(cfg, task, [model])
self.model = self.models[0]
def encode(
self, sentence: str, *addl_sentences, no_separator=True
) -> torch.LongTensor:
"""
BPE-encode a sentence (or multiple sentences).
Every sequence begins with a beginning-of-sentence (`<s>`) symbol.
Every sentence ends with an end-of-sentence (`</s>`).
Example (single sentence): `<s> a b c </s>`
Example (sentence pair): `<s> d e f </s> 1 2 3 </s>`
The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE
requires leading spaces. For example::
>>> bart.encode('Hello world').tolist()
[0, 31414, 232, 2]
>>> bart.encode(' world').tolist()
[0, 232, 2]
>>> bart.encode('world').tolist()
[0, 8331, 2]
"""
if self.tokenizer:
sentence = self.tokenizer.encode(sentence)
tokens = self.bpe.encode(sentence)
max_position = self.max_positions[0] - 2
if len(tokens.split(" ")) > max_position:
if self.task.cfg.truncate_source:
tokens = " ".join(tokens.split(" ")[:max_position])
else:
raise RuntimeError(f"Input is too long. Current input length: {len(tokens.split(' '))}. Supported max length: {max_position}.")
bpe_sentence = "<s> " + tokens + " </s>"
for s in addl_sentences:
bpe_sentence += " </s>" if not no_separator else ""
bpe_sentence += " " + self.bpe.encode(s) + " </s>"
tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False, add_if_not_exist=False)
return tokens.long()
def decode(self, tokens: torch.LongTensor):
assert tokens.dim() == 1
tokens = tokens.cpu().numpy()
if tokens[0] == self.task.target_dictionary.bos():
tokens = tokens[1:] # remove <s>
eos_mask = tokens == self.task.target_dictionary.eos()
doc_mask = eos_mask[1:] & eos_mask[:-1]
sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
sentences = [
self.bpe.decode(self.task.target_dictionary.string(s)) for s in sentences
]
if self.tokenizer:
sentences = [
self.tokenizer.decode(s) for s in sentences
]
if len(sentences) == 1:
return sentences[0]
return sentences
def _build_sample(self, src_tokens: List[torch.LongTensor]):
# assert torch.is_tensor(src_tokens)
dataset = self.task.build_dataset_for_inference(
src_tokens,
[x.numel() for x in src_tokens],
)
sample = dataset.collater(dataset)
sample = utils.apply_to_sample(lambda tensor: tensor.to(self.device), sample)
return sample
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
*args,
inference_step_args=None,
skip_invalid_size_inputs=False,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
inference_step_args = inference_step_args or {}
if "prefix_tokens" in inference_step_args:
raise NotImplementedError("prefix generation not implemented for MASS")
res = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
src_tokens = batch["net_input"]["src_tokens"]
results = super().generate(
src_tokens,
*args,
inference_step_args=inference_step_args,
skip_invalid_size_inputs=skip_invalid_size_inputs,
**kwargs
)
for id, hypos in zip(batch["id"].tolist(), results):
res.append((id, hypos))
res = [hypos for _, hypos in sorted(res, key=lambda x: x[0])]
return res
def extract_features(
self, tokens: torch.LongTensor, return_all_hiddens: bool = False
) -> torch.Tensor:
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
if tokens.size(-1) > min(self.model.max_positions()):
raise ValueError(
"tokens exceeds maximum length: {} > {}".format(
tokens.size(-1), self.model.max_positions()
)
)
        tokens = tokens.to(device=self.device)
prev_output_tokens = tokens.clone()
prev_output_tokens[:, 0] = tokens.gather(
1,
(tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1),
).squeeze()
prev_output_tokens[:, 1:] = tokens[:, :-1]
features, extra = self.model(
src_tokens=tokens,
src_lengths=None,
prev_output_tokens=prev_output_tokens,
features_only=True,
return_all_hiddens=return_all_hiddens,
)
if return_all_hiddens:
# convert from T x B x C -> B x T x C
inner_states = extra["inner_states"]
return [inner_state.transpose(0, 1) for inner_state in inner_states]
else:
return features # just the last layer's features
def register_classification_head(
self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
):
# self.model.register_classification_head(
# name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
# )
raise NotImplementedError("MASS does not support classfication")
def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
# if tokens.dim() == 1:
# tokens = tokens.unsqueeze(0)
# features = self.extract_features(tokens.to(device=self.device))
# sentence_representation = features[
# tokens.eq(self.task.source_dictionary.eos()), :
# ].view(features.size(0), -1, features.size(-1))[:, -1, :]
# logits = self.model.classification_heads[head](sentence_representation)
# if return_logits:
# return logits
# return F.log_softmax(logits, dim=-1)
raise NotImplementedError("MASS does not support classfication")
def fill_mask(
self,
masked_inputs: List[str],
**generate_kwargs
):
raise NotImplementedError("MASS does not support fill_mask")
DA-Transformer-main/examples/mass/learned_positional_embedding.py

import torch.nn as nn
from fairseq import utils
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
def forward(self, input, incremental_state=None, positions=None):
"""Input is expected to be of size [bsz x seqlen]."""
assert (
(positions is None) or (self.padding_idx is None)
), "If positions is pre-computed then padding_idx should not be set."
if positions is None:
if incremental_state is not None:
# positions is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
positions = input.data.new(1, 1).fill_(int(self.padding_idx + input.size(1)))
else:
positions = utils.make_positions(
input.data, self.padding_idx, onnx_trace=self.onnx_trace,
)
return super().forward(positions)
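    # Note: during incremental decoding, the single-step position is computed as
    # padding_idx + input.size(1), matching the convention of fairseq's
    # utils.make_positions, where positions start at padding_idx + 1.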
def max_positions(self):
"""Maximum number of supported positions."""
if self.padding_idx is not None:
return self.num_embeddings - self.padding_idx - 1
else:
return self.num_embeddings
def _forward(self, positions):
return super().forward(positions)
DA-Transformer-main/examples/mass/masked_dataset.py

import numpy as np
import torch
import random
import time
import math
from fairseq import utils
from fairseq.data import data_utils, LanguagePairDataset
class MaskedLanguagePairDataset(LanguagePairDataset):
""" Wrapper for masked language datasets
(support monolingual and bilingual)
For monolingual dataset:
[x1, x2, x3, x4, x5]
||
VV
[x1, _, _, x4, x5] => [x2, x3]
default, _ will be replaced by 8:1:1 (mask, self, rand),
"""
def __init__(
self,
src, src_sizes, src_dict,
tgt=None, tgt_sizes=None, tgt_dict=None,
left_pad_source=True, left_pad_target=False,
max_source_positions=1024, max_target_positions=1024,
shuffle=True, mask_prob=0.15, pred_probs=None, block_size=64,
):
self.src = src
self.tgt = tgt
self.src_sizes = src_sizes
self.tgt_sizes = tgt_sizes
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.left_pad_source = left_pad_source
self.left_pad_target = left_pad_target
self.shuffle = shuffle
self.mask_prob = mask_prob
self.pred_probs = pred_probs
self.block_size = block_size
def __getitem__(self, index):
pkgs = {'id': index}
tgt_item = self.tgt[index] if self.tgt is not None else None
src_item = self.src[index]
positions = np.arange(0, len(self.src[index]))
masked_pos = []
for i in range(1, len(src_item), self.block_size):
block = positions[i: i + self.block_size]
masked_len = int(len(block) * self.mask_prob)
masked_block_start = np.random.choice(block[:len(block) - int(masked_len) + 1], 1)[0]
masked_pos.extend(positions[masked_block_start : masked_block_start + masked_len])
masked_pos = np.array(masked_pos)
pkgs['target'] = src_item[masked_pos].clone()
pkgs['prev_output_tokens'] = src_item[masked_pos - 1].clone()
pkgs['positions'] = torch.LongTensor(masked_pos) + self.src_dict.pad_index
src_item[masked_pos] = self.replace(src_item[masked_pos])
pkgs['source'] = src_item
return pkgs
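    # Illustrative sample (following the class docstring; the masked indices are an
    # assumption): if the source is [x1, x2, x3, x4, x5] and positions {1, 2} are
    # masked, then target = [x2, x3], prev_output_tokens = [x1, x2] (tokens one step
    # to the left), positions = the masked indices shifted by pad_index, and source
    # is returned with x2, x3 replaced via self.replace().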
def collate(self, samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False):
if len(samples) == 0:
return {}
def merge(x, left_pad, move_eos_to_beginning=False):
return data_utils.collate_tokens(
x, pad_idx, eos_idx, left_pad, move_eos_to_beginning
)
id = torch.LongTensor([s['id'] for s in samples])
source = merge([s['source'] for s in samples], left_pad=left_pad_source)
src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
prev_output_tokens = merge([s['prev_output_tokens'] for s in samples], left_pad=left_pad_target)
positions = merge([s['positions'] for s in samples], left_pad=left_pad_target)
target = merge([s['target'] for s in samples], left_pad=left_pad_target)
ntokens = target.numel()
batch = {
'id' : id,
'nsentences': len(samples),
'net_input' : {
'src_lengths': src_lengths,
'src_tokens' : source,
'prev_output_tokens': prev_output_tokens,
'positions' : positions,
},
'target' : target,
'ntokens': ntokens,
}
return batch
def collater(self, samples):
return self.collate(samples, self.src_dict.pad(), self.src_dict.eos())
def size(self, index):
return self.src.sizes[index]
def replace(self, x):
_x_real = x
_x_rand = _x_real.clone().random_(self.src_dict.nspecial, len(self.src_dict))
_x_mask = _x_real.clone().fill_(self.src_dict.index('[MASK]'))
probs = torch.multinomial(self.pred_probs, len(x), replacement=True)
_x = _x_mask * (probs == 0).long() + \
_x_real * (probs == 1).long() + \
_x_rand * (probs == 2).long()
return _x
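    # With pred_probs proportional to (0.8, 0.1, 0.1) -- the 8:1:1 split from the
    # class docstring -- each selected token becomes [MASK] with prob 0.8, is kept
    # unchanged with prob 0.1, and is replaced by a random vocabulary token with
    # prob 0.1, mirroring BERT-style masking.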
DA-Transformer-main/examples/mass/masked_s2s.py

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
from collections import OrderedDict
from fairseq import utils
from fairseq.data import (
data_utils,
Dictionary,
TokenBlockDataset,
)
from fairseq.tasks import FairseqTask, register_task
from .masked_dataset import MaskedLanguagePairDataset
from .bert_dictionary import BertDictionary
@register_task('masked_s2s')
class MaskedS2STask(FairseqTask):
"""
Train a sequence-to-sequence task
Args:
dictionary (~fairseq.data.Dictionary): the dictionary for the input of
the language model
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='path to data directory')
parser.add_argument('--sample-break-mode', default='none',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of tokens per sample for text dataset')
parser.add_argument('--lazy-load', action='store_true',
help='load the dataset lazily')
parser.add_argument('--raw-text', default=False, action='store_true',
help='load raw text dataset')
parser.add_argument('--mask-s2s-prob', default=0.15, type=float,
help='probability of replacing a token with mask')
parser.add_argument('--mask-s2s-mask-keep-rand', default="0.8,0.1,0.1", type=str,
help='Word prediction probability for decoder mask')
# fmt: on
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
if getattr(args, 'raw_text', False):
utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')
args.dataset_impl = 'raw'
elif getattr(args, 'lazy_load', False):
utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')
args.dataset_impl = 'lazy'
paths = args.data.split(':')
dictionary = cls.load_dictionary(os.path.join(paths[0], 'dict.txt'))
print('| dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
@classmethod
def load_dictionary(cls, filename):
return BertDictionary.load_from_file(filename)
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
model.train()
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
return model
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(':')
assert len(paths) > 0
data_path = paths[epoch % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
self.datasets[split] = self.build_s2s_dataset(dataset)
def build_s2s_dataset(self, dataset):
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample,
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode=self.args.sample_break_mode,
)
pred_probs = torch.FloatTensor([float(x) for x in self.args.mask_s2s_mask_keep_rand.split(',')])
s2s_dataset = MaskedLanguagePairDataset(
dataset, dataset.sizes, self.source_dictionary,
shuffle=True, mask_prob=self.args.mask_s2s_prob,
pred_probs=pred_probs,
)
return s2s_dataset
def build_dataset_for_inference(self, src_tokens, src_lengths):
raise NotImplementedError
def inference_step(self, generator, models, sample, prefix_tokens=None):
raise NotImplementedError
@property
def source_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.dictionary
def max_positions(self):
max_positions = 1024
if hasattr(self.args, 'max_positions'):
max_positions = min(max_positions, self.args.max_positions)
if hasattr(self.args, 'max_source_positions'):
max_positions = min(max_positions, self.args.max_source_positions)
if hasattr(self.args, 'max_target_positions'):
max_positions = min(max_positions, self.args.max_target_positions)
return (max_positions, max_positions)
DA-Transformer-main/examples/mass/s2s_model.py

import math
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
MultiheadAttention,
LayerNorm,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .learned_positional_embedding import LearnedPositionalEmbedding
from fairseq.file_io import PathManager
DEFAULT_MAX_SOURCE_POSITIONS = 512
DEFAULT_MAX_TARGET_POSITIONS = 512
@register_model('transformer_mass')
class TransformerMASSModel(FairseqEncoderDecoderModel):
"""
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--load-from-pretrained-model', type=str, default=None,
help='Load from pretrained model')
# fmt: on
@classmethod
def from_pretrained(
cls,
model_name_or_path,
**kwargs,
):
from .hub_interface import MASSHubInterface
from fairseq import checkpoint_utils, file_utils
model_path = file_utils.load_archive_file(model_name_or_path)
config_path = os.path.join(model_path, "config.json")
if os.path.exists(config_path):
config = json.load(open(config_path, 'r'))
for key, value in config.items():
if key not in kwargs:
kwargs[key] = value
kwargs["data"] = model_path
# convenience hack for loading data and BPE codes from model archive
# if data_name_or_path.startswith("."):
# kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path))
# else:
# kwargs["data"] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
"code": "bpe_codes",
"bpecodes": "bpe_codes",
"sentencepiece.bpe.model": "sentencepiece_model",
"merges.txt": "bpe_merges",
"vocab.json": "bpe_vocab",
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
# utils.import_user_module(argparse.Namespace(user_dir=f"{os.path.dirname(os.path.abspath(__file__))}/../"))
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, kwargs['checkpoint_file'])],
arg_overrides=kwargs,
)
return MASSHubInterface(args, task, models[0])
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim
)
encoder = TransformerEncoder(args, src_dict, encoder_embed_tokens)
decoder = TransformerDecoder(args, tgt_dict, decoder_embed_tokens)
model = TransformerMASSModel(encoder, decoder)
if args.load_from_pretrained_model is not None:
state_dict = torch.load(PathManager.open(args.load_from_pretrained_model, 'rb'), map_location='cpu')
k0 = (f"encoder.embed_tokens.weight", encoder_embed_tokens)
k1 = (f"decoder.embed_tokens.weight", decoder_embed_tokens)
k2 = (f"decoder.embed_out.weight", decoder.embed_tokens if decoder.share_input_output_embed else decoder.embed_out)
now_vocab_size = len(tgt_dict.symbols)
for k, emb in [k0, k1, k2]:
if k in state_dict and state_dict[k].shape[0] < now_vocab_size:
state_dict[k], tmp = emb.weight.clone(), state_dict[k]
state_dict[k][:tmp.shape[0]] = tmp
model.load_state_dict(state_dict)
args.load_from_pretrained_model = None # Clear this param
        model = TransformerMASSModel(encoder, decoder)  # rebuild the wrapper; encoder/decoder keep the loaded weights
model.src_dict = src_dict
model.tgt_dict = tgt_dict
return model
def max_positions(self):
return (self.encoder.max_positions(), self.decoder.max_positions())
def forward(self, src_tokens=None, src_lengths=None, prev_output_tokens=None, **kwargs):
"""
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., teacher forcing) to
the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, **kwargs)
return decoder_out
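# A minimal usage sketch (not from the original file) of the teacher-forcing
# forward above; ``model`` and the toy token ids are hypothetical:
#   src_tokens = torch.LongTensor([[5, 6, 7, 2]])           # (batch=1, src_len=4)
#   src_lengths = torch.LongTensor([4])
#   prev_output_tokens = torch.LongTensor([[2, 8, 9, 10]])  # eos-led, right-shifted target
#   logits, extra = model(src_tokens, src_lengths, prev_output_tokens)
#   # logits: (batch, tgt_len, vocab)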
class TransformerEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: float = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = 'relu',
add_bias_kv: bool = False,
add_zero_attn: bool = False,
export: bool = False,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=True
)
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
):
"""
        LayerNorm is applied either before or after the self-attention/ffn
        modules, similar to the original Transformer implementation.
"""
residual = x
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
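# A minimal shape check (not from the original file) for the post-LN layer
# above, x = LayerNorm(x + Sublayer(x)), using the default sizes:
if __name__ == "__main__":
    _layer = TransformerEncoderLayer()
    _x = torch.zeros(7, 2, 768)  # (T, B, C), the layout MultiheadAttention expects
    _y, _ = _layer(_x)
    assert _y.shape == (7, 2, 768)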
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: float = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = 'relu',
add_bias_kv: bool = False,
add_zero_attn: bool = False,
export: bool = False,
):
super().__init__()
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=True
)
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
self.encoder_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
kdim=embedding_dim,
vdim=embedding_dim,
dropout=attention_dropout,
encoder_decoder_attention=True,
)
self.encoder_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
self.need_attn = False
def forward(
self,
x,
encoder_out=None,
encoder_mask=None,
incremental_state=None,
prev_self_attn_state=None,
prev_attn_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
):
residual = x
if prev_self_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_self_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.self_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
if prev_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.encoder_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
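# Note (not from the original file): during incremental decoding,
# ``prev_self_attn_state``/``prev_attn_state`` are (prev_key, prev_value) pairs
# that re-seed the attention caches via _set_input_buffer, so each generation
# step only feeds the newest target position through the layer instead of
# re-running the whole prefix.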
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = LearnedPositionalEmbedding(
args.max_source_positions + 1 + self.padding_idx, embed_dim, self.padding_idx,
)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(
args.encoder_embed_dim,
args.encoder_ffn_embed_dim,
args.encoder_attention_heads,
args.dropout,
args.attention_dropout,
args.activation_dropout,
args.activation_fn,
)
for i in range(args.encoder_layers)
])
self.emb_layer_norm = LayerNorm(embed_dim)
self.apply(init_bert_params)
def forward(self, src_tokens, src_lengths, **unused):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x += self.embed_positions(src_tokens)
if self.emb_layer_norm:
x = self.emb_layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
if encoder_padding_mask is not None:
x *= 1 - encoder_padding_mask.unsqueeze(-1).type_as(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# encoder layers
for layer in self.layers:
x, _ = layer(x, self_attn_padding_mask=encoder_padding_mask)
return {
'encoder_out': x, # T x B x C
'encoder_padding_mask': encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out['encoder_out'] is not None:
encoder_out['encoder_out'] = \
encoder_out['encoder_out'].index_select(1, new_order)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
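# Worked example (not from the original file) of the padding-mask convention
# above: the mask is True at pad positions, e.g. with padding_idx=1,
#   src_tokens = [[5, 6, 1]]  ->  encoder_padding_mask = [[False, False, True]]
# and the embeddings at masked positions are zeroed before the first layer.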
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_dim = embed_dim
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.embed_positions = LearnedPositionalEmbedding(
args.max_target_positions + 1 + self.padding_idx, embed_dim, self.padding_idx,
)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerDecoderLayer(
args.decoder_embed_dim,
args.decoder_ffn_embed_dim,
args.decoder_attention_heads,
args.dropout,
args.attention_dropout,
args.activation_dropout,
args.activation_fn,
)
for _ in range(args.decoder_layers)
])
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.embed_dim))
nn.init.normal_(self.embed_out, mean=0, std=self.embed_dim ** -0.5)
self.emb_layer_norm = LayerNorm(embed_dim)
self.apply(init_bert_params)
def forward(self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
**unused):
x, extra = self.extract_features(prev_output_tokens, encoder_out, incremental_state, **unused)
x = self.output_layer(x)
return x, extra
def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused):
# embed positions
if 'positions' in unused:
positions = self.embed_positions._forward(unused['positions'])
else:
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if positions is not None:
x += positions
if self.emb_layer_norm:
x = self.emb_layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for layer in self.layers:
x, attn = layer(
x,
encoder_out['encoder_out'] if encoder_out is not None else None,
encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
)
inner_states.append(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {'attn': attn, 'inner_states': inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions())
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device or self._future_mask.size(0) < dim:
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
return self._future_mask[:dim, :dim]
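# Worked example (not from the original file): buffered_future_mask puts -inf
# strictly above the diagonal, so position t attends only to positions <= t.
# For dim=3 the mask is:
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]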
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
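# A minimal check (not from the original file) that the Embedding helper above
# zeroes the padding row after the normal init:
if __name__ == "__main__":
    _emb = Embedding(num_embeddings=10, embedding_dim=4, padding_idx=1)
    assert _emb.weight[1].abs().sum().item() == 0.0  # pad row stays all-zero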
@register_model_architecture('transformer_mass', 'transformer_mass')
def base_architecture(args):
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.)
args.activation_dropout = getattr(args, 'activation_dropout', 0.)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 2048)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
@register_model_architecture('transformer_mass', 'transformer_mass_base')
def transformer_base(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_dropout = getattr(args, 'activation_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 12)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', True)
base_architecture(args)
@register_model_architecture('transformer_mass', 'transformer_mass_middle')
def transformer_middle(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
transformer_base(args)
@register_model_architecture('transformer_mass', 'transformer_mass_big')
def transformer_big(args):
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.decoder_layers = getattr(args, 'decoder_layers', 12)
transformer_middle(args)
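# Summary (not from the original file) of the architecture defaults registered above:
#   transformer_mass        : d_model=512,  ffn=2048, heads=8,  layers=6, relu
#   transformer_mass_base   : d_model=768,  ffn=3072, heads=12, layers=6, gelu, shared embeddings
#   transformer_mass_middle : d_model=1024, ffn=4096, heads=16, layers=6  (otherwise like base)
#   transformer_mass_big    : like middle, but with 12 encoder and 12 decoder layers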
| 28,612 | 37.406711 | 154 | py |
null | DA-Transformer-main/examples/mass/translation.py | #from fairseq.data import BertDictionary
from fairseq.tasks import register_task
from fairseq import metrics, utils
from fairseq.tasks.translation import TranslationTask, TranslationConfig
from .bert_dictionary import BertDictionary
import torch
import logging
logger = logging.getLogger(__name__)
@register_task('translation_mass', dataclass=TranslationConfig)
class TranslationMASSTask(TranslationTask):
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
@classmethod
def load_dictionary(cls, filename):
return BertDictionary.load_from_file(filename)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.cfg.max_source_positions, self.cfg.max_target_positions)
def valid_step(self, sample, model, criterion, ema_model=None):
if ema_model is not None:
model = ema_model
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
        EVAL_BLEU_ORDER = self.cfg.eval_bleu_order
        import sacrebleu
        # Patch sacrebleu once so that BLEU n-grams are extracted up to
        # eval_bleu_order instead of the library default.
        if sacrebleu.BLEU.NGRAM_ORDER != self.cfg.eval_bleu_order:
            sacrebleu.BLEU.NGRAM_ORDER = self.cfg.eval_bleu_order
            func = sacrebleu.BLEU.extract_ngrams
            sacrebleu.BLEU.extract_ngrams = lambda x: func(x, min_order=1, max_order=self.cfg.eval_bleu_order)
if self.cfg.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
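# Note (not from the original file): the per-order BLEU statistics are logged
# as separate scalars so they can be fast-stat-synced across workers; e.g. for
# eval_bleu_order=4, logging_output gains _bleu_counts_0.._bleu_counts_3 and
# _bleu_totals_0.._bleu_totals_3 plus the system/reference lengths, which is
# enough to recompute corpus BLEU after aggregation.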
| 2,164 | 39.849057 | 114 | py |
null | DA-Transformer-main/examples/transformer/__init__.py | 0 | 0 | 0 | py | |
null | DA-Transformer-main/examples/transformer/hub_interface.py | ##########################################################################
# Copyright (C) 2022 COAI @ Tsinghua University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
# This file is modified from https://github.com/facebookresearch/fairseq/blob/main/fairseq/models/bart/hub_interface.py
import logging
from typing import Dict, List
import os, json
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.hub_utils import GeneratorHubInterface
logger = logging.getLogger(__name__)
class TransformerHubInterface(GeneratorHubInterface):
def __init__(self, cfg, task, model):
super().__init__(cfg, task, [model])
self.model = self.models[0]
def encode(
self, sentence: str, *addl_sentences, no_separator=True
) -> torch.LongTensor:
"""
BPE-encode a sentence (or multiple sentences).
Every sequence begins with a beginning-of-sentence (`<s>`) symbol.
Every sentence ends with an end-of-sentence (`</s>`).
Example (single sentence): `<s> a b c </s>`
Example (sentence pair): `<s> d e f </s> 1 2 3 </s>`
The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE
requires leading spaces. For example::
>>> bart.encode('Hello world').tolist()
[0, 31414, 232, 2]
>>> bart.encode(' world').tolist()
[0, 232, 2]
>>> bart.encode('world').tolist()
[0, 8331, 2]
"""
if self.tokenizer:
sentence = self.tokenizer.encode(sentence)
tokens = self.bpe.encode(sentence)
max_position = self.max_positions[0] - 2
if len(tokens.split(" ")) > max_position:
if self.task.cfg.truncate_source:
tokens = " ".join(tokens.split(" ")[:max_position])
else:
raise RuntimeError(f"Input is too long. Current input length: {len(tokens.split(' '))}. Supported max length: {max_position}.")
bpe_sentence = "<s> " + tokens + " </s>"
for s in addl_sentences:
bpe_sentence += " </s>" if not no_separator else ""
bpe_sentence += " " + self.bpe.encode(s) + " </s>"
tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False, add_if_not_exist=False)
return tokens.long()
def decode(self, tokens: torch.LongTensor):
assert tokens.dim() == 1
tokens = tokens.cpu().numpy()
if tokens[0] == self.task.target_dictionary.bos():
tokens = tokens[1:] # remove <s>
eos_mask = tokens == self.task.target_dictionary.eos()
doc_mask = eos_mask[1:] & eos_mask[:-1]
sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
sentences = [
self.bpe.decode(self.task.target_dictionary.string(s)) for s in sentences
]
if self.tokenizer:
sentences = [
self.tokenizer.decode(s) for s in sentences
]
if len(sentences) == 1:
return sentences[0]
return sentences
def _build_sample(self, src_tokens: List[torch.LongTensor]):
# assert torch.is_tensor(src_tokens)
dataset = self.task.build_dataset_for_inference(
src_tokens,
[x.numel() for x in src_tokens],
)
sample = dataset.collater(dataset)
sample = utils.apply_to_sample(lambda tensor: tensor.to(self.device), sample)
return sample
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
*args,
inference_step_args=None,
skip_invalid_size_inputs=False,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
inference_step_args = inference_step_args or {}
if "prefix_tokens" in inference_step_args:
raise NotImplementedError("prefix generation not implemented for Transformer")
res = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
src_tokens = batch["net_input"]["src_tokens"]
results = super().generate(
src_tokens,
*args,
inference_step_args=inference_step_args,
skip_invalid_size_inputs=skip_invalid_size_inputs,
**kwargs
)
for id, hypos in zip(batch["id"].tolist(), results):
res.append((id, hypos))
res = [hypos for _, hypos in sorted(res, key=lambda x: x[0])]
return res
def extract_features(
self, tokens: torch.LongTensor, return_all_hiddens: bool = False
) -> torch.Tensor:
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
if tokens.size(-1) > min(self.model.max_positions()):
raise ValueError(
"tokens exceeds maximum length: {} > {}".format(
tokens.size(-1), self.model.max_positions()
)
)
        tokens = tokens.to(device=self.device)  # .to() is not in-place; rebind so the tensor actually moves
        prev_output_tokens = tokens.clone()
prev_output_tokens[:, 0] = tokens.gather(
1,
(tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1),
).squeeze()
prev_output_tokens[:, 1:] = tokens[:, :-1]
features, extra = self.model(
src_tokens=tokens,
src_lengths=None,
prev_output_tokens=prev_output_tokens,
features_only=True,
return_all_hiddens=return_all_hiddens,
)
if return_all_hiddens:
# convert from T x B x C -> B x T x C
inner_states = extra["inner_states"]
return [inner_state.transpose(0, 1) for inner_state in inner_states]
else:
return features # just the last layer's features
def register_classification_head(
self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
):
# self.model.register_classification_head(
# name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
# )
raise NotImplementedError("Transformer does not support classfication")
def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
# if tokens.dim() == 1:
# tokens = tokens.unsqueeze(0)
# features = self.extract_features(tokens.to(device=self.device))
# sentence_representation = features[
# tokens.eq(self.task.source_dictionary.eos()), :
# ].view(features.size(0), -1, features.size(-1))[:, -1, :]
# logits = self.model.classification_heads[head](sentence_representation)
# if return_logits:
# return logits
# return F.log_softmax(logits, dim=-1)
raise NotImplementedError("Transformer does not support classfication")
def fill_mask(
self,
masked_inputs: List[str],
**generate_kwargs
):
raise NotImplementedError("Transformer does not support fill_mask")
@classmethod
def from_pretrained(
cls,
model_name_or_path,
**kwargs,
):
from fairseq import checkpoint_utils, file_utils
model_path = file_utils.load_archive_file(model_name_or_path)
config_path = os.path.join(model_path, "config.json")
if os.path.exists(config_path):
config = json.load(open(config_path, 'r'))
for key, value in config.items():
if key not in kwargs:
kwargs[key] = value
kwargs["data"] = model_path
# convenience hack for loading data and BPE codes from model archive
# if data_name_or_path.startswith("."):
# kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path))
# else:
# kwargs["data"] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
"code": "bpe_codes",
"bpecodes": "bpe_codes",
"sentencepiece.bpe.model": "sentencepiece_model",
"merges.txt": "bpe_merges",
"vocab.json": "bpe_vocab",
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
# utils.import_user_module(argparse.Namespace(user_dir=f"{os.path.dirname(os.path.abspath(__file__))}/../"))
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, kwargs['checkpoint_file'])],
arg_overrides=kwargs,
)
return TransformerHubInterface(args, task, models[0])
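# A minimal usage sketch (not from the original file); the model directory and
# checkpoint file name are hypothetical placeholders:
if __name__ == "__main__":
    hub = TransformerHubInterface.from_pretrained(
        "path/to/model_dir", checkpoint_file="checkpoint_best.pt"  # hypothetical
    )
    tokens = hub.encode("Hello world")
    print(hub.decode(tokens))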
| 9,235 | 38.135593 | 143 | py |
null | DA-Transformer-main/fairseq/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import os
import sys
try:
from .version import __version__ # noqa
except ImportError:
version_txt = os.path.join(os.path.dirname(__file__), "version.txt")
with open(version_txt) as f:
__version__ = f.read().strip()
__all__ = ["pdb"]
# backwards compatibility to support `from fairseq.X import Y`
from fairseq.distributed import utils as distributed_utils
from fairseq.logging import meters, metrics, progress_bar # noqa
sys.modules["fairseq.distributed_utils"] = distributed_utils
sys.modules["fairseq.meters"] = meters
sys.modules["fairseq.metrics"] = metrics
sys.modules["fairseq.progress_bar"] = progress_bar
# initialize hydra
from fairseq.dataclass.initialize import hydra_init
hydra_init()
import fairseq.criterions # noqa
import fairseq.distributed # noqa
import fairseq.models # noqa
import fairseq.modules # noqa
import fairseq.optim # noqa
import fairseq.optim.lr_scheduler # noqa
import fairseq.pdb # noqa
import fairseq.scoring # noqa
import fairseq.tasks # noqa
import fairseq.token_generation_constraints # noqa
import fairseq.benchmark # noqa
import fairseq.model_parallel # noqa
| 1,337 | 28.086957 | 72 | py |
null | DA-Transformer-main/fairseq/binarizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import typing as tp
from abc import ABC, abstractmethod
from collections import Counter
from dataclasses import dataclass
from multiprocessing import Pool
import torch
from fairseq.data import Dictionary, indexed_dataset
from fairseq.file_chunker_utils import Chunker, find_offsets
from fairseq.file_io import PathManager
from fairseq.tokenizer import tokenize_line
logger = logging.getLogger("binarizer")
@dataclass
class BinarizeSummary:
"""
Keep track of what's going on in the binarizer
"""
num_seq: int = 0
replaced: tp.Optional[Counter] = None
num_tok: int = 0
@property
def num_replaced(self) -> int:
if self.replaced is None:
return 0
return sum(self.replaced.values())
@property
def replaced_percent(self) -> float:
return 100 * self.num_replaced / self.num_tok
def __str__(self) -> str:
base = f"{self.num_seq} sents, {self.num_tok} tokens"
if self.replaced is None:
return base
return f"{base}, {self.replaced_percent:.3}% replaced"
def merge(self, other: "BinarizeSummary"):
replaced = None
if self.replaced is not None:
replaced = self.replaced
if other.replaced is not None:
if replaced is None:
replaced = other.replaced
else:
replaced += other.replaced
self.replaced = replaced
self.num_seq += other.num_seq
self.num_tok += other.num_tok
class Binarizer(ABC):
"""
a binarizer describes how to take a string and build a tensor out of it
"""
@abstractmethod
def binarize_line(
self,
line: str,
summary: BinarizeSummary,
) -> torch.IntTensor:
...
def _worker_prefix(output_prefix: str, worker_id: int):
return f"{output_prefix}.pt{worker_id}"
class FileBinarizer:
"""
    A file binarizer takes a file, tokenizes it, and binarizes each line to a tensor
"""
@classmethod
def multiprocess_dataset(
cls,
input_file: str,
dataset_impl: str,
binarizer: Binarizer,
output_prefix: str,
vocab_size=None,
num_workers=1,
) -> BinarizeSummary:
final_summary = BinarizeSummary()
offsets = find_offsets(input_file, num_workers)
        # find_offsets returns a list of positions [pos1, pos2, pos3, pos4], but we want pairs:
# [(pos1, pos2), (pos2, pos3), (pos3, pos4)] to process the chunks with start/end info
# we zip the list with itself shifted by one to get all the pairs.
(first_chunk, *more_chunks) = zip(offsets, offsets[1:])
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
worker_results = [
pool.apply_async(
cls._binarize_chunk_and_finalize,
args=(
binarizer,
input_file,
start_offset,
end_offset,
_worker_prefix(
output_prefix,
worker_id,
),
dataset_impl,
),
kwds={
"vocab_size": vocab_size,
}
if vocab_size is not None
else {},
)
for worker_id, (start_offset, end_offset) in enumerate(
more_chunks, start=1
)
]
pool.close()
pool.join()
for r in worker_results:
summ = r.get()
final_summary.merge(summ)
# do not close the bin file as we need to merge the worker results in
final_ds, summ = cls._binarize_file_chunk(
binarizer,
input_file,
offset_start=first_chunk[0],
offset_end=first_chunk[1],
output_prefix=output_prefix,
dataset_impl=dataset_impl,
vocab_size=vocab_size if vocab_size is not None else None,
)
final_summary.merge(summ)
if num_workers > 1:
for worker_id in range(1, num_workers):
# merge the worker outputs
worker_output_prefix = _worker_prefix(
output_prefix,
worker_id,
)
final_ds.merge_file_(worker_output_prefix)
try:
os.remove(indexed_dataset.data_file_path(worker_output_prefix))
os.remove(indexed_dataset.index_file_path(worker_output_prefix))
except Exception as e:
logger.error(
f"couldn't remove {worker_output_prefix}.*", exc_info=e
)
# now we can close the file
idx_file = indexed_dataset.index_file_path(output_prefix)
final_ds.finalize(idx_file)
return final_summary
@staticmethod
def _binarize_file_chunk(
binarizer: Binarizer,
filename: str,
offset_start: int,
offset_end: int,
output_prefix: str,
dataset_impl: str,
vocab_size=None,
) -> tp.Tuple[tp.Any, BinarizeSummary]: # (dataset builder, BinarizeSummary)
"""
        creates a dataset builder and appends binarized items to it. This function does not
        finalize the builder, which is useful if you want to do other things with the bin file,
        like appending/merging other files.
"""
bin_file = indexed_dataset.data_file_path(output_prefix)
ds = indexed_dataset.make_builder(
bin_file,
impl=dataset_impl,
vocab_size=vocab_size,
)
summary = BinarizeSummary()
with Chunker(
PathManager.get_local_path(filename), offset_start, offset_end
) as line_iterator:
for line in line_iterator:
ds.add_item(binarizer.binarize_line(line, summary))
return ds, summary
@classmethod
def _binarize_chunk_and_finalize(
cls,
binarizer: Binarizer,
filename: str,
offset_start: int,
offset_end: int,
output_prefix: str,
dataset_impl: str,
vocab_size=None,
):
"""
same as above, but also finalizes the builder
"""
ds, summ = cls._binarize_file_chunk(
binarizer,
filename,
offset_start,
offset_end,
output_prefix,
dataset_impl,
vocab_size=vocab_size,
)
idx_file = indexed_dataset.index_file_path(output_prefix)
ds.finalize(idx_file)
return summ
class VocabularyDatasetBinarizer(Binarizer):
"""
    Takes a Dictionary/Vocabulary and assigns ids to each
    token using the dictionary's encode_line function.
"""
def __init__(
self,
dict: Dictionary,
tokenize: tp.Callable[[str], tp.List[str]] = tokenize_line,
append_eos: bool = True,
reverse_order: bool = False,
already_numberized: bool = False,
) -> None:
self.dict = dict
self.tokenize = tokenize
self.append_eos = append_eos
self.reverse_order = reverse_order
self.already_numberized = already_numberized
super().__init__()
def binarize_line(
self,
line: str,
summary: BinarizeSummary,
):
if summary.replaced is None:
summary.replaced = Counter()
def replaced_consumer(word, idx):
if idx == self.dict.unk_index and word != self.dict.unk_word:
summary.replaced.update([word])
if self.already_numberized:
id_strings = line.strip().split()
id_list = [int(id_string) for id_string in id_strings]
if self.reverse_order:
id_list.reverse()
if self.append_eos:
id_list.append(self.dict.eos())
ids = torch.IntTensor(id_list)
else:
ids = self.dict.encode_line(
line=line,
line_tokenizer=self.tokenize,
add_if_not_exist=False,
consumer=replaced_consumer,
append_eos=self.append_eos,
reverse_order=self.reverse_order,
)
summary.num_seq += 1
summary.num_tok += len(ids)
return ids
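# A minimal sketch (not from the original file) binarizing one line with a toy
# vocabulary; the sample text is made up:
if __name__ == "__main__":
    _d = Dictionary()
    for _w in "hello world".split():
        _d.add_symbol(_w)
    _binarizer = VocabularyDatasetBinarizer(_d)
    _summary = BinarizeSummary()
    _ids = _binarizer.binarize_line("hello world", _summary)
    print(_ids.tolist(), "|", str(_summary))  # ids for hello, world + eos; "1 sents, 3 tokens, ..."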
class AlignmentDatasetBinarizer(Binarizer):
"""
binarize by parsing a set of alignments and packing
them in a tensor (see utils.parse_alignment)
"""
def __init__(
self,
alignment_parser: tp.Callable[[str], torch.IntTensor],
) -> None:
super().__init__()
self.alignment_parser = alignment_parser
def binarize_line(
self,
line: str,
summary: BinarizeSummary,
):
ids = self.alignment_parser(line)
summary.num_seq += 1
summary.num_tok += len(ids)
return ids
class LegacyBinarizer:
@classmethod
def binarize(
cls,
filename: str,
dico: Dictionary,
consumer: tp.Callable[[torch.IntTensor], None],
tokenize: tp.Callable[[str], tp.List[str]] = tokenize_line,
append_eos: bool = True,
reverse_order: bool = False,
offset: int = 0,
end: int = -1,
already_numberized: bool = False,
) -> tp.Dict[str, int]:
binarizer = VocabularyDatasetBinarizer(
dict=dico,
tokenize=tokenize,
append_eos=append_eos,
reverse_order=reverse_order,
already_numberized=already_numberized,
)
return cls._consume_file(
filename,
binarizer,
consumer,
offset_start=offset,
offset_end=end,
)
@classmethod
def binarize_alignments(
cls,
filename: str,
alignment_parser: tp.Callable[[str], torch.IntTensor],
consumer: tp.Callable[[torch.IntTensor], None],
offset: int = 0,
end: int = -1,
) -> tp.Dict[str, int]:
binarizer = AlignmentDatasetBinarizer(alignment_parser)
return cls._consume_file(
filename,
binarizer,
consumer,
offset_start=offset,
offset_end=end,
)
@staticmethod
def _consume_file(
filename: str,
binarizer: Binarizer,
consumer: tp.Callable[[torch.IntTensor], None],
offset_start: int,
offset_end: int,
) -> tp.Dict[str, int]:
summary = BinarizeSummary()
with Chunker(
PathManager.get_local_path(filename), offset_start, offset_end
) as line_iterator:
for line in line_iterator:
consumer(binarizer.binarize_line(line, summary))
return {
"nseq": summary.num_seq,
"nunk": summary.num_replaced,
"ntok": summary.num_tok,
"replaced": summary.replaced,
}
| 11,397 | 28.837696 | 99 | py |
null | DA-Transformer-main/fairseq/checkpoint_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import collections
import contextlib
import inspect
import logging
import os
import re
import time
import traceback
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, Optional, Union
import numpy as np
import torch
from fairseq.data import data_utils
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, OmegaConf, open_dict
logger = logging.getLogger(__name__)
def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss):
from fairseq import meters
# only one worker should attempt to create the required dir
if trainer.data_parallel_rank == 0:
os.makedirs(cfg.save_dir, exist_ok=True)
prev_best = getattr(save_checkpoint, "best", val_loss)
if val_loss is not None:
best_function = max if cfg.maximize_best_checkpoint_metric else min
save_checkpoint.best = best_function(val_loss, prev_best)
if cfg.no_save:
return
trainer.consolidate_optimizer() # TODO(SS): do we need this if no_save_optimizer_state
if not trainer.should_save_checkpoint_on_current_rank:
if trainer.always_call_state_dict_during_save_checkpoint:
trainer.state_dict()
return
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates")
def is_better(a, b):
return a >= b if cfg.maximize_best_checkpoint_metric else a <= b
suffix = trainer.checkpoint_suffix
checkpoint_conds = collections.OrderedDict()
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = (
end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0
)
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not end_of_epoch
and cfg.save_interval_updates > 0
and updates % cfg.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
not hasattr(save_checkpoint, "best")
or is_better(val_loss, save_checkpoint.best)
)
if val_loss is not None and cfg.keep_best_checkpoints > 0:
worst_best = getattr(save_checkpoint, "best", None)
chkpts = checkpoint_paths(
cfg.save_dir,
pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
cfg.best_checkpoint_metric, suffix
),
)
if len(chkpts) > 0:
p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0]
worst_best = float(p.rsplit("_")[-1].replace("{}.pt".format(suffix), ""))
# add random digits to resolve ties
with data_utils.numpy_seed(epoch, updates, val_loss):
rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints)
checkpoint_conds[
"checkpoint.best_{}_{:.3f}{}{}.pt".format(
cfg.best_checkpoint_metric, val_loss, rand_sfx, suffix
)
] = worst_best is None or is_better(val_loss, worst_best)
checkpoint_conds[
"checkpoint_last{}.pt".format(suffix)
] = not cfg.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if hasattr(save_checkpoint, "best"):
extra_state.update({"best": save_checkpoint.best})
checkpoints = [
os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0 and trainer.should_save_checkpoint_on_current_rank:
trainer.save_checkpoint(checkpoints[0], extra_state)
for cp in checkpoints[1:]:
if cfg.write_checkpoints_asynchronously:
# TODO[ioPath]: Need to implement a delayed asynchronous
# file copying/moving feature.
logger.warning(
f"ioPath is not copying {checkpoints[0]} to {cp} "
"since async write mode is on."
)
else:
assert PathManager.copy(
checkpoints[0], cp, overwrite=True
), f"Failed to copy {checkpoints[0]} to {cp}"
write_timer.stop()
logger.info(
"Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
if not end_of_epoch and cfg.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
if cfg.keep_interval_updates_pattern == -1:
checkpoints = checkpoint_paths(
cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix)
)
else:
checkpoints = checkpoint_paths(
cfg.save_dir,
pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix),
keep_match=True,
)
checkpoints = [
x[0]
for x in checkpoints
if x[1] % cfg.keep_interval_updates_pattern != 0
]
for old_chk in checkpoints[cfg.keep_interval_updates :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
elif PathManager.exists(old_chk):
PathManager.rm(old_chk)
if cfg.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix)
)
for old_chk in checkpoints[cfg.keep_last_epochs :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
elif PathManager.exists(old_chk):
PathManager.rm(old_chk)
if cfg.keep_best_checkpoints > 0:
# only keep the best N checkpoints according to validation metric
checkpoints = checkpoint_paths(
cfg.save_dir,
pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
cfg.best_checkpoint_metric, suffix
),
)
if not cfg.maximize_best_checkpoint_metric:
checkpoints = checkpoints[::-1]
for old_chk in checkpoints[cfg.keep_best_checkpoints :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
elif PathManager.exists(old_chk):
PathManager.rm(old_chk)
def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):
"""
Load a checkpoint and restore the training iterator.
*passthrough_args* will be passed through to
``trainer.get_train_iterator``.
"""
reset_optimizer = cfg.reset_optimizer
reset_lr_scheduler = cfg.reset_lr_scheduler
optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)
reset_meters = cfg.reset_meters
reset_dataloader = cfg.reset_dataloader
if cfg.finetune_from_model is not None and (
reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader
):
raise ValueError(
"--finetune-from-model can not be set together with either --reset-optimizer"
" or reset_lr_scheduler or reset_meters or reset_dataloader"
)
suffix = trainer.checkpoint_suffix
if (
cfg.restore_file == "checkpoint_last.pt"
): # default value of restore_file is 'checkpoint_last.pt'
checkpoint_path = os.path.join(
cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
)
first_launch = not PathManager.exists(checkpoint_path)
if first_launch and getattr(cfg, "continue_once", None) is not None:
checkpoint_path = cfg.continue_once
elif cfg.finetune_from_model is not None and first_launch:
# if there is no last checkpoint to restore, start the finetune from pretrained model
# else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc.
if PathManager.exists(cfg.finetune_from_model):
checkpoint_path = cfg.finetune_from_model
reset_optimizer = True
reset_lr_scheduler = True
reset_meters = True
reset_dataloader = True
logger.info(
f"loading pretrained model from {checkpoint_path}: "
"optimizer, lr scheduler, meters, dataloader will be reset"
)
else:
raise ValueError(
f"--funetune-from-model {cfg.finetune_from_model} does not exist"
)
elif suffix is not None:
checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt")
else:
checkpoint_path = cfg.restore_file
if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model:
raise ValueError(
"--finetune-from-model and --restore-file (non-default value) "
"can not be specified together: " + str(cfg)
)
extra_state = trainer.load_checkpoint(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
reset_meters=reset_meters,
)
if (
extra_state is not None
and "best" in extra_state
and not reset_optimizer
and not reset_meters
):
save_checkpoint.best = extra_state["best"]
if extra_state is not None and not reset_dataloader:
# restore iterator from checkpoint
itr_state = extra_state["train_iterator"]
epoch_itr = trainer.get_train_iterator(
epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
)
epoch_itr.load_state_dict(itr_state)
else:
epoch_itr = trainer.get_train_iterator(
epoch=1, load_dataset=True, **passthrough_args
)
trainer.lr_step(epoch_itr.epoch)
return extra_state, epoch_itr
def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
"""Loads a checkpoint to CPU (with upgrading for backward compatibility).
If doing single-GPU training or if the checkpoint is only being loaded by at
most one process on each node (current default behavior is for only rank 0
to read the checkpoint from disk), load_on_all_ranks should be False to
avoid errors from torch.distributed not having been initialized or
torch.distributed.barrier() hanging.
If all processes on each node may be loading the checkpoint
simultaneously, load_on_all_ranks should be set to True to avoid I/O
conflicts.
There's currently no support for > 1 but < all processes loading the
checkpoint on each node.
"""
local_path = PathManager.get_local_path(path)
# The locally cached file returned by get_local_path() may be stale for
# remote files that are periodically updated/overwritten (ex:
# checkpoint_last.pt) - so we remove the local copy, sync across processes
# (if needed), and then download a fresh copy.
if local_path != path and PathManager.path_requires_pathmanager(path):
try:
os.remove(local_path)
except FileNotFoundError:
# With potentially multiple processes removing the same file, the
# file being missing is benign (missing_ok isn't available until
# Python 3.8).
pass
if load_on_all_ranks:
torch.distributed.barrier()
local_path = PathManager.get_local_path(path)
with open(local_path, "rb") as f:
state = torch.load(f, map_location=torch.device("cpu"))
if "args" in state and state["args"] is not None and arg_overrides is not None:
args = state["args"]
for arg_name, arg_val in arg_overrides.items():
setattr(args, arg_name, arg_val)
if "cfg" in state and state["cfg"] is not None:
# hack to be able to set Namespace in dict config. this should be removed when we update to newer
# omegaconf version that supports object flags, or when we migrate all existing models
from omegaconf import _utils
old_primitive = _utils.is_primitive_type
_utils.is_primitive_type = lambda _: True
state["cfg"] = OmegaConf.create(state["cfg"])
_utils.is_primitive_type = old_primitive
OmegaConf.set_struct(state["cfg"], True)
if arg_overrides is not None:
overwrite_args_by_name(state["cfg"], arg_overrides)
state = _upgrade_state_dict(state)
return state
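# Example call (not from the original file; the path is hypothetical):
#   state = load_checkpoint_to_cpu("checkpoint_best.pt", arg_overrides={"dropout": 0.0})
# The overrides are written into state["args"]/state["cfg"] before the state is
# returned, so a model rebuilt from this config picks them up.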
def load_model_ensemble(
filenames,
arg_overrides: Optional[Dict[str, Any]] = None,
task=None,
strict=True,
suffix="",
num_shards=1,
state=None,
):
"""Loads an ensemble of models.
Args:
filenames (List[str]): checkpoint files to load
arg_overrides (Dict[str,Any], optional): override model args that
were used during model training
task (fairseq.tasks.FairseqTask, optional): task to use for loading
"""
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble, args, _task = load_model_ensemble_and_task(
filenames,
arg_overrides,
task,
strict,
suffix,
num_shards,
state,
)
return ensemble, args
def get_maybe_sharded_checkpoint_filename(
filename: str, suffix: str, shard_idx: int, num_shards: int
) -> str:
orig_filename = filename
filename = filename.replace(".pt", suffix + ".pt")
fsdp_filename = filename[:-3] + f"-shard{shard_idx}.pt"
model_parallel_filename = orig_filename[:-3] + f"_part{shard_idx}.pt"
if PathManager.exists(fsdp_filename):
return fsdp_filename
elif num_shards > 1:
return model_parallel_filename
else:
return filename
def load_model_ensemble_and_task(
filenames,
arg_overrides: Optional[Dict[str, Any]] = None,
task=None,
strict=True,
suffix="",
num_shards=1,
state=None,
):
assert state is None or len(filenames) == 1
from fairseq import tasks
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble = []
cfg = None
reload_task = task is not None
for filename in filenames:
orig_filename = filename
model_shard_state = {"shard_weights": [], "shard_metadata": []}
assert num_shards > 0
st = time.time()
for shard_idx in range(num_shards):
filename = get_maybe_sharded_checkpoint_filename(
orig_filename, suffix, shard_idx, num_shards
)
if not PathManager.exists(filename):
raise IOError("Model file not found: {}".format(filename))
if state is None:
state = load_checkpoint_to_cpu(filename, arg_overrides)
if "args" in state and state["args"] is not None:
cfg = convert_namespace_to_omegaconf(state["args"])
elif "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
else:
raise RuntimeError(
f"Neither args nor cfg exist in state keys = {state.keys()}"
)
if task is None:
task = tasks.setup_task(cfg.task)
if "task_state" in state:
task.load_state_dict(state["task_state"])
if "fsdp_metadata" in state and num_shards > 1:
model_shard_state["shard_weights"].append(state["model"])
model_shard_state["shard_metadata"].append(state["fsdp_metadata"])
# check FSDP import before the code goes too far
if not has_FSDP:
raise ImportError(
"Cannot find FullyShardedDataParallel. "
"Please install fairscale with: pip install fairscale"
)
if shard_idx == num_shards - 1:
consolidated_model_state = FSDP.consolidate_shard_weights(
shard_weights=model_shard_state["shard_weights"],
shard_metadata=model_shard_state["shard_metadata"],
)
model = task.build_model(cfg.model)
if (
"optimizer_history" in state
and len(state["optimizer_history"]) > 0
and "num_updates" in state["optimizer_history"][-1]
):
model.set_num_updates(
state["optimizer_history"][-1]["num_updates"]
)
model.load_state_dict(
consolidated_model_state, strict=strict, model_cfg=cfg.model
)
else:
# model parallel checkpoint or unsharded checkpoint
# support old external tasks
argspec = inspect.getfullargspec(task.build_model)
if "from_checkpoint" in argspec.args and "reload_task" in argspec.args:
model = task.build_model(cfg.model, from_checkpoint=True, reload_task=reload_task)
else:
model = task.build_model(cfg.model)
if (
"optimizer_history" in state
and len(state["optimizer_history"]) > 0
and "num_updates" in state["optimizer_history"][-1]
):
model.set_num_updates(state["optimizer_history"][-1]["num_updates"])
model.load_state_dict(
state["model"], strict=strict, model_cfg=cfg.model
)
# reset state so it gets loaded for the next model in ensemble
state = None
if shard_idx % 10 == 0 and shard_idx > 0:
elapsed = time.time() - st
logger.info(
f"Loaded {shard_idx} shards in {elapsed:.2f}s, {elapsed / (shard_idx+1):.2f}s/shard"
)
# build model for ensemble
ensemble.append(model)
return ensemble, cfg, task
def load_model_ensemble_and_task_from_hf_hub(
model_id,
cache_dir: Optional[str] = None,
arg_overrides: Optional[Dict[str, Any]] = None,
**kwargs: Any,
):
try:
from huggingface_hub import snapshot_download
except ImportError:
raise ImportError(
"You need to install huggingface_hub to use `load_from_hf_hub`. "
"See https://pypi.org/project/huggingface-hub/ for installation."
)
library_name = "fairseq"
cache_dir = cache_dir or (Path.home() / ".cache" / library_name).as_posix()
cache_dir = snapshot_download(
model_id, cache_dir=cache_dir, library_name=library_name, **kwargs
)
_arg_overrides = arg_overrides or {}
_arg_overrides["data"] = cache_dir
return load_model_ensemble_and_task(
[p.as_posix() for p in Path(cache_dir).glob("*.pt")],
arg_overrides=_arg_overrides,
)
def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt", keep_match=False):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = PathManager.ls(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = float(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
if keep_match:
return [(os.path.join(path, x[1]), x[0]) for x in sorted(entries, reverse=True)]
else:
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
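# Illustrative usage (an added sketch, not part of the original fairseq file).
# Assuming a local directory "checkpoints/" containing checkpoint1.pt,
# checkpoint2.pt, ..., this returns the paths sorted newest-first, which is how
# callers typically decide which old checkpoints to delete.
def _example_checkpoint_paths():
    # e.g. ["checkpoints/checkpoint3.pt", "checkpoints/checkpoint2.pt", ...]
    return checkpoint_paths("checkpoints", pattern=r"checkpoint(\d+)\.pt")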
def torch_persistent_save(obj, filename, async_write: bool = False):
if async_write:
with PathManager.opena(filename, "wb") as f:
_torch_persistent_save(obj, f)
else:
if PathManager.supports_rename(filename):
# do atomic save
with PathManager.open(filename + ".tmp", "wb") as f:
_torch_persistent_save(obj, f)
PathManager.rename(filename + ".tmp", filename)
else:
# fallback to non-atomic save
with PathManager.open(filename, "wb") as f:
_torch_persistent_save(obj, f)
def _torch_persistent_save(obj, f):
if isinstance(f, str):
with PathManager.open(f, "wb") as h:
torch_persistent_save(obj, h)
return
for i in range(3):
try:
return torch.save(obj, f)
except Exception:
if i == 2:
logger.error(traceback.format_exc())
raise
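# Illustrative usage (an added sketch, not part of the original fairseq file).
# On backends that support renames, the state is first written to
# "<filename>.tmp" and then renamed, so a crash mid-write never leaves a
# truncated checkpoint behind. The state dict and path below are hypothetical.
def _example_torch_persistent_save():
    state = {"model": {}, "extra_state": {"epoch": 1}}
    torch_persistent_save(state, "checkpoints/checkpoint_last.pt")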
def _upgrade_state_dict(state):
"""Helper for upgrading old model checkpoints."""
# add optimizer_history
if "optimizer_history" not in state:
state["optimizer_history"] = [
{"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]}
]
state["last_optimizer_state"] = state["optimizer"]
del state["optimizer"]
del state["best_loss"]
# move extra_state into sub-dictionary
if "epoch" in state and "extra_state" not in state:
state["extra_state"] = {
"epoch": state["epoch"],
"batch_offset": state["batch_offset"],
"val_loss": state["val_loss"],
}
del state["epoch"]
del state["batch_offset"]
del state["val_loss"]
# reduce optimizer history's memory usage (only keep the last state)
if "optimizer" in state["optimizer_history"][-1]:
state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"]
for optim_hist in state["optimizer_history"]:
del optim_hist["optimizer"]
# record the optimizer class name
if "optimizer_name" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG"
# move best_loss into lr_scheduler_state
if "lr_scheduler_state" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["lr_scheduler_state"] = {
"best": state["optimizer_history"][-1]["best_loss"]
}
del state["optimizer_history"][-1]["best_loss"]
# keep track of number of updates
if "num_updates" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["num_updates"] = 0
# use stateful training data iterator
if "train_iterator" not in state["extra_state"]:
state["extra_state"]["train_iterator"] = {
"epoch": state["extra_state"].get("epoch", 0),
"iterations_in_epoch": state["extra_state"].get("batch_offset", 0),
}
# backward compatibility, cfg updates
if "args" in state and state["args"] is not None:
# old model checkpoints may not have separate source/target positions
if hasattr(state["args"], "max_positions") and not hasattr(
state["args"], "max_source_positions"
):
state["args"].max_source_positions = state["args"].max_positions
state["args"].max_target_positions = state["args"].max_positions
# default to translation task
if not hasattr(state["args"], "task"):
state["args"].task = "translation"
# --raw-text and --lazy-load are deprecated
if getattr(state["args"], "raw_text", False):
state["args"].dataset_impl = "raw"
elif getattr(state["args"], "lazy_load", False):
state["args"].dataset_impl = "lazy"
# epochs start at 1
if state["extra_state"]["train_iterator"] is not None:
state["extra_state"]["train_iterator"]["epoch"] = max(
state["extra_state"]["train_iterator"].get("epoch", 1), 1
)
# --remove-bpe ==> --postprocess
if hasattr(state["args"], "remove_bpe"):
state["args"].post_process = state["args"].remove_bpe
# --min-lr ==> --stop-min-lr
if hasattr(state["args"], "min_lr"):
state["args"].stop_min_lr = state["args"].min_lr
del state["args"].min_lr
# binary_cross_entropy / kd_binary_cross_entropy => wav2vec criterion
if hasattr(state["args"], "criterion") and state["args"].criterion in [
"binary_cross_entropy",
"kd_binary_cross_entropy",
]:
state["args"].criterion = "wav2vec"
# remove log_keys if it's None (criteria will supply a default value of [])
if hasattr(state["args"], "log_keys") and state["args"].log_keys is None:
delattr(state["args"], "log_keys")
# speech_pretraining => audio pretraining
if (
hasattr(state["args"], "task")
and state["args"].task == "speech_pretraining"
):
state["args"].task = "audio_pretraining"
# audio_cpc => wav2vec
if hasattr(state["args"], "arch") and state["args"].arch == "audio_cpc":
state["args"].arch = "wav2vec"
# convert legacy float learning rate to List[float]
if hasattr(state["args"], "lr") and isinstance(state["args"].lr, float):
state["args"].lr = [state["args"].lr]
# convert task data arg to a string instead of List[string]
if (
hasattr(state["args"], "data")
and isinstance(state["args"].data, list)
and len(state["args"].data) > 0
):
state["args"].data = state["args"].data[0]
state["cfg"] = convert_namespace_to_omegaconf(state["args"])
if "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
with open_dict(cfg):
# any upgrades for Hydra-based configs
if (
"task" in cfg
and "eval_wer_config" in cfg.task
and isinstance(cfg.task.eval_wer_config.print_alignment, bool)
):
cfg.task.eval_wer_config.print_alignment = "hard"
if "generation" in cfg and isinstance(cfg.generation.print_alignment, bool):
cfg.generation.print_alignment = (
"hard" if cfg.generation.print_alignment else None
)
if (
"model" in cfg
and "w2v_args" in cfg.model
and cfg.model.w2v_args is not None
and (
hasattr(cfg.model.w2v_args, "task") or "task" in cfg.model.w2v_args
)
and hasattr(cfg.model.w2v_args.task, "eval_wer_config")
and cfg.model.w2v_args.task.eval_wer_config is not None
and isinstance(
cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool
)
):
cfg.model.w2v_args.task.eval_wer_config.print_alignment = "hard"
return state
def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):
"""Prune the given state_dict if desired for LayerDrop
(https://arxiv.org/abs/1909.11556).
Training with LayerDrop allows models to be robust to pruning at inference
time. This function prunes state_dict to allow smaller models to be loaded
from a larger model and re-maps the existing state_dict for this to occur.
It's called by functions that load models from checkpoints and does not
need to be called directly.
"""
arch = None
if model_cfg is not None:
arch = (
model_cfg._name
if isinstance(model_cfg, DictConfig)
else getattr(model_cfg, "arch", None)
)
if not model_cfg or arch is None or arch == "ptt_transformer":
        # args should not be None, but don't crash if it is.
return state_dict
encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None)
decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None)
if not encoder_layers_to_keep and not decoder_layers_to_keep:
return state_dict
# apply pruning
logger.info(
"Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
)
def create_pruning_pass(layers_to_keep, layer_name):
keep_layers = sorted(
int(layer_string) for layer_string in layers_to_keep.split(",")
)
mapping_dict = {}
for i in range(len(keep_layers)):
mapping_dict[str(keep_layers[i])] = str(i)
regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
return {"substitution_regex": regex, "mapping_dict": mapping_dict}
pruning_passes = []
if encoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
if decoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))
new_state_dict = {}
for layer_name in state_dict.keys():
match = re.search(r"\.layers\.(\d+)\.", layer_name)
# if layer has no number in it, it is a supporting layer, such as an
# embedding
if not match:
new_state_dict[layer_name] = state_dict[layer_name]
continue
# otherwise, layer should be pruned.
original_layer_number = match.group(1)
# figure out which mapping dict to replace from
for pruning_pass in pruning_passes:
if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
"substitution_regex"
].search(layer_name):
new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
substitution_match = pruning_pass["substitution_regex"].search(
layer_name
)
new_state_key = (
layer_name[: substitution_match.start(1)]
+ new_layer_number
+ layer_name[substitution_match.end(1) :]
)
new_state_dict[new_state_key] = state_dict[layer_name]
# Since layers are now pruned, *_layers_to_keep are no longer needed.
# This is more of "It would make it work fix" rather than a proper fix.
if isinstance(model_cfg, DictConfig):
context = open_dict(model_cfg)
else:
context = contextlib.ExitStack()
with context:
if hasattr(model_cfg, "encoder_layers_to_keep"):
model_cfg.encoder_layers_to_keep = None
if hasattr(model_cfg, "decoder_layers_to_keep"):
model_cfg.decoder_layers_to_keep = None
return new_state_dict
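# Illustrative sketch (added; not part of the original fairseq file). With
# encoder_layers_to_keep="0,2", layers 0 and 2 survive and are renumbered
# contiguously, so "encoder.layers.2.*" keys become "encoder.layers.1.*".
# The Namespace below is a hypothetical stand-in for a real model config, and
# plain ints stand in for weight tensors.
def _example_prune_state_dict():
    import argparse
    cfg = argparse.Namespace(
        arch="transformer",
        encoder_layers_to_keep="0,2",
        decoder_layers_to_keep=None,
    )
    sd = {f"encoder.layers.{i}.fc1.weight": i for i in range(4)}
    # -> {"encoder.layers.0.fc1.weight": 0, "encoder.layers.1.fc1.weight": 2}
    return prune_state_dict(sd, cfg)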
def load_pretrained_component_from_model(
component: Union[FairseqEncoder, FairseqDecoder],
checkpoint: str,
strict: bool = True,
):
"""
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
provided `component` object. If state_dict fails to load, there may be a
mismatch in the architecture of the corresponding `component` found in the
`checkpoint` file.
"""
if not PathManager.exists(checkpoint):
raise IOError("Model file not found: {}".format(checkpoint))
state = load_checkpoint_to_cpu(checkpoint)
if isinstance(component, FairseqEncoder):
component_type = "encoder"
elif isinstance(component, FairseqDecoder):
component_type = "decoder"
else:
raise ValueError(
"component to load must be either a FairseqEncoder or "
"FairseqDecoder. Loading other component types are not supported."
)
component_state_dict = OrderedDict()
for key in state["model"].keys():
if key.startswith(component_type):
# encoder.input_layers.0.0.weight --> input_layers.0.0.weight
component_subkey = key[len(component_type) + 1 :]
component_state_dict[component_subkey] = state["model"][key]
component.load_state_dict(component_state_dict, strict=strict)
return component
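# Illustrative usage (an added sketch, not part of the original fairseq file).
# Warm-starts an encoder from a full model checkpoint; only keys prefixed with
# "encoder." are loaded. The checkpoint path and encoder object are
# hypothetical.
def _example_load_pretrained_encoder(encoder):
    return load_pretrained_component_from_model(
        component=encoder, checkpoint="checkpoints/pretrained.pt"
    )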
def verify_checkpoint_directory(save_dir: str) -> None:
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
temp_file_path = os.path.join(save_dir, "dummy")
try:
with open(temp_file_path, "w"):
pass
except OSError as e:
logger.warning(
"Unable to access checkpoint save directory: {}".format(save_dir)
)
raise e
else:
os.remove(temp_file_path)
def save_ema_as_checkpoint(src_path, dst_path):
state = load_ema_from_checkpoint(src_path)
torch_persistent_save(state, dst_path)
def load_ema_from_checkpoint(fpath):
"""Loads exponential moving averaged (EMA) checkpoint from input and
returns a model with ema weights.
Args:
fpath: A string path of checkpoint to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
new_state = None
with PathManager.open(fpath, "rb") as f:
new_state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, "cpu")
),
)
# EMA model is stored in a separate "extra state"
model_params = new_state["extra_state"]["ema"]
for key in list(model_params.keys()):
p = model_params[key]
if isinstance(p, torch.HalfTensor):
p = p.float()
if key not in params_dict:
params_dict[key] = p.clone()
            # NOTE: clone() is needed in case p is a shared parameter
else:
raise ValueError("Key {} is repeated in EMA model params.".format(key))
if len(params_dict) == 0:
raise ValueError(
f"Input checkpoint path '{fpath}' does not contain "
"ema model weights, is this model trained with EMA?"
)
new_state["model"] = params_dict
return new_state
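# Illustrative usage (an added sketch, not part of the original fairseq file).
# Extracts the EMA weights stored under extra_state["ema"] and writes them out
# as a standalone checkpoint; both paths are hypothetical.
def _example_export_ema_weights():
    save_ema_as_checkpoint(
        "checkpoints/checkpoint_last.pt", "checkpoints/checkpoint_last_ema.pt"
    )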
# File: DA-Transformer-main/fairseq/file_chunker_utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import typing as tp
def _safe_readline(fd) -> str:
pos = fd.tell()
while True:
try:
return fd.readline()
except UnicodeDecodeError:
pos -= 1
fd.seek(pos) # search where this character begins
def find_offsets(filename: str, num_chunks: int) -> tp.List[int]:
"""
    Given a file and a number of chunks, find the offsets in the file
    that allow chunking around full lines.
"""
with open(filename, "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
_safe_readline(f)
offsets[i] = f.tell()
offsets[-1] = size
return offsets
class ChunkLineIterator:
"""
    Iterator to properly iterate over the lines of a file chunk.
"""
def __init__(self, fd, start_offset: int, end_offset: int):
self._fd = fd
self._start_offset = start_offset
self._end_offset = end_offset
def __iter__(self) -> tp.Iterable[str]:
self._fd.seek(self._start_offset)
# next(f) breaks f.tell(), hence readline() must be used
line = _safe_readline(self._fd)
while line:
pos = self._fd.tell()
            # f.tell() does not always give the byte position in the file;
            # sometimes it jumps to a very large number. It is unlikely that
            # a normal read advances the position from end_offset to
            # end_offset + 2**32 bytes (4 GB), so this check makes it unlikely
            # that the procedure breaks due to the non-deterministic behavior
            # of f.tell()
if (
self._end_offset > 0
and pos > self._end_offset
and pos < self._end_offset + 2**32
):
break
yield line
line = self._fd.readline()
class Chunker:
"""
    Context manager to read a chunk of a file line by line.
"""
def __init__(self, path: str, start_offset: int, end_offset: int):
self.path = path
self.start_offset = start_offset
self.end_offset = end_offset
def __enter__(self) -> ChunkLineIterator:
self.fd = open(self.path, "r", encoding="utf-8")
return ChunkLineIterator(self.fd, self.start_offset, self.end_offset)
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.fd.close()
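# Illustrative usage (an added sketch, not part of the original fairseq file).
# find_offsets splits a text file into line-aligned byte ranges, and each range
# can then be consumed independently (e.g. by parallel workers). The file path
# below is hypothetical.
def _example_count_lines_per_chunk(path="corpus.txt", num_chunks=4):
    offsets = find_offsets(path, num_chunks)
    counts = []
    # consecutive offset pairs delimit the chunks
    for start, end in zip(offsets, offsets[1:]):
        with Chunker(path, start, end) as lines:
            counts.append(sum(1 for _ in lines))
    return counts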
# File: DA-Transformer-main/fairseq/file_io.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import shutil
from typing import List, Optional
logger = logging.getLogger(__file__)
try:
from iopath.common.file_io import g_pathmgr as IOPathManager
try:
# [FB only - for now] AWS PathHandler for PathManager
from .fb_pathhandlers import S3PathHandler
IOPathManager.register_handler(S3PathHandler())
except KeyError:
logging.warning("S3PathHandler already registered.")
except ImportError:
logging.debug(
"S3PathHandler couldn't be imported. Either missing fb-only files, or boto3 module."
)
except ImportError:
IOPathManager = None
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
@staticmethod
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
@staticmethod
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
@staticmethod
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
@staticmethod
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
@staticmethod
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
@staticmethod
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
@staticmethod
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
@staticmethod
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
@staticmethod
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
@staticmethod
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
@staticmethod
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
@staticmethod
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
@staticmethod
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
@staticmethod
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
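# Illustrative usage (an added sketch, not part of the original fairseq file).
# Asynchronous writes via opena() must be paired with a final async_close();
# the path below is hypothetical, and iopath must be installed.
def _example_async_write():
    with PathManager.opena("checkpoints/train.log", "w") as f:
        f.write("epoch 1 done\n")
    # block until all pending asynchronous writes have been flushed
    PathManager.async_close()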
# File: DA-Transformer-main/fairseq/file_utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for working with the local dataset cache.
This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_
and `huggingface <https://github.com/huggingface>`_.
"""
import fnmatch
import json
import logging
import os
import shutil
import tarfile
import tempfile
from functools import partial, wraps
from hashlib import sha256
from io import open
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")
)
)
default_cache_path = os.path.join(torch_cache_home, "pytorch_fairseq")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_FAIRSEQ_CACHE = Path(os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path))
except (AttributeError, ImportError):
PYTORCH_FAIRSEQ_CACHE = os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def load_archive_file(archive_file):
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=None)
except EnvironmentError as err:
logger.info(err)
logger.info(
"Archive name '{}' was not found in archive name list. "
"We assumed '{}' was a path or URL but couldn't find any file "
"associated to this path or URL.".format(
archive_file,
archive_file,
)
)
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info(
"loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file
)
)
# Extract archive to temp dir and replace .tar.bz2 if necessary
tempdir = None
if not os.path.isdir(resolved_archive_file):
tempdir = tempfile.mkdtemp()
logger.info(
"extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir
)
)
ext = os.path.splitext(archive_file)[1][1:]
with tarfile.open(resolved_archive_file, "r:" + ext) as archive:
top_dir = os.path.commonprefix(archive.getnames())
archive.extractall(tempdir)
os.remove(resolved_archive_file)
shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
shutil.rmtree(tempdir)
return resolved_archive_file
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the URL's hash, delimited
    by a period.
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
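# Illustrative sketch (added; not part of the original fairseq file). The cache
# filename is the SHA-256 hex digest of the URL, optionally followed by a
# period and the digest of the ETag; the URL and ETag below are hypothetical.
def _example_url_to_filename():
    # returns a string shaped like "<64 hex chars>.<64 hex chars>"
    return url_to_filename("https://example.com/model.tar.gz", etag='"abc123"')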
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def cached_path_from_pm(url_or_filename):
"""
    Tries to resolve the specified URL to a local path using the PathManager
    class. Returns the local path on success, otherwise None.
"""
try:
from fairseq.file_io import PathManager
local_path = PathManager.get_local_path(url_or_filename)
return local_path
except Exception:
return None
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https", "s3"):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif parsed.scheme == "hfhub":
from huggingface_hub import snapshot_download
return snapshot_download(repo_id=url_or_filename.split("//")[1])
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
cached_path = cached_path_from_pm(url_or_filename)
if cached_path:
return cached_path
# Something unknown
raise ValueError(
"unable to parse {} as a URL or as a local path".format(url_or_filename)
)
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
from botocore.exceptions import ClientError
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def request_wrap_timeout(func, url):
import requests
for attempt, timeout in enumerate([10, 20, 40, 60, 60]):
try:
return func(timeout=timeout)
except requests.exceptions.Timeout as e:
logger.warning(
"Request for %s timed-out (attempt %d). Retrying with a timeout of %d secs",
url,
attempt,
timeout,
exc_info=e,
)
continue
raise RuntimeError(f"Unable to fetch file {url}")
def http_get(url, temp_file):
import requests
from tqdm import tqdm
req = request_wrap_timeout(partial(requests.get, url, stream=True), url)
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
import requests
response = request_wrap_timeout(
partial(requests.head, url, allow_redirects=True), url
)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except RuntimeError:
etag = None
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, "wb") as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
output_string = json.dumps(meta)
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, "r", encoding="utf-8") as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
# File: DA-Transformer-main/fairseq/hub_utils.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import Any, Dict, Iterator, List
import torch
from omegaconf import open_dict
from torch import nn
from fairseq import utils
from fairseq.data import encoders
logger = logging.getLogger(__name__)
def from_pretrained(
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
archive_map=None,
**kwargs
):
from fairseq import checkpoint_utils, file_utils
if archive_map is not None:
if model_name_or_path in archive_map:
model_name_or_path = archive_map[model_name_or_path]
if data_name_or_path is not None and data_name_or_path in archive_map:
data_name_or_path = archive_map[data_name_or_path]
# allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
# for each model
if isinstance(model_name_or_path, dict):
for k, v in model_name_or_path.items():
if k == "checkpoint_file":
checkpoint_file = v
elif (
k != "path"
# only set kwargs that don't already have overrides
and k not in kwargs
):
kwargs[k] = v
model_name_or_path = model_name_or_path["path"]
model_path = file_utils.load_archive_file(model_name_or_path)
# convenience hack for loading data and BPE codes from model archive
if data_name_or_path.startswith("."):
kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs["data"] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
"code": "bpe_codes",
"bpecodes": "bpe_codes",
"sentencepiece.bpe.model": "sentencepiece_model",
"merges.txt": "bpe_merges",
"vocab.json": "bpe_vocab",
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if "user_dir" in kwargs:
utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"]))
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
arg_overrides=kwargs,
)
return {
"args": args,
"task": task,
"models": models,
}
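# Illustrative usage (an added sketch, not part of the original fairseq file).
# Loads a local model archive and wraps it for generation; the directory name,
# tokenizer, and BPE choices below are hypothetical.
def _example_from_pretrained():
    loaded = from_pretrained(
        "checkpoints/wmt19.en-de",
        checkpoint_file="model.pt",
        tokenizer="moses",
        bpe="fastbpe",
    )
    hub = GeneratorHubInterface(loaded["args"], loaded["task"], loaded["models"])
    return hub.translate(["Hello world!"])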
class GeneratorHubInterface(nn.Module):
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
def __init__(self, cfg, task, models):
super().__init__()
self.cfg = cfg
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
# optimize model for generation
for model in self.models:
model.prepare_for_inference_(cfg)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(cfg.generation.replace_unk)
self.tokenizer = encoders.build_tokenizer(cfg.tokenizer)
self.bpe = encoders.build_bpe(cfg.bpe)
self.max_positions = utils.resolve_max_positions(
self.task.max_positions(), *[model.max_positions() for model in models]
)
# this is useful for determining the device
self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(
self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs
) -> List[str]:
return self.sample(sentences, beam, verbose, **kwargs)
def sample(
self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs
) -> List[str]:
if isinstance(sentences, str):
return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos]
def score(
self, sentences: List[str], replace_newline_with_eos: bool = False, **kwargs
):
if isinstance(sentences, str):
return self.score(
[sentences], replace_newline_with_eos=replace_newline_with_eos, **kwargs
)[0]
def encode(sentence):
if replace_newline_with_eos:
return torch.cat([self.encode(line) for line in sentence.splitlines()])
else:
return self.encode(sentence)
# NOTE: this doesn't support translation tasks currently
tokenized_sentences = [encode(sentence) for sentence in sentences]
return [
hypos[0]
for hypos in self.generate(
tokenized_sentences, score_reference=True, **kwargs
)
]
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
beam: int = 5,
verbose: bool = False,
skip_invalid_size_inputs=False,
inference_step_args=None,
prefix_allowed_tokens_fn=None,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
return self.generate(
tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs
)[0]
# build generator using current args as well as any kwargs
gen_args = copy.deepcopy(self.cfg.generation)
with open_dict(gen_args):
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(
self.models,
gen_args,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
)
inference_step_args = inference_step_args or {}
results = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
translations = self.task.inference_step(
generator, self.models, batch, **inference_step_args
)
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
# sort output to match input order
outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
if verbose:
def getarg(name, default):
return getattr(gen_args, name, getattr(self.cfg, name, default))
for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
src_str_with_unk = self.string(source_tokens)
logger.info("S\t{}".format(src_str_with_unk))
for hypo in target_hypotheses:
hypo_str = self.decode(hypo["tokens"])
logger.info("H\t{}\t{}".format(hypo["score"], hypo_str))
logger.info(
"P\t{}".format(
" ".join(
map(
lambda x: "{:.4f}".format(x),
hypo["positional_scores"].tolist(),
)
)
)
)
if hypo["alignment"] is not None and getarg(
"print_alignment", False
):
logger.info(
"A\t{}".format(
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in hypo["alignment"]
]
)
)
)
return outputs
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_batches(
self, tokens: List[List[int]], skip_invalid_size_inputs: bool
) -> Iterator[Dict[str, Any]]:
lengths = torch.LongTensor([t.numel() for t in tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=self.max_positions,
ignore_invalid_inputs=skip_invalid_size_inputs,
disable_iterator_cache=True,
).next_epoch_itr(shuffle=False)
return batch_iterator
class BPEHubInterface(object):
"""PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
def __init__(self, bpe, **kwargs):
super().__init__()
args = argparse.Namespace(bpe=bpe, **kwargs)
self.bpe = encoders.build_bpe(args)
assert self.bpe is not None
def encode(self, sentence: str) -> str:
return self.bpe.encode(sentence)
def decode(self, sentence: str) -> str:
return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
"""PyTorch Hub interface for tokenization."""
def __init__(self, tokenizer, **kwargs):
super().__init__()
args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
self.tokenizer = encoders.build_tokenizer(args)
assert self.tokenizer is not None
def encode(self, sentence: str) -> str:
return self.tokenizer.encode(sentence)
def decode(self, sentence: str) -> str:
return self.tokenizer.decode(sentence)
# File: DA-Transformer-main/fairseq/incremental_decoding_utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
from typing import Dict, Optional
from torch import Tensor
class FairseqIncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: str) -> str:
return "{}.{}".format(self._incremental_state_id, key)
def get_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
def with_incremental_state(cls):
cls.__bases__ = (FairseqIncrementalState,) + tuple(
b for b in cls.__bases__ if b != FairseqIncrementalState
)
return cls
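# Illustrative usage (an added sketch, not part of the original fairseq file).
# Decorating a module injects FairseqIncrementalState as a base class, so each
# instance stores state under a unique UUID-prefixed key and two instances of
# the same class never collide inside a shared incremental_state dict.
import torch.nn as nn  # assumed available; only Tensor is imported above
@with_incremental_state
class _ExampleCache(nn.Module):
    def forward(self, x, incremental_state=None):
        cached = self.get_incremental_state(incremental_state, "prev")
        if incremental_state is not None:
            self.set_incremental_state(incremental_state, "prev", {"x": x})
        return x if cached is None else x + cached["x"]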
# File: DA-Transformer-main/fairseq/iterative_refinement_generator.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import numpy as np
import torch
from fairseq import utils
DecoderOut = namedtuple(
"IterativeRefinementDecoderOut",
["output_tokens", "output_scores", "attn", "step", "max_step", "history"],
)
class IterativeRefinementGenerator(object):
def __init__(
self,
tgt_dict,
models=None,
eos_penalty=0.0,
max_iter=10,
max_ratio=2,
beam_size=1,
decoding_format=None,
retain_dropout=False,
adaptive=True,
retain_history=False,
reranking=False,
):
"""
Generates translations based on iterative refinement.
Args:
tgt_dict: target dictionary
            eos_penalty: if > 0.0, penalizes early stopping in decoding
max_iter: maximum number of refinement iterations
max_ratio: generate sequences of maximum length ax, where x is the source length
decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
            retain_dropout: retain dropout during inference
            adaptive: decode with early stopping
"""
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.eos_penalty = eos_penalty
self.max_iter = max_iter
self.max_ratio = max_ratio
self.beam_size = beam_size
self.reranking = reranking
self.decoding_format = decoding_format
self.retain_dropout = retain_dropout
self.retain_history = retain_history
self.adaptive = adaptive
self.models = models
def generate_batched_itr(
self,
data_itr,
maxlen_a=None,
maxlen_b=None,
cuda=False,
timer=None,
prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
for sample in data_itr:
if "net_input" not in sample:
continue
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
self.models,
sample,
prefix_tokens=sample["target"][:, :prefix_size]
if prefix_size > 0
else None,
)
if timer is not None:
timer.stop(sample["ntokens"])
for i, id in enumerate(sample["id"]):
# remove padding
src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
ref = utils.strip_pad(sample["target"][i, :], self.pad)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None, constraints=None):
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the IterativeRefinementGenerator is not supported"
)
# TODO: iterative refinement generator does not support ensemble for now.
if not self.retain_dropout:
for model in models:
model.eval()
model, reranker = models[0], None
if self.reranking:
assert len(models) > 1, "Assuming the last checkpoint is the reranker"
assert (
self.beam_size > 1
), "Reranking requires multiple translation for each example"
reranker = models[-1]
models = models[:-1]
if len(models) > 1 and hasattr(model, "enable_ensemble"):
assert model.allow_ensemble, "{} does not support ensembling".format(
model.__class__.__name__
)
model.enable_ensemble(models)
# TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len = src_tokens.size()
# initialize
encoder_out = model.forward_encoder([src_tokens, src_lengths])
prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
if self.beam_size > 1:
assert (
model.allow_length_beam
), "{} does not support decoding with length beam.".format(
model.__class__.__name__
)
# regenerate data based on length-beam
length_beam_order = (
utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1)
)
encoder_out = model.encoder.reorder_encoder_out(
encoder_out, length_beam_order
)
prev_decoder_out = model.regenerate_length_beam(
prev_decoder_out, self.beam_size
)
bsz = bsz * self.beam_size
sent_idxs = torch.arange(bsz)
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.retain_history:
prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])
finalized = [[] for _ in range(bsz)]
def is_a_loop(x, y, s, a):
b, l_x, l_y = x.size(0), x.size(1), y.size(1)
if l_x > l_y:
y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
if a is not None:
a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
elif l_x < l_y:
x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
return (x == y).all(1), y, s, a
def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
cutoff = prev_out_token.ne(self.pad)
tokens = prev_out_token[cutoff]
if prev_out_score is None:
scores, score = None, None
else:
scores = prev_out_score[cutoff]
score = scores.mean()
if prev_out_attn is None:
hypo_attn, alignment = None, None
else:
hypo_attn = prev_out_attn[cutoff]
alignment = hypo_attn.max(dim=1)[1]
return {
"steps": step,
"tokens": tokens,
"positional_scores": scores,
"score": score,
"hypo_attn": hypo_attn,
"alignment": alignment,
}
for step in range(self.max_iter + 1):
decoder_options = {
"eos_penalty": self.eos_penalty,
"max_ratio": self.max_ratio,
"decoding_format": self.decoding_format,
}
prev_decoder_out = prev_decoder_out._replace(
step=step,
max_step=self.max_iter + 1,
)
decoder_out = model.forward_decoder(
prev_decoder_out, encoder_out, **decoder_options
)
if self.adaptive:
# terminate if there is a loop
terminated, out_tokens, out_scores, out_attn = is_a_loop(
prev_output_tokens,
decoder_out.output_tokens,
decoder_out.output_scores,
decoder_out.attn,
)
decoder_out = decoder_out._replace(
output_tokens=out_tokens,
output_scores=out_scores,
attn=out_attn,
)
else:
terminated = decoder_out.output_tokens.new_zeros(
decoder_out.output_tokens.size(0)
).bool()
            if step == self.max_iter:  # reached the last iteration, terminate
terminated.fill_(1)
# collect finalized sentences
finalized_idxs = sent_idxs[terminated]
finalized_tokens = decoder_out.output_tokens[terminated]
finalized_scores = decoder_out.output_scores[terminated]
finalized_attn = (
None
if (decoder_out.attn is None or decoder_out.attn.size(0) == 0)
else decoder_out.attn[terminated]
)
if self.retain_history:
finalized_history_tokens = [h[terminated] for h in decoder_out.history]
for i in range(finalized_idxs.size(0)):
finalized[finalized_idxs[i]] = [
finalized_hypos(
step,
finalized_tokens[i],
finalized_scores[i],
None if finalized_attn is None else finalized_attn[i],
)
]
if self.retain_history:
finalized[finalized_idxs[i]][0]["history"] = []
for j in range(len(finalized_history_tokens)):
finalized[finalized_idxs[i]][0]["history"].append(
finalized_hypos(
step, finalized_history_tokens[j][i], None, None
)
)
# check if all terminated
if terminated.sum() == terminated.size(0):
break
# for next step
not_terminated = ~terminated
prev_decoder_out = decoder_out._replace(
output_tokens=decoder_out.output_tokens[not_terminated],
output_scores=decoder_out.output_scores[not_terminated],
attn=decoder_out.attn[not_terminated]
if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0)
else None,
history=[h[not_terminated] for h in decoder_out.history]
if decoder_out.history is not None
else None,
)
encoder_out = model.encoder.reorder_encoder_out(
encoder_out, not_terminated.nonzero(as_tuple=False).squeeze()
)
sent_idxs = sent_idxs[not_terminated]
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.beam_size > 1:
if reranker is not None:
finalized = self.rerank(
reranker, finalized, [src_tokens, src_lengths], self.beam_size
)
# aggregate information from length beam
finalized = [
finalized[
np.argmax(
[
finalized[self.beam_size * i + j][0]["score"]
for j in range(self.beam_size)
]
)
+ self.beam_size * i
]
for i in range(len(finalized) // self.beam_size)
]
return finalized
def rerank(self, reranker, finalized, encoder_input, beam_size):
def rebuild_batch(finalized):
finalized_tokens = [f[0]["tokens"] for f in finalized]
finalized_maxlen = max(f.size(0) for f in finalized_tokens)
final_output_tokens = (
finalized_tokens[0]
.new_zeros(len(finalized_tokens), finalized_maxlen)
.fill_(self.pad)
)
for i, f in enumerate(finalized_tokens):
final_output_tokens[i, : f.size(0)] = f
return final_output_tokens
final_output_tokens = rebuild_batch(finalized)
final_output_tokens[
:, 0
] = self.eos # autoregressive model assumes starting with EOS
reranker_encoder_out = reranker.encoder(*encoder_input)
length_beam_order = (
utils.new_arange(
final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)
)
.t()
.reshape(-1)
)
reranker_encoder_out = reranker.encoder.reorder_encoder_out(
reranker_encoder_out, length_beam_order
)
reranking_scores = reranker.get_normalized_probs(
reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out),
True,
None,
)
reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None])
reranking_masks = final_output_tokens[:, 1:].ne(self.pad)
reranking_scores = (
reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1)
)
reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(
reranking_scores
)
for i in range(len(finalized)):
finalized[i][0]["score"] = reranking_scores[i]
return finalized
# File: DA-Transformer-main/fairseq/nan_detector.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
logger = logging.getLogger(__name__)
class NanDetector:
"""
    Detects the first NaN or Inf in the forward and/or backward pass and logs it together with the module name
"""
def __init__(self, model, forward=True, backward=True):
self.bhooks = []
self.fhooks = []
self.forward = forward
self.backward = backward
self.named_parameters = list(model.named_parameters())
self.reset()
for name, mod in model.named_modules():
mod.__module_name = name
self.add_hooks(mod)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
# Dump out all model gnorms to enable better debugging
norm = {}
gradients = {}
for name, param in self.named_parameters:
if param.grad is not None:
grad_norm = torch.norm(param.grad.data.float(), p=2)
norm[name] = grad_norm.item()
if torch.isnan(grad_norm).any() or torch.isinf(grad_norm).any():
gradients[name] = param.grad.data
if len(gradients) > 0:
logger.info("Detected nan/inf grad norm, dumping norms...")
logger.info(f"norms: {norm}")
logger.info(f"gradients: {gradients}")
self.close()
def add_hooks(self, module):
if self.forward:
self.fhooks.append(module.register_forward_hook(self.fhook_fn))
if self.backward:
self.bhooks.append(module.register_backward_hook(self.bhook_fn))
def reset(self):
self.has_printed_f = False
self.has_printed_b = False
def _detect(self, tensor, name, backward):
err = None
if (
torch.is_floating_point(tensor)
# single value tensors (like the loss) will not provide much info
and tensor.numel() >= 2
):
with torch.no_grad():
if torch.isnan(tensor).any():
err = "NaN"
elif torch.isinf(tensor).any():
err = "Inf"
if err is not None:
err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}"
return err
def _apply(self, module, inp, x, backward):
if torch.is_tensor(x):
if isinstance(inp, tuple) and len(inp) > 0:
inp = inp[0]
err = self._detect(x, module.__module_name, backward)
if err is not None:
if torch.is_tensor(inp) and not backward:
err += (
f" input max: {inp.max().item()}, input min: {inp.min().item()}"
)
has_printed_attr = "has_printed_b" if backward else "has_printed_f"
logger.warning(err)
setattr(self, has_printed_attr, True)
elif isinstance(x, dict):
for v in x.values():
self._apply(module, inp, v, backward)
elif isinstance(x, list) or isinstance(x, tuple):
for v in x:
self._apply(module, inp, v, backward)
def fhook_fn(self, module, inp, output):
if not self.has_printed_f:
self._apply(module, inp, output, backward=False)
def bhook_fn(self, module, inp, output):
if not self.has_printed_b:
self._apply(module, inp, output, backward=True)
def close(self):
for hook in self.fhooks + self.bhooks:
hook.remove()
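# Illustrative usage (an added sketch, not part of the original fairseq file).
# Wraps a training step so that the first NaN/Inf produced by any submodule is
# logged together with that module's name; model and batch are hypothetical.
def _example_nan_guard(model, batch):
    with NanDetector(model):
        loss = model(batch).sum()
        loss.backward()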
# File: DA-Transformer-main/fairseq/ngram_repeat_block.py
# Originally from Microsoft Corporation.
# Licensed under the MIT License.
""" Wrapper for ngram_repeat_block cuda extension """
import math
import warnings
from typing import Dict, List, Optional
import torch
from torch import nn
try:
from fairseq import ngram_repeat_block_cuda
EXTENSION_BUILT = True
except ImportError:
EXTENSION_BUILT = False
def is_cuda_extension_usable() -> bool:
"""Check whether ngram_repeat_block_cuda is built properly"""
if not EXTENSION_BUILT or not torch.cuda.is_available():
return False
bsz = 2
tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device="cuda")
lprobs = torch.rand((8, 12), device="cuda")
try:
outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3)
outputs = outputs + 4 # This line breaks if the extension is built incorrectly.
return True
except RuntimeError:
warnings.warn(
"NGramRepeatBlock extension must be rebuilt."
'Run TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0" python setup.py build_ext --inplace'
)
return False
class NGramRepeatBlock(nn.Module):
"""Wrapper class for calling ngram_repeat_block cuda extension"""
def __init__(self, no_repeat_ngram_size: int, use_extension: bool = True):
super().__init__()
self.use_extension = is_cuda_extension_usable() if use_extension else False
self.no_repeat_ngram_size = no_repeat_ngram_size
def reset_parameters(self):
pass
@torch.jit.unused
def call_cuda_extension(
self,
tokens,
lprobs,
bsz: int,
beam_size: int,
step: int,
):
return ngram_repeat_block_cuda.forward(
tokens, lprobs, bsz, step, beam_size, self.no_repeat_ngram_size
)
def forward(
self,
tokens,
lprobs,
bsz: int,
beam_size: int,
step: int,
):
"""
Args:
tokens(Tensor): Input tokens(Bsz*beam, seq_len)
            lprobs(Tensor): log probabilities (Bsz*beam, vocab_size),
                expected to be updated in place
bsz(int): batch size
step(int): current step
beam_size(int): beam size
no_repeat_ngram_size(int): Ngram size
"""
msg = f"expected {bsz *beam_size} got"
assert tokens.size(0) == bsz * beam_size, f"{msg} {tokens.size(0)}"
assert lprobs.size(0) == bsz * beam_size, f"{msg} {lprobs.size(0)}"
if self.use_extension:
return self.call_cuda_extension(tokens, lprobs, bsz, beam_size, step)
else:
return self._no_repeat_ngram(
tokens,
lprobs,
bsz,
beam_size,
step,
)
def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int):
"""For each hypothesis generate a list of previous ngrams and set associated lprobs to -inf"""
gen_ngrams: List[Dict[str, List[int]]] = [
torch.jit.annotate(Dict[str, List[int]], {})
for bbsz_idx in range(bsz * beam_size)
]
cpu_tokens = tokens.cpu()
for bbsz_idx in range(bsz * beam_size):
gen_tokens: List[int] = cpu_tokens[bbsz_idx].tolist()
for ngram in self.transpose_list(
[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]
):
key = ",".join([str(x) for x in ngram[:-1]])
gen_ngrams[bbsz_idx][key] = gen_ngrams[bbsz_idx].get(
key, torch.jit.annotate(List[int], [])
) + [ngram[-1]]
if step + 2 - self.no_repeat_ngram_size >= 0:
            # compute banned tokens once at least no_repeat_ngram_size tokens
            # have been generated; otherwise there is nothing to ban yet
banned_tokens = [
self.calculate_banned_tokens(
tokens, step, gen_ngrams, self.no_repeat_ngram_size, bbsz_idx
)
for bbsz_idx in range(bsz * beam_size)
]
else:
banned_tokens = [
torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size)
]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx][
torch.tensor(banned_tokens[bbsz_idx], dtype=torch.int64)
] = torch.tensor(-math.inf).to(lprobs)
return lprobs
@staticmethod
def calculate_banned_tokens(
tokens,
step: int,
gen_ngrams: List[Dict[str, List[int]]],
no_repeat_ngram_size: int,
bbsz_idx: int,
):
tokens_list: List[int] = tokens[
bbsz_idx, step + 2 - no_repeat_ngram_size : step + 1
].tolist()
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = ",".join([str(x) for x in tokens_list])
return gen_ngrams[bbsz_idx].get(ngram_index, torch.jit.annotate(List[int], []))
@staticmethod
def transpose_list(l: List[List[int]]):
# GeneratorExp aren't supported in TS so ignoring the lint
min_len = min([len(x) for x in l]) # noqa
l2 = [[row[i] for row in l] for i in range(min_len)]
return l2
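# Illustrative usage (an added sketch, not part of the original fairseq file).
# With no_repeat_ngram_size=2 and the single hypothesis [5, 6, 5], the prefix
# "5" has already been followed by 6, so token 6 gets probability -inf at the
# next step. All values below are hypothetical.
def _example_ngram_block():
    blocker = NGramRepeatBlock(no_repeat_ngram_size=2, use_extension=False)
    tokens = torch.tensor([[5, 6, 5]])  # (bsz * beam, seq_len)
    lprobs = torch.zeros(1, 8)  # (bsz * beam, vocab_size)
    return blocker(tokens, lprobs, bsz=1, beam_size=1, step=2)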
# File: DA-Transformer-main/fairseq/options.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
from typing import Callable, List, Optional, Union
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
EMAConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
add_ema_args(parser)
return parser
def get_generation_parser(interactive=False, default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
add_checkpoint_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_speech_generation_parser(default_task="text_to_speech"):
parser = get_parser("Speech Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_speech_generation_args(parser)
return parser
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
gen_parser_from_dataclass(group, CommonEvalConfig())
return parser
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
if parse_known:
args, extra = args
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser], allow_abbrev=False)
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
if parse_known:
args, extra = suppressed_parser.parse_known_args(input_args)
args = argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
return args, extra
else:
args = suppressed_parser.parse_args(input_args)
args = argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
return args
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
if args.arch in ARCH_MODEL_REGISTRY:
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
elif args.arch in MODEL_REGISTRY:
MODEL_REGISTRY[args.arch].add_args(model_specific_group)
else:
            raise RuntimeError(f"Unknown model architecture: {args.arch}")
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
elif hasattr(cls, "__dataclass"):
gen_parser_from_dataclass(parser, cls.__dataclass())
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if (
hasattr(args, "batch_size_valid") and args.batch_size_valid is None
) or not hasattr(args, "batch_size_valid"):
args.batch_size_valid = args.batch_size
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
if args.bf16:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
if getattr(args, "update_epoch_batch_itr", None) is None:
if hasattr(args, "grouped_shuffling"):
args.update_epoch_batch_itr = args.grouped_shuffling
else:
args.grouped_shuffling = False
args.update_epoch_batch_itr = False
# Apply architecture configuration.
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
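# Illustrative sketch (hypothetical override, not part of fairseq): the
# modify_parser hook lets callers reset defaults before both parsing passes,
# e.g.
#
#   def set_adam_default(parser):
#       parser.set_defaults(optimizer="adam")
#
#   args = parse_args_and_arch(get_training_parser(), modify_parser=set_adam_default)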
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonConfig())
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix (also used to build dictionaries)")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
group.add_argument("--dict-only", action='store_true',
help="if true, only builds a dictionary and then exits")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("dataset_data_loading")
gen_parser_from_dataclass(group, DatasetConfig())
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("distributed_training")
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
gen_parser_from_dataclass(
group, DistributedTrainingConfig(distributed_world_size=default_world_size)
)
return group
def add_optimization_args(parser):
group = parser.add_argument_group("optimization")
# fmt: off
gen_parser_from_dataclass(group, OptimizationConfig())
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("checkpoint")
# fmt: off
gen_parser_from_dataclass(group, CheckpointConfig())
# fmt: on
return group
def add_common_eval_args(group):
gen_parser_from_dataclass(group, CommonEvalConfig())
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMConfig())
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, GenerationConfig())
return group
def add_speech_generation_args(parser):
group = parser.add_argument_group("Speech Generation")
add_common_eval_args(group) # NOTE: remove_bpe is not needed
# fmt: off
group.add_argument('--eos_prob_threshold', default=0.5, type=float,
help='terminate when eos probability exceeds this')
# fmt: on
return group
def add_interactive_args(parser):
group = parser.add_argument_group("Interactive")
gen_parser_from_dataclass(group, InteractiveConfig())
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from fairseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture')
# fmt: on
return group
def get_args(
data: Union[str, Path],
task: str = "translation",
arch: str = "transformer",
**overrides
):
parser = get_training_parser(task)
args = parse_args_and_arch(parser, [str(data), "--task", task, "--arch", arch])
for k, v in overrides.items():
setattr(args, k, v)
return args
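# Illustrative usage sketch (assumed data path and overrides, not part of
# fairseq):
#
#   args = get_args("data-bin/example", task="translation", arch="transformer",
#                   lr=[0.0005], max_tokens=4096)
#
# Keyword overrides are applied with setattr after parsing, so they take
# precedence over the parsed command-line values.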
def add_ema_args(parser):
group = parser.add_argument_group("EMA configuration")
gen_parser_from_dataclass(group, EMAConfig())
| 15,823 | 36.320755 | 105 | py |
null | DA-Transformer-main/fairseq/pdb.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import pdb
import sys
__all__ = ["set_trace"]
_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
_stdin_fd = sys.stdin.fileno()
except Exception:
_stdin_fd = None
class MultiprocessingPdb(pdb.Pdb):
"""A Pdb wrapper that works in a multiprocessing environment.
Usage: `from fairseq import pdb; pdb.set_trace()`
"""
def __init__(self):
pdb.Pdb.__init__(self, nosigint=True)
def _cmdloop(self):
stdin_bak = sys.stdin
with _stdin_lock:
try:
if _stdin_fd is not None:
if not _stdin[0]:
_stdin[0] = os.fdopen(_stdin_fd)
sys.stdin = _stdin[0]
self.cmdloop()
finally:
sys.stdin = stdin_bak
def set_trace():
pdb = MultiprocessingPdb()
pdb.set_trace(sys._getframe().f_back)
| 1,089 | 21.708333 | 65 | py |
null | DA-Transformer-main/fairseq/quantization_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from fairseq.modules.quantization import pq, quantization_options, scalar
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
def quantize_model_scalar(model, model_cfg: DictConfig):
quant_noise_scalar = getattr(model_cfg, "quant_noise_scalar", 0) or 0
if quant_noise_scalar > 0:
# quantize_model edits the model in place
scalar.quantize_model_(model, p=quant_noise_scalar, bits=8, update_step=1000)
return model
class Quantizer(object):
def __init__(self, config_path, max_epoch, max_update):
try:
import yaml
except ImportError:
raise ImportError("Please install yaml with: pip install yaml")
# parse config
if config_path:
with open(config_path) as config_file:
config = quantization_options.parse_config_yaml(
yaml.safe_load(config_file)
)
else:
config = quantization_options.parse_config_yaml({})
self.n_centroids_config = config["n_centroids"]
self.block_sizes_config = config["block_sizes"]
self.layers_to_quantize = config["layers_to_quantize"]
# We assume that training will run for a fixed number of epochs
# (or updates) and that we should train for equal durations
# between iterations of PQ.
num_iterations = len(self.layers_to_quantize)
if max_epoch > 0:
assert max_epoch % num_iterations == 0, (
"for iterative PQ, --max-epoch (={}) must be evenly divisible by "
"len(layers_to_quantize) (={})".format(max_epoch, num_iterations)
)
self.epoch_schedule = max_epoch // num_iterations
else:
self.epoch_schedule = None
if max_update > 0:
assert max_update % num_iterations == 0, (
"for iterative PQ, --max-update (={}) must be evenly divisible by "
"len(layers_to_quantize) (={})".format(max_update, num_iterations)
)
self.update_schedule = max_update // num_iterations
else:
self.update_schedule = None
assert (self.epoch_schedule is not None) ^ (
self.update_schedule is not None
), "for iterative PQ, cannot specify both --max-update and --max-epoch"
# 0 is a special value for quantization step, which will force
# the first call to begin_epoch() to call step()
self.quantization_step = 0
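    # Illustrative schedule (assumed values, not part of fairseq): with
    # max_epoch=30 and three entries in layers_to_quantize, epoch_schedule is
    # 30 // 3 == 10, so begin_epoch() calls step() at epochs 1, 11, and 21
    # (the epoch-1 call is also guaranteed by the quantization_step == 0 check).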
def set_trainer(self, trainer):
self.trainer = trainer
self.size_tracker = pq.SizeTracker(self.trainer.get_model())
def step(self):
"""Move to the next stage of quantization."""
if self.quantization_step >= len(self.layers_to_quantize):
# Maybe we just finished the last training step or we loaded
# a checkpoint for an iterative PQ model which previously
# finished training. Either way, don't quantize again.
return
logger.info(
"quantizing model (step={}; layers_to_quantize[step]={})".format(
self.quantization_step, self.layers_to_quantize[self.quantization_step]
)
)
quantized_layers = pq.quantize_model_(
self.trainer.get_model(),
self.size_tracker,
self.layers_to_quantize,
self.block_sizes_config,
self.n_centroids_config,
step=self.quantization_step,
)
logger.info("quantized layers: {}".format(quantized_layers))
logger.info(self.size_tracker)
self.quantization_step += 1
        # reinitialize the Trainer since model parameters have changed
self.trainer.reinitialize()
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch (epochs start at 1)."""
if (
(
self.epoch_schedule is not None
and epoch > 0
and (epoch - 1) % self.epoch_schedule == 0
)
# we always step once in the beginning, even if using
# update-based quantization
or self.quantization_step == 0
):
self.step()
def step_update(self, num_updates):
"""Called at the end of each step."""
if (
self.update_schedule is not None
and num_updates > 0
and num_updates % self.update_schedule == 0
):
self.step()
def state_dict(self):
return {
"n_centroids_config": self.n_centroids_config,
"block_sizes_config": self.block_sizes_config,
"layers_to_quantize": self.layers_to_quantize,
"epoch_schedule": self.epoch_schedule,
"update_schedule": self.update_schedule,
"quantization_step": self.quantization_step,
}
def load_state_dict(self, state_dict):
self.n_centroids_config = state_dict["n_centroids_config"]
self.block_sizes_config = state_dict["block_sizes_config"]
self.layers_to_quantize = state_dict["layers_to_quantize"]
self.epoch_schedule = state_dict["epoch_schedule"]
self.update_schedule = state_dict["update_schedule"]
self.quantization_step = state_dict["quantization_step"]
| 5,507 | 37.25 | 87 | py |
null | DA-Transformer-main/fairseq/registry.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
from typing import Union
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent
from hydra.core.config_store import ConfigStore
from omegaconf import DictConfig
REGISTRIES = {}
def setup_registry(registry_name: str, base_class=None, default=None, required=False):
assert registry_name.startswith("--")
registry_name = registry_name[2:].replace("-", "_")
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
DATACLASS_REGISTRY = {}
# maintain a registry of all registries
if registry_name in REGISTRIES:
return # registry already exists
REGISTRIES[registry_name] = {
"registry": REGISTRY,
"default": default,
"dataclass_registry": DATACLASS_REGISTRY,
}
def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs):
if isinstance(cfg, DictConfig):
choice = cfg._name
if choice and choice in DATACLASS_REGISTRY:
dc = DATACLASS_REGISTRY[choice]
cfg = merge_with_parent(dc(), cfg)
elif isinstance(cfg, str):
choice = cfg
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice]()
else:
choice = getattr(cfg, registry_name, None)
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice].from_namespace(cfg)
if choice is None:
if required:
raise ValueError("{} is required!".format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, "build_" + registry_name):
builder = getattr(cls, "build_" + registry_name)
else:
builder = cls
return builder(cfg, *extra_args, **extra_kwargs)
def register_x(name, dataclass=None):
def register_x_cls(cls):
if name in REGISTRY:
raise ValueError(
"Cannot register duplicate {} ({})".format(registry_name, name)
)
if cls.__name__ in REGISTRY_CLASS_NAMES:
raise ValueError(
"Cannot register {} with duplicate class name ({})".format(
registry_name, cls.__name__
)
)
if base_class is not None and not issubclass(cls, base_class):
raise ValueError(
"{} must extend {}".format(cls.__name__, base_class.__name__)
)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if cls.__dataclass is not None:
DATACLASS_REGISTRY[name] = cls.__dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group=registry_name, node=node, provider="fairseq")
REGISTRY[name] = cls
return cls
return register_x_cls
return build_x, register_x, REGISTRY, DATACLASS_REGISTRY
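# Illustrative usage sketch (hypothetical registry and class, not part of
# fairseq):
#
#   build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = setup_registry(
#       "--tokenizer", default=None
#   )
#
#   @register_tokenizer("whitespace")
#   class WhitespaceTokenizer:
#       def __init__(self, cfg, *args, **kwargs):
#           self.cfg = cfg
#
#   tokenizer = build_tokenizer("whitespace")  # looked up and instantiated via the registry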
| 3,449 | 33.158416 | 87 | py |
null | DA-Transformer-main/fairseq/search.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List, Optional
import torch
import torch.nn as nn
from fairseq.token_generation_constraints import (
ConstraintState,
OrderedConstraintState,
UnorderedConstraintState,
)
from torch import Tensor
class Search(nn.Module):
def __init__(self, tgt_dict):
super().__init__()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.src_lengths = torch.tensor(-1)
self.supports_constraints = False
self.stop_on_max_len = False
def step(
self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None
):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
            prev_output_tokens: (bsz x step)
                the previously generated output tokens
            original_batch_idxs: (bsz)
                the tensor with the batch indices, in the range [0, bsz);
                this is useful in case a re-ordering has been applied
                and we need to know the original indices
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
@torch.jit.export
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
"""Initialize constraint states for constrained decoding (if supported).
Args:
batch_constraints: (torch.Tensor, optional)
the list of constraints, in packed form
beam_size: (int)
the beam size
"""
pass
def prune_sentences(self, batch_idxs: Tensor):
"""
Removes constraint states for completed sentences (if supported).
This is called from sequence_generator._generate() when sentences are
deleted from the batch.
Args:
batch_idxs: Indices of *sentences* whose constraint state should be *kept*.
"""
pass
def update_constraints(self, active_hypos: Tensor):
"""
Updates the constraint states by selecting the beam items that are retained.
This is called at each time step of sequence_generator._generate() when
the set of 2 * {beam_size} candidate hypotheses are reduced to the beam size.
Args:
active_hypos: (batch size, beam size)
list of integers denoting, for each sentence, which beam candidate items
should be kept.
"""
pass
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
self.constraint_states = None
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores: Optional[Tensor],
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
        # At this point, beams_buf and indices_buf have shape (bsz, k); beams_buf
        # holds per-sentence beam ids and indices_buf holds vocab indices
return scores_buf, indices_buf, beams_buf
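# Illustrative sketch (toy numbers, not part of fairseq): with vocab_size=5, a
# flattened index of 7 over lprobs.view(bsz, -1) decodes to beam 7 // 5 == 1
# and token 7 % 5 == 2, which is exactly the projection computed by beams_buf
# and indices_buf above.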
class PrefixConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, prefix_allowed_tokens_fn):
super().__init__(tgt_dict)
self.prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
self.stop_on_max_len = True
@torch.jit.export
def apply_mask(self, x, prev_output_tokens, original_batch_idxs):
beam_size = x.shape[0] // original_batch_idxs.shape[0]
original_batch_idxs = (
original_batch_idxs.unsqueeze(-1).repeat((1, beam_size)).flatten().tolist()
)
mask = torch.full_like(x, -math.inf)
for sent_i, (sent, batch_i) in enumerate(
zip(prev_output_tokens, original_batch_idxs)
):
mask[sent_i, :, self.prefix_allowed_tokens_fn(batch_i, sent)] = 0
return mask
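    # Illustrative callback sketch (hypothetical function and ids, not part of
    # fairseq): a prefix_allowed_tokens_fn returns the token ids permitted at
    # the current step, e.g. forcing every hypothesis to start with a fixed tag:
    #
    #   def prefix_allowed_tokens_fn(batch_id, sent):
    #       if sent.numel() == 1:            # only BOS generated so far
    #           return [LANG_TAG_ID]         # hypothetical token id
    #       return list(range(vocab_size))   # no constraint afterwards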
@torch.jit.export
def step(
self,
step: int,
lprobs: Tensor,
scores: Tensor,
prev_output_tokens: Tensor,
original_batch_idxs: Tensor,
):
bsz, beam_size, vocab_size = lprobs.size()
lprobs += self.apply_mask(
lprobs.view(bsz * beam_size, 1, vocab_size),
prev_output_tokens,
original_batch_idxs,
).view(bsz, beam_size, vocab_size)
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
return scores_buf, indices_buf, beams_buf
class LexicallyConstrainedBeamSearch(Search):
"""Implements lexically constrained beam search as described in
Fast Lexically Constrained Decoding with Dynamic Beam
Allocation for Neural Machine Translation. Post & Vilar,
NAACL 2018. https://www.aclweb.org/anthology/N18-1119/
and
Improved Lexically Constrained Decoding for Translation and
Monolingual Rewriting. Hu et al, NAACL
2019. https://www.aclweb.org/anthology/N19-1090/
This is accomplished by maintaining, for each beam hypothesis, a
ConstraintState object (see constraints.py) that tracks which
constraints have been generated and using this information to
shape the beam for each input sentence.
"""
def __init__(self, tgt_dict, representation):
super().__init__(tgt_dict)
self.representation = representation
self.vocab_size = len(tgt_dict)
self.num_cands = 0
self.supports_constraints = True
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
self.constraint_states = []
for constraint_tensor in batch_constraints:
if self.representation == "ordered":
constraint_state = OrderedConstraintState.create(constraint_tensor)
elif self.representation == "unordered":
constraint_state = UnorderedConstraintState.create(constraint_tensor)
self.constraint_states.append([constraint_state for i in range(beam_size)])
@torch.jit.export
def prune_sentences(self, batch_idxs: Tensor):
self.constraint_states = [
self.constraint_states[i] for i in batch_idxs.tolist()
]
@torch.jit.export
def update_constraints(self, active_hypos: Tensor):
if self.constraint_states:
batch_size = active_hypos.size(0)
for sentid in range(batch_size):
self.constraint_states[sentid] = [
self.constraint_states[sentid][i] for i in active_hypos[sentid]
]
@torch.jit.export
def step(
self,
step: int,
lprobs: Tensor,
scores: Optional[Tensor],
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
"""
A constrained step builds a large candidates list from the following:
- the top 2 * {beam_size} items over the whole beam
- for each item in the beam
- the top {each_k} (default 1)
- all next constraints
We then compute the constrained state of each beam item, and assign
stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so
on. We then sort by (stripe, score), and truncate the list at
2 * beam size.
Args:
step: the decoder step
lprobs: (batch size, beam size, target vocab)
the target-vocab distributions for each item in the beam.
        Return: A tuple of (scores, indices, beams, constraints) where:
scores: (batch, output beam size)
the scores of the chosen elements
indices: (batch, output beam size)
the target vocab indices of the chosen elements
beams: (batch, output beam size)
the 0-indexed hypothesis ids of the chosen elements
constraints: (batch, output beam size)
the new constraint states
"""
each_k = 1
device = lprobs.device
batch_size, beam_size, vocab_size = lprobs.size()
self.num_cands = min(
# Just take the k-best. We'll get another k from the 1-best from each
# row, plus more from the constraints
beam_size * 2,
lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad
)
# STEP 0: Preliminary. Prevent EOS for unfinished hyps across all batch items
constraint_states = self.constraint_states
if constraint_states and step > 0:
not_finished_indices = []
for sentno, sent_constraints in enumerate(constraint_states):
for beamno, state in enumerate(sent_constraints):
index = sentno * beam_size + beamno
if not state.finished:
not_finished_indices.append(index)
not_finished_indices = torch.tensor(not_finished_indices)
if not_finished_indices.numel() > 0:
lprobs.view(batch_size * beam_size, -1)[
not_finished_indices, self.eos
] = -math.inf
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam entry for each batch item
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(batch_size, -1),
self.num_cands,
)
scores_buf, indices_buf = top_prediction
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
# Short circuit if there are no constraints in this batch
if not constraint_states:
return scores_buf, indices_buf, beams_buf
# STEP 1: get top-1 from each hypothesis across all sentences in the batch
if step > 0:
top_scores, top_indices = torch.topk(
lprobs.view(batch_size * beam_size, -1),
k=each_k,
dim=1,
)
top_scores = top_scores.view(batch_size, -1)
top_indices = top_indices.view(batch_size, -1)
scores_buf = torch.cat((scores_buf, top_scores), dim=1)
indices_buf = torch.cat((indices_buf, top_indices), dim=1)
new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1)
beams_buf = torch.cat((beams_buf, new_beams), dim=1)
# Now, process sentences in the batch one by one.
new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device)
new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
for sentno, states in enumerate(constraint_states):
scores, indices, beams, new_states = self.step_sentence(
step,
sentno,
lprobs[sentno],
constraint_states[sentno],
beams_buf[sentno].clone(),
indices_buf[sentno].clone(),
scores_buf[sentno].clone(),
)
new_scores_buf[sentno] = scores
new_indices_buf[sentno] = indices
new_beams_buf[sentno] = beams
self.constraint_states[sentno] = new_states
return new_scores_buf, new_indices_buf, new_beams_buf
@torch.jit.export
def step_sentence(
self,
step: int,
sentno: int,
lprobs: Tensor,
constraint_states: List[List[ConstraintState]],
beams_buf: Tensor,
indices_buf: Tensor,
scores_buf: Tensor,
):
"""Does per-sentence processing. Adds all constraints for each
hypothesis to the list of candidates; then removes duplicates,
sorts, and dynamically stripes across the banks. All tensor inputs
are collapsed to those pertaining to a single input sentence.
"""
device = lprobs.device
# STEP 2: Add all constraints for each beam item
for beamno, state in enumerate(constraint_states):
next_tokens = torch.tensor(list(state.next_tokens()), device=device).long()
if next_tokens.numel() != 0:
indices_buf = torch.cat((indices_buf, next_tokens))
next_beams = (
torch.tensor(beamno, device=device)
.repeat(next_tokens.size(0))
.long()
)
beams_buf = torch.cat((beams_buf, next_beams))
next_values = lprobs[beamno].take(next_tokens.view(-1))
scores_buf = torch.cat((scores_buf, next_values))
# At the 0th time step, there is just one beam item
if step == 0:
break
# STEP 3: Compute the "bank" for each candidate. This is the
# number of constraints it's generated. We need this so that
# we can do round-robin allocation of the beam across these
# banks. If C is the number of constraints, we select the best
# item in bank C, then the best in bank C-1, etc, followed by
# the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so
# on, until the maximum beam size. We accomplish this by
# creating a sort key and striping across the banks.
# Compute the new states for all candidates
cands_size = indices_buf.size(0)
constraint_states = [
constraint_states[beams_buf[i]].advance(indices_buf[i])
for i in range(cands_size)
]
banks = torch.tensor([state.bank for state in constraint_states], device=device)
# STEP 4: Sort
num_constraint_tokens = len(state.tokens)
# Sort by keys (bank, score) (i.e., sort banks together, and scores
# within banks). AFAIK pytorch doesn't support either stable sort or
# multi-key sorting, so we have to hack this.
MAX_SCORE = -100
sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf
sort_values, sort_indices = sort_key.sort(dim=0, descending=True)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
banks = banks[sort_indices]
# Sort the constraints to follow suit
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 5: Remove duplicates. The topk calls (overall and
# per-row) plus the per-row generation of constraints will
# produce duplicates. Here we remove them.
def roll(t):
"""Rolls a 1d tensor left by 1.
[0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3]
"""
return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0)
# We map candidates (beam, token_id) to a single dimension.
# This is then shifted by 1. We can then easily identify
# duplicates and create a mask that identifies unique
# extensions.
uniques_mask = beams_buf * (self.vocab_size + 1) + indices_buf
uniques_mask = roll(uniques_mask) != uniques_mask
# Use the mask to pare down the data structures
scores_buf = torch.masked_select(scores_buf, uniques_mask)
indices_buf = torch.masked_select(indices_buf, uniques_mask)
beams_buf = torch.masked_select(beams_buf, uniques_mask)
banks = torch.masked_select(banks, uniques_mask)
i = 1
for mask in uniques_mask[1:]:
if not mask:
constraint_states.pop(i)
i += mask
# STEP 6: Assign IDs round-robin across banks, sort, and
# truncate. Now that the candidates are sorted by (bank,
# score) and uniqed, we dynamically allocate the {beam_size}
# beam by striping across the candidates. These stripes will
# be used as sort keys to do round-robin selection. This is
# accomplished in a single pass with offsets. Sorting by
# highest-banks (furthest-along hypotheses) first ensures
# progress through the constraints.
#
# e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0
# OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1
# NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7
# = 0 5 10 1 6 11 13 2 7 12 3 8
#
# Sorting by this then gives the following banks:
#
# 3 2 1 0 3 2 1 0 3 2 1 2
#
# We'll take the top {beam_size} of these.
stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)]
stripes = torch.zeros_like(banks)
cur_bank_count = -1
cur_bank = banks[0]
for i, bank in enumerate(banks):
if bank != cur_bank:
cur_bank_count = 0
cur_bank = bank
else:
cur_bank_count += 1
stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count]
# STEP 7: Sort by the stripes values
sort_values, sort_indices = stripes.sort(dim=0)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 8: Truncate to the candidates size!
scores_buf = scores_buf[: self.num_cands]
indices_buf = indices_buf[: self.num_cands]
beams_buf = beams_buf[: self.num_cands]
return scores_buf, indices_buf, beams_buf, constraint_states
class LengthConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
super().__init__(tgt_dict)
self.min_len_a = min_len_a
self.min_len_b = min_len_b
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.beam = BeamSearch(tgt_dict)
self.needs_src_lengths = True
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
min_lens = self.min_len_a * self.src_lengths + self.min_len_b
max_lens = self.max_len_a * self.src_lengths + self.max_len_b
lprobs[step < min_lens, :, self.eos] = -math.inf
lprobs[step >= max_lens, :, self.eos] = 0
return self.beam.step(step, lprobs, scores)
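# Illustrative sketch (toy coefficients, not part of fairseq): with
# min_len_a=0.5, min_len_b=2 and a source of length 10, min_lens is 7, so EOS
# is masked to -inf for steps 0..6; once step reaches max_lens the EOS
# log-probability is set to 0, forcing the hypothesis to terminate.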
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.beam = BeamSearch(tgt_dict)
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
"DiverseBeamSearch requires --beam to be divisible by the number of groups"
)
# initialize diversity penalty
diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g :: self.num_groups, :]
scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(
lprobs_g,
other=diversity_buf.unsqueeze(1),
alpha=self.diversity_strength,
)
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(
step, lprobs_g, scores_g
)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
diversity_buf.scatter_add_(
1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf)
)
# interleave results from different groups
scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1)
indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1)
beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1)
return scores_buf, indices_buf, beams_buf
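# Illustrative sketch (toy numbers, not part of fairseq): with beam_size=6 and
# num_groups=3, the slice lprobs[:, g::3, :] assigns beams (0, 3) to group 0,
# (1, 4) to group 1, and (2, 5) to group 2; each group runs a standard beam
# step after subtracting the Hamming diversity penalty accumulated from the
# tokens already chosen by earlier groups.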
class Sampling(Search):
sampling_topk: int
sampling_topp: float
def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_topp = sampling_topp
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
        Return: A tuple of (trimmed_probs, truncated_indices) where:
            trimmed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, : max_dim + 1]
truncated_probs = sorted_probs[:, :, : max_dim + 1]
truncated_indices = sorted_indices[:, :, : max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
trim_mask = ~truncated_mask
        trimmed_probs = truncated_probs.masked_fill_(trim_mask, 0)
        return trimmed_probs, truncated_indices
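    # Illustrative sketch (toy distribution, not part of fairseq): with
    # sampling_topp=0.9 and sorted probs [0.5, 0.3, 0.15, 0.05], the cumulative
    # sums are [0.5, 0.8, 0.95, 1.0]; lt(0.9) keeps the first two entries, one
    # extra token is included so the retained mass exceeds p, and sampling then
    # proceeds over [0.5, 0.3, 0.15] with the remaining mass zeroed out.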
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
if self.sampling_topp > 0:
# only sample from the smallest set of words whose cumulative probability mass exceeds p
probs, top_indices = self._sample_topp(lprobs)
elif self.sampling_topk > 0:
# only sample from top-k candidates
lprobs, top_indices = lprobs.topk(self.sampling_topk)
probs = lprobs.exp_()
else:
probs = lprobs.exp_()
# dummy data to be consistent with true branch for type check
top_indices = torch.empty(0).to(probs)
# sample
if step == 0:
indices_buf = torch.multinomial(
probs.view(bsz, -1),
beam_size,
replacement=True,
).view(bsz, beam_size)
else:
indices_buf = torch.multinomial(
probs.view(bsz * beam_size, -1),
1,
replacement=True,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
probs = probs.expand(bsz, beam_size, -1)
# gather scores
scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1))
scores_buf = scores_buf.log_().view(bsz, -1)
# remap indices if using top-k or top-P sampling
if self.sampling_topk > 0 or self.sampling_topp > 0:
indices_buf = torch.gather(
top_indices.expand(bsz, beam_size, -1),
dim=2,
index=indices_buf.unsqueeze(-1),
).squeeze(2)
if step == 0:
beams_buf = indices_buf.new_zeros(bsz, beam_size)
else:
beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
# make scores cumulative
scores_buf.add_(
torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
)
return scores_buf, indices_buf, beams_buf
class DiverseSiblingsSearch(Search):
"""
Beam search with diverse siblings.
See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details.
https://arxiv.org/abs/1611.08562
1/ Calculate hypotheses for each beam
2/ Intra-sibling ordering
3/ Rewrite scores
4/ Choose top K hypotheses
if diversity_rate == 0 is equivalent to BeamSearch
"""
def __init__(self, tgt_dict, diversity_rate):
super().__init__(tgt_dict)
self.diversity_rate = diversity_rate
self.beam = BeamSearch(tgt_dict)
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
k = min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
)
s_list: List[Tensor]
i_list: List[Tensor]
s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)]
i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)]
sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate
if step == 0:
return self.beam.step(step, lprobs, scores)
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
# 1/ Calculate hypotheses for each beam
for i in range(beam_size):
torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i]))
i_list[i].fmod_(vocab_size)
            # 2/ Intra-sibling ordering (already given by topk) + 3/ Rewrite scores
s_list[i].sub_(sibling_score)
# 4/ Choose top K hypotheses
indices = torch.stack(i_list, dim=1).view(bsz, -1)
final_scores = torch.empty(0).to(lprobs)
final_indices = torch.LongTensor().to(device=lprobs.device)
final_beams = torch.LongTensor().to(device=lprobs.device)
(final_scores, final_indices) = torch.topk(
torch.stack(s_list, dim=1).view(bsz, -1),
k,
)
final_beams = final_indices // k
for i in range(bsz):
final_indices[i] = indices[i][final_indices[i]]
return final_scores, final_indices, final_beams
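# Illustrative sketch (toy values, not part of fairseq): with k=3 and
# diversity_rate=0.5, sibling_score is [0.5, 1.0, 1.5]; within each beam the
# top-ranked sibling is penalised least and lower-ranked siblings progressively
# more, which spreads the final top-k selection across different beams.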
| 31,337 | 37.451534 | 100 | py |
null | DA-Transformer-main/fairseq/sequence_generator.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import sys
import torch
import torch.nn as nn
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from torch import Tensor
from fairseq.ngram_repeat_block import NGramRepeatBlock
class SequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
max_len=0,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
max_len (int, optional): the maximum length of the generated output
(not including end-of-sentence)
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.max_len = max_len or self.model.max_decoder_positions()
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.temperature = temperature
self.match_source_len = match_source_len
if no_repeat_ngram_size > 0:
self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size)
else:
self.repeat_ngram_blocker = None
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = (
hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
)
self.model.eval()
self.lm_model = lm_model
self.lm_weight = lm_weight
if self.lm_model is not None:
self.lm_model.eval()
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
# TODO(myleott): unused, deprecate after pytorch-translate migration
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
"""Iterate over a batched dataset and yield individual translations.
Args:
cuda (bool, optional): use GPU for generation
timer (StopwatchMeter, optional): time generations
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if "net_input" not in s:
continue
input = s["net_input"]
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items() if k != "prev_output_tokens"
}
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(encoder_input)
if timer is not None:
timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
for i, id in enumerate(s["id"].data):
# remove padding
src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
ref = (
utils.strip_pad(s["target"].data[i, :], self.pad)
if s["target"] is not None
else None
)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(
self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs
) -> List[List[Dict[str, Tensor]]]:
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
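    # Illustrative call sketch (assumed setup, not part of fairseq):
    #
    #   generator = SequenceGenerator([model], tgt_dict, beam_size=5)
    #   hypos = generator.generate([model], sample)
    #   best_tokens = hypos[0][0]["tokens"]   # top hypothesis for sentence 0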
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
            # length of the source text, i.e., the number of tokens excluding EOS and padding
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
)
elif "source" in net_input:
src_tokens = net_input["source"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
elif "features" in net_input:
src_tokens = net_input["features"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception(
"expected src_tokens or source in net input. input keys: "
+ str(net_input.keys())
)
# bsz: total number of sentences in beam
        # Note that src_tokens may have more than 2 dimensions (e.g., audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
self.max_len - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
with torch.autograd.profiler.record_function("EnsembleModel: forward_encoder"):
encoder_outs = self.model.forward_encoder(net_input)
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries of information about the hypothesis being finalized at each step
# a boolean array indicating if the sentence at the index is finished or not
finished = [False for i in range(bsz)]
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_tokens.device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
with torch.autograd.profiler.record_function(
"EnsembleModel: forward_decoder"
):
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
)
if self.lm_model is not None:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs += probs
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
            # Record attention scores (only supported when avg_attn_scores is a Tensor)
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
            # Shape: 1d list of absolute-numbered (bsz * beam_size) indices
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
            # Rewrite the element-wise "or" as ~(~a & ~b) (De Morgan's law),
            # since element-wise or is not supported in TorchScript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
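    # A small illustrative sketch of replicate_first_beam (shapes assumed, not
    # from the source): the tensor is viewed as (n, beam_size, L) and, for
    # every row selected by `mask`, all beams are overwritten with beam 0.
    # E.g. with beam_size=2, rows [[a, b], [c, d]] and mask [True, False]
    # become [[a, a], [c, d]] before being flattened back to (-1, L).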
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
bbsz_idx (Tensor):
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
cum_fin_tensor = torch.tensor(cum_unfin, dtype=torch.int).to(bbsz_idx)
unfin_idx = bbsz_idx // beam_size
sent = unfin_idx + torch.index_select(cum_fin_tensor, 0, unfin_idx)
# Create a set of "{sent}{unfin_idx}", where
# "unfin_idx" is the index in the current (possibly reduced)
# list of sentences, and "sent" is the index in the original,
# unreduced batch
# For every finished beam item
# sentence index in the current (possibly reduced) batch
seen = (sent << 32) + unfin_idx
unique_seen: List[int] = torch.unique(seen).tolist()
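        # Illustrative note (values made up): the packing above stores both
        # indices in a single integer, e.g. sent=5 and unfin_idx=2 give
        # seen = (5 << 32) + 2; the loop over unique_seen below recovers them
        # with `unique_s >> 32` and `unique_s - (unique_sent << 32)`.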
if self.match_source_len:
condition = step > torch.index_select(src_lengths, 0, unfin_idx)
eos_scores = torch.where(condition, torch.tensor(-math.inf), eos_scores)
sent_list: List[int] = sent.tolist()
for i in range(bbsz_idx.size()[0]):
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent_list[i]]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent_list[i]].append(
{
"tokens": tokens_clone[i],
"score": eos_scores[i],
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for unique_s in unique_seen:
# check termination conditions for this sentence
unique_sent: int = unique_s >> 32
unique_unfin_idx: int = unique_s - (unique_sent << 32)
if not finished[unique_sent] and self.is_finished(
step, unique_unfin_idx, max_len, len(finalized[unique_sent]), beam_size
):
finished[unique_sent] = True
newly_finished.append(unique_unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min(
[
m.max_decoder_positions()
for m in self.models
if hasattr(m, "max_decoder_positions")
]
+ [sys.maxsize]
)
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
if hasattr(model, "decoder"):
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
else:
decoder_out = model.forward(tokens)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
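        # Ensemble averaging in probability space:
        # logsumexp(log p_1, ..., log p_N) - log N == log((p_1 + ... + p_N) / N),
        # i.e. the log of the arithmetic mean of the per-model probabilities.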
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
@torch.jit.export
def reorder_encoder_out(
self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(
model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
)
return new_outs
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
class SequenceGeneratorWithAlignment(SequenceGenerator):
def __init__(
self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs
):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
left_pad_target (bool, optional): Whether or not the
hypothesis should be left padded or not when they are
teacher forced for generating alignments.
"""
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
if print_alignment == "hard":
self.extract_alignment = utils.extract_hard_alignment
elif print_alignment == "soft":
self.extract_alignment = utils.extract_soft_alignment
@torch.no_grad()
def generate(self, models, sample, **kwargs):
finalized = super()._generate(sample, **kwargs)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
beam_size = self.beam_size
(
src_tokens,
src_lengths,
prev_output_tokens,
tgt_tokens,
) = self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, "full_context_alignment", False) for m in self.model.models):
attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0)
for i in range(bsz * beam_size)
]
if src_tokens.device != "cpu":
src_tokens = src_tokens.to("cpu")
tgt_tokens = tgt_tokens.to("cpu")
attn = [i.to("cpu") for i in attn]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = self.extract_alignment(
attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos
)
finalized[i // beam_size][i % beam_size]["alignment"] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
src_tokens = (
src_tokens[:, None, :]
.expand(-1, self.beam_size, -1)
.contiguous()
.view(bsz * self.beam_size, -1)
)
src_lengths = sample["net_input"]["src_lengths"]
src_lengths = (
src_lengths[:, None]
.expand(-1, self.beam_size)
.contiguous()
.view(bsz * self.beam_size)
)
prev_output_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=True,
)
tgt_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"][0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
# ==============================================================================
# File: DA-Transformer-main/fairseq/sequence_scorer.py
# ==============================================================================
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import torch
from fairseq import utils
class SequenceScorer(object):
"""Scores the target for a given source sentence."""
def __init__(
self,
tgt_dict,
softmax_batch=None,
compute_alignment=False,
eos=None,
symbols_to_strip_from_output=None,
):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos() if eos is None else eos
self.softmax_batch = softmax_batch or sys.maxsize
assert self.softmax_batch > 0
self.compute_alignment = compute_alignment
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample["net_input"]
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
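        # An illustrative example of the chunking above (numbers assumed): with
        # softmax_batch=1024 and a decoder output of bsz=4, tsz=512 (2048
        # positions total), the generator flattens to (1, 2048, dim) and yields
        # two chunks of 1024 positions each, with is_single=False.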
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample["target"]
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in models:
model.eval()
decoder_out = model(**net_input)
attn = decoder_out[1] if len(decoder_out) > 1 else None
if type(attn) is dict:
attn = attn.get("attn", None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
for bd, tgt, is_single in batched:
sample["target"] = tgt
curr_prob = model.get_normalized_probs(
bd, log_probs=len(models) == 1, sample=sample
).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(
curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt
)
probs[idx:end] = tgt_probs.view(-1)
idx = end
sample["target"] = orig_target
probs = probs.view(sample["target"].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None:
if torch.is_tensor(attn):
attn = attn.data
else:
attn = attn[0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample["start_indices"] if "start_indices" in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = (
utils.strip_pad(sample["target"][i, start_idxs[i] :], self.pad)
if sample["target"] is not None
else None
)
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i] : start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_attn is not None:
avg_attn_i = avg_attn[i]
if self.compute_alignment:
alignment = utils.extract_hard_alignment(
avg_attn_i,
sample["net_input"]["src_tokens"][i],
sample["target"][i],
self.pad,
self.eos,
)
else:
alignment = None
else:
avg_attn_i = alignment = None
hypos.append(
[
{
"tokens": ref,
"score": score_i,
"attention": avg_attn_i,
"alignment": alignment,
"positional_scores": avg_probs_i,
}
]
)
return hypos
# ==============================================================================
# File: DA-Transformer-main/fairseq/speech_generator.py
# ==============================================================================
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig
class SpeechGenerator(object):
def __init__(self, model, vocoder, data_cfg: S2TDataConfig):
self.model = model
self.vocoder = vocoder
stats_npz_path = data_cfg.global_cmvn_stats_npz
self.gcmvn_stats = None
if stats_npz_path is not None:
self.gcmvn_stats = np.load(stats_npz_path)
def gcmvn_denormalize(self, x):
# x: B x T x C
if self.gcmvn_stats is None:
return x
mean = torch.from_numpy(self.gcmvn_stats["mean"]).to(x)
std = torch.from_numpy(self.gcmvn_stats["std"]).to(x)
assert len(x.shape) == 3 and mean.shape[0] == std.shape[0] == x.shape[2]
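        # Invert the global CMVN normalization x_norm = (x - mean) / std by
        # computing x_norm * std + mean feature-wise over the last dimension.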
x = x * std.view(1, 1, -1).expand_as(x)
return x + mean.view(1, 1, -1).expand_as(x)
def get_waveform(self, feat):
# T x C -> T
return None if self.vocoder is None else self.vocoder(feat).squeeze(0)
class AutoRegressiveSpeechGenerator(SpeechGenerator):
def __init__(
self,
model,
vocoder,
data_cfg,
max_iter: int = 6000,
eos_prob_threshold: float = 0.5,
):
super().__init__(model, vocoder, data_cfg)
self.max_iter = max_iter
self.eos_prob_threshold = eos_prob_threshold
@torch.no_grad()
def generate(self, model, sample, has_targ=False, **kwargs):
model.eval()
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len = src_tokens.size()[:2]
n_frames_per_step = model.decoder.n_frames_per_step
out_dim = model.decoder.out_dim
raw_dim = out_dim // n_frames_per_step
# initialize
encoder_out = model.forward_encoder(
src_tokens, src_lengths, speaker=sample["speaker"]
)
incremental_state = {}
feat, attn, eos_prob = [], [], []
finished = src_tokens.new_zeros((bsz,)).bool()
out_lens = src_lengths.new_zeros((bsz,)).long().fill_(self.max_iter)
prev_feat_out = encoder_out["encoder_out"][0].new_zeros(bsz, 1, out_dim)
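        # Autoregressive loop: feed the previous output frame(s) back into the
        # decoder, mark a hypothesis finished once sigmoid(eos_out) exceeds
        # eos_prob_threshold, and stop after at most max_iter steps.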
for step in range(self.max_iter):
cur_out_lens = out_lens.clone()
cur_out_lens.masked_fill_(cur_out_lens.eq(self.max_iter), step + 1)
_, cur_eos_out, cur_extra = model.forward_decoder(
prev_feat_out,
encoder_out=encoder_out,
incremental_state=incremental_state,
target_lengths=cur_out_lens,
speaker=sample["speaker"],
**kwargs
)
cur_eos_prob = torch.sigmoid(cur_eos_out).squeeze(2)
feat.append(cur_extra["feature_out"])
attn.append(cur_extra["attn"])
eos_prob.append(cur_eos_prob)
cur_finished = cur_eos_prob.squeeze(1) > self.eos_prob_threshold
out_lens.masked_fill_((~finished) & cur_finished, step + 1)
finished = finished | cur_finished
if finished.sum().item() == bsz:
break
prev_feat_out = cur_extra["feature_out"]
feat = torch.cat(feat, dim=1)
feat = model.decoder.postnet(feat) + feat
eos_prob = torch.cat(eos_prob, dim=1)
attn = torch.cat(attn, dim=2)
alignment = attn.max(dim=1)[1]
feat = feat.reshape(bsz, -1, raw_dim)
feat = self.gcmvn_denormalize(feat)
eos_prob = eos_prob.repeat_interleave(n_frames_per_step, dim=1)
attn = attn.repeat_interleave(n_frames_per_step, dim=2)
alignment = alignment.repeat_interleave(n_frames_per_step, dim=1)
out_lens = out_lens * n_frames_per_step
finalized = [
{
"feature": feat[b, :out_len],
"eos_prob": eos_prob[b, :out_len],
"attn": attn[b, :, :out_len],
"alignment": alignment[b, :out_len],
"waveform": self.get_waveform(feat[b, :out_len]),
}
for b, out_len in zip(range(bsz), out_lens)
]
if has_targ:
assert sample["target"].size(-1) == out_dim
tgt_feats = sample["target"].view(bsz, -1, raw_dim)
tgt_feats = self.gcmvn_denormalize(tgt_feats)
tgt_lens = sample["target_lengths"] * n_frames_per_step
for b, (f, l) in enumerate(zip(tgt_feats, tgt_lens)):
finalized[b]["targ_feature"] = f[:l]
finalized[b]["targ_waveform"] = self.get_waveform(f[:l])
return finalized
class NonAutoregressiveSpeechGenerator(SpeechGenerator):
@torch.no_grad()
def generate(self, model, sample, has_targ=False, **kwargs):
model.eval()
bsz, max_src_len = sample["net_input"]["src_tokens"].size()
n_frames_per_step = model.encoder.n_frames_per_step
out_dim = model.encoder.out_dim
raw_dim = out_dim // n_frames_per_step
feat, feat_post, out_lens, log_dur_out, _, _ = model(
src_tokens=sample["net_input"]["src_tokens"],
src_lengths=sample["net_input"]["src_lengths"],
prev_output_tokens=sample["net_input"]["prev_output_tokens"],
incremental_state=None,
target_lengths=sample["target_lengths"],
speaker=sample["speaker"],
)
if feat_post is not None:
feat = feat_post
feat = feat.view(bsz, -1, raw_dim)
feat = self.gcmvn_denormalize(feat)
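        # Durations are predicted in log(1 + d) space; invert with
        # exp(.) - 1, round to integers, and clamp at zero.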
dur_out = torch.clamp(torch.round(torch.exp(log_dur_out) - 1).long(), min=0)
def get_dur_plot_data(d):
r = []
for i, dd in enumerate(d):
r += [i + 1] * dd.item()
return r
out_lens = out_lens * n_frames_per_step
finalized = [
{
"feature": feat[b, :l] if l > 0 else feat.new_zeros([1, raw_dim]),
"waveform": self.get_waveform(
feat[b, :l] if l > 0 else feat.new_zeros([1, raw_dim])
),
"attn": feat.new_tensor(get_dur_plot_data(dur_out[b])),
}
for b, l in zip(range(bsz), out_lens)
]
if has_targ:
tgt_feats = sample["target"].view(bsz, -1, raw_dim)
tgt_feats = self.gcmvn_denormalize(tgt_feats)
tgt_lens = sample["target_lengths"] * n_frames_per_step
for b, (f, l) in enumerate(zip(tgt_feats, tgt_lens)):
finalized[b]["targ_feature"] = f[:l]
finalized[b]["targ_waveform"] = self.get_waveform(f[:l])
return finalized
class TeacherForcingAutoRegressiveSpeechGenerator(AutoRegressiveSpeechGenerator):
@torch.no_grad()
def generate(self, model, sample, has_targ=False, **kwargs):
model.eval()
src_tokens = sample["net_input"]["src_tokens"]
src_lens = sample["net_input"]["src_lengths"]
prev_out_tokens = sample["net_input"]["prev_output_tokens"]
tgt_lens = sample["target_lengths"]
n_frames_per_step = model.decoder.n_frames_per_step
raw_dim = model.decoder.out_dim // n_frames_per_step
bsz = src_tokens.shape[0]
feat, eos_prob, extra = model(
src_tokens,
src_lens,
prev_out_tokens,
incremental_state=None,
target_lengths=tgt_lens,
speaker=sample["speaker"],
)
attn = extra["attn"] # B x T_s x T_t
alignment = attn.max(dim=1)[1]
feat = feat.reshape(bsz, -1, raw_dim)
feat = self.gcmvn_denormalize(feat)
eos_prob = eos_prob.repeat_interleave(n_frames_per_step, dim=1)
attn = attn.repeat_interleave(n_frames_per_step, dim=2)
alignment = alignment.repeat_interleave(n_frames_per_step, dim=1)
tgt_lens = sample["target_lengths"] * n_frames_per_step
finalized = [
{
"feature": feat[b, :tgt_len],
"eos_prob": eos_prob[b, :tgt_len],
"attn": attn[b, :, :tgt_len],
"alignment": alignment[b, :tgt_len],
"waveform": self.get_waveform(feat[b, :tgt_len]),
}
for b, tgt_len in zip(range(bsz), tgt_lens)
]
if has_targ:
tgt_feats = sample["target"].view(bsz, -1, raw_dim)
tgt_feats = self.gcmvn_denormalize(tgt_feats)
for b, (f, l) in enumerate(zip(tgt_feats, tgt_lens)):
finalized[b]["targ_feature"] = f[:l]
finalized[b]["targ_waveform"] = self.get_waveform(f[:l])
return finalized
# ==============================================================================
# File: DA-Transformer-main/fairseq/token_generation_constraints.py
# ==============================================================================
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Implements tracking of constraints for a beam item.
A list of constraints is given as a list of one or more token
sequences, each of length at least one token. For example, for an input sentence
> Die maschinelle Übersetzung ist schwer zu kontrollieren.
We could have the constraints:
* to influence
* hard
There are two implementations:
* OrderedConstraintState: Tracks progress through an ordered list of multitoken constraints.
* UnorderedConstraintState: Tracks progress through an unordered list of multitoken constraints.
The difference is that in the first, the constraints are assumed to be
in order; the algorithm will permit zero or more tokens between them.
In the second, the constraints are not ordered, so many orderings will
be explored.
The same sequence can be present any number of times, and will appear
that many times in the output.
"""
from collections import Counter
from typing import List, Optional, Set, Tuple
import torch
class ConstraintState:
def __init__(self):
pass
def pack_constraints(batch_constraints: List[List[torch.Tensor]]) -> torch.Tensor:
"""Takes a list of list of constraints in tensor form (a list of
tensor constraints for each sentence) and transforms it into a
packed Tensor. For example, here is a batch of size 3 with 3, 0,
and 1 constraints:
[ [ [3 1 2], [3], [4 5 6 7], ]
[],
[ [1 8 9 10 1 4 11 12], ]
]
Its corresponding packed structure is:
[ [ 3 3 1 2 0 3 0 4 5 6 7 0],
[ 0 0 0 0 0 0 0 0 0 0 0 0],
[ 1 1 8 9 10 1 4 11 12 0 0 0] ]
The packed tensor has shape (batch size, maxlen), where
maxlen is defined below. Each row contains concatenated
constraint tokens for that sentence, with 0 appended after
each constraint. The first item in each row is the number
of constraints for that sentence. So maxlen is the maximum
of
(number of constraints) + (sum length of constraints) + 1.
across all sentences in the batch.
"""
# The maximum word length of concatenated constraints for any sentence
max_constraints_len = 1
for sentence_constraints in batch_constraints:
if len(sentence_constraints):
            # number of constraints, plus sum of constraint lengths, plus a zero after each
constraints_len = (
1
+ sum([c.size(0) for c in sentence_constraints])
+ len(sentence_constraints)
)
max_constraints_len = max(max_constraints_len, constraints_len)
batch_size = len(batch_constraints)
constraints_tensor = torch.zeros((batch_size, max_constraints_len)).long()
for i, sentence_constraints in enumerate(batch_constraints):
constraints_tensor[i, 0] = len(sentence_constraints)
offset = 1
for j, constraint in enumerate(sentence_constraints):
this_len = constraint.size(0)
constraints_tensor[i, offset : offset + this_len] = constraint
offset += this_len + 1
return constraints_tensor.long()
def unpack_constraints(constraint_tensor: torch.Tensor) -> List[torch.Tensor]:
"""
Transforms *one row* of a packed constraint tensor (e.g., for one
sentence in the batch) into a list of constraint tensors.
"""
constraint_list = []
num_constraints = constraint_tensor[0]
constraints = constraint_tensor.tolist()
offset = 1
for i in range(num_constraints):
where = constraints.index(0, offset)
constraint_list.append(constraint_tensor[offset:where])
offset = where + 1
return constraint_list
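# A hedged round-trip sketch of pack/unpack (token ids made up for
# illustration):
#
# batch = [[torch.tensor([3, 1, 2]), torch.tensor([3])], []]
# packed = pack_constraints(batch)
# # packed[0] -> tensor([2, 3, 1, 2, 0, 3, 0]) (count, then 0-terminated)
# # packed[1] -> tensor([0, 0, 0, 0, 0, 0, 0]) (no constraints)
# unpack_constraints(packed[0]) # -> [tensor([3, 1, 2]), tensor([3])]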
class ConstraintNode:
"""
Represents a node in a trie managing unordered constraints.
"""
def __init__(self, token: int = None, parent=None):
        # The token associated with this node (None for the root)
self.token = int(token) if token is not None else None
# The parent (None at the root)
self.parent = parent
# Whether this node is a completed constraint
self.terminal = 0
# List of child nodes
self.children = {}
# The cumulative number of constraints from this point in the
# trie forward
self.num_constraints = 0
@property
def id(self):
return self.token
def __str__(self):
term = self.terminal != 0
return f"[{self.token}].{term}#{self.num_constraints}"
def __getitem__(self, key: int):
return self.children.get(key, None)
def next_tokens(self) -> Set[int]:
"""The set of child labels."""
return set(self.children.keys())
@staticmethod
def create(constraints: List[List[int]]):
root = ConstraintNode()
for sequence in constraints:
root.add_sequence(sequence)
return root
@staticmethod
def print_graph(node: "ConstraintNode"):
if len(node.children) == 0:
return str(node)
else:
s = f"({node}"
for child in node.children.values():
s += " " + ConstraintNode.print_graph(child)
s += ")"
return s
def token_counts(self) -> Counter:
"""Returns a counter of the number of times each token is used
in a constraint.
"""
token_counts = Counter()
kids = list(self.children.values())
while len(kids) > 0:
kid = kids.pop()
token_counts[kid.id] += kid.num_constraints
kids += list(kid.children.values())
return token_counts
def tokens(self) -> Set[int]:
"""Returns the set of tokens in constraints."""
return set(self.token_counts().keys())
def add_sequence(self, sequence: List[int]):
"""Adds a constraint, represented as a list of integers, to
the trie."""
assert len(sequence) > 0
token = int(sequence[0])
if token not in self.children:
self.children[token] = ConstraintNode(token, parent=self)
node = self.children[token]
if len(sequence) == 1:
node.terminal += 1
node.num_constraints += 1
parent = node.parent
while parent is not None:
parent.num_constraints += 1
parent = parent.parent
else:
node.add_sequence(sequence[1:])
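# A small illustrative trie (sequences chosen here for illustration, not from
# the source): ConstraintNode.create([[1, 2], [1, 3]]) yields a root with one
# child (token 1, carrying two constraints) that branches into terminals 2 and
# 3; ConstraintNode.print_graph(root) renders it roughly as
# ([None].False#2 ([1].False#2 [2].True#1 [3].True#1)).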
class UnorderedConstraintState(ConstraintState):
"""
Records progress through the set of constraints for each item in the beam
using a trie.
"""
def __init__(self, node: ConstraintNode, copy_from: "ConstraintState" = None):
self.node = node
if copy_from is None:
# The root node
self.root = node
# The set of states in the graph that have been completed
self.completed = Counter()
            # The number of times each trie node has been visited while
            # generating tokens for the current hypothesis
self.generated = Counter()
# The list of tokens we need to generate
self.needed_tokens = self.root.tokens()
else:
self.completed = Counter(copy_from.completed)
self.generated = Counter(copy_from.generated)
self.root = copy_from.root
# Mark the node as generated
if self.node != self.root:
self.generated[node] += 1
@staticmethod
def create(constraint_tensor: torch.Tensor):
constraint_list = unpack_constraints(constraint_tensor)
constraint_trie_root = ConstraintNode.create(constraint_list)
return UnorderedConstraintState(constraint_trie_root)
def __str__(self):
gen_str = ",".join([str(node) for node in self.generated])
return f"{self.name}/{self.bank}({gen_str})x{self.num_completed}"
def __copy__(self):
copied_state = UnorderedConstraintState(self.node, copy_from=self)
return copied_state
def copy(self):
return self.__copy__()
@property
def name(self):
if self.node.id is None:
return "ROOT"
else:
return str(self.node.id)
@property
def is_root(self):
return self.node == self.root
@property
def bank(self):
return sum(self.generated.values())
@property
def num_completed(self):
"""The number of constraints (not constraint tokens) that are completed.
In addition to the already-completed states, we need to account for the
current state, which might get marked as completed when another token
is generated.
"""
in_final = self.node.terminal and self.completed[self.node] < self.node.terminal
return sum(self.completed.values()) + in_final
@property
def finished(self):
return self.root.num_constraints - self.num_completed == 0
@property
def token_counts(self):
return self.root.token_counts()
@property
def tokens(self):
return self.root.tokens()
@property
def num_constraint_tokens(self):
return sum(self.token_counts.values())
def next_tokens(self) -> Set[int]:
"""Returns the list of tokens that could come next.
These are (a) all tokens extending the root state and, for
non-root states, additionally all tokens extending the current
state."""
if self.node != self.root:
return self.root.next_tokens().union(self.node.next_tokens())
else:
return self.root.next_tokens()
def advance(self, token: int):
"""Reads in a token and advances the state. Here's how it works.
We can advance to the next state if:
- there is a matching child
- its path isn't blocked
A path is blocked when all constraints that are descendants of
that node have already been generated, in the current state.
If we are not able to advance from the current state, we "fall
off the graph" and return to the root state. There, we again
try to advance, checking the same criteria.
In any case, when falling off the graph, we need to do some
bookkeeping. We:
- check whether any constraints were met (all prefixes of
current state)
- if one is found, mark it as completed
- adjust visited nodes accordingly
"""
token = int(token)
next_state = None
child = self.node[token]
if child is not None and self.generated[child] < child.num_constraints:
next_state = UnorderedConstraintState(child, copy_from=self)
def rewind():
"""If we're mid-trie and an "illegal" token is chosen next, we need
to reset our state to the root state. However, along the way, we need
to check whether a prefix of the current trie state represents a state
we could mark as completed.
"""
node = self.node
while node != self.root:
if node.terminal and self.completed[node] < node.terminal:
next_state.completed[node] += 1
return
next_state.generated[node] -= 1
node = node.parent
# Fall off the graph, check the root
if next_state is None and token in self.root.next_tokens():
child = self.root[token]
# We can only traverse this edge if it's not saturated
if self.generated[child] < child.num_constraints:
next_state = UnorderedConstraintState(child, copy_from=self)
else:
next_state = UnorderedConstraintState(self.root, copy_from=self)
# Rewind
rewind()
elif next_state is None:
next_state = UnorderedConstraintState(self.root, copy_from=self)
# Rewind
rewind()
return next_state
class ConstraintSequence:
def __init__(self, sequences: List[List[int]]):
"""Represents a set of possibly multitoken constraints by
concatenating them and internally recording the end points.
"""
        self.sequences = []
        self.endpoints = []
        self.num_tokens = 0
        self.tokens = set()
        # track per-token counts so that OrderedConstraintState.token_counts works
        self.token_counts = Counter()
        for sequence in sequences:
            for token in sequence:
                self.tokens.add(token)
                self.token_counts[token] += 1
            self.num_tokens += len(sequence)
            self.endpoints += [False for x in range(len(sequence) - 1)] + [True]
            self.sequences += sequence
def __getitem__(self, key: int):
return self.sequences[key]
def __len__(self):
return len(self.sequences)
def __str__(self):
return str(self.sequences)
class OrderedConstraintState(ConstraintState):
"""
Records progress through the set of linear nonbranching constraints with gaps.
"""
def __init__(self, sequence: ConstraintSequence, state: int = -1):
self.sequence = sequence
self.state = state
@staticmethod
def create(constraint_tensor: torch.Tensor):
constraint_list = unpack_constraints(constraint_tensor)
return OrderedConstraintState(ConstraintSequence(constraint_list), -1)
def __str__(self):
return f"{self.state}/{self.bank}x{self.num_completed}"
def __copy__(self):
return OrderedConstraintState(self.sequence, self.state)
def copy(self):
return self.__copy__()
@property
def num_completed(self):
if self.state == -1:
return 0
count = len(
list(filter(lambda x: x, self.sequence.endpoints[0 : self.state + 1]))
)
return count
@property
def is_root(self):
return self.state == -1
@property
def name(self):
if self.state == -1:
return "ROOT"
else:
return str(self.sequence[self.state])
@property
def bank(self) -> int:
return self.state + 1
@property
def finished(self):
return self.state + 1 == len(self.sequence)
    @property
    def token_counts(self):
        return self.sequence.token_counts
@property
def tokens(self):
return self.sequence.tokens
@property
def num_constraint_tokens(self):
return sum(self.token_counts.values())
def next_tokens(self) -> Set[int]:
"""Returns the list of tokens that could come next.
These are (a) all tokens extending the root state and, for
non-root states, additionally all tokens extending the current
state."""
tokens = set()
if self.state > 0:
tokens.add(self.sequence[0])
if not self.finished:
tokens.add(self.sequence[self.state + 1])
return tokens
def advance(self, token: int):
"""Reads in a token and advances the state. Here's how it works.
We can advance to the next state if:
- there is a matching child
- its path isn't blocked
A path is blocked when all constraints that are descendants of
that node have already been generated, in the current state.
If we are not able to advance from the current state, we "fall
off the graph" and return to the root state. There, we again
try to advance, checking the same criteria.
In any case, when falling off the graph, we need to do some
bookkeeping. We:
- check whether any constraints were met (all prefixes of
current state)
- if one is found, mark it as completed
- adjust visited nodes accordingly
"""
token = int(token)
# print(f"{self} ADVANCE({token}) {self.sequence} -> ", end="")
if self.finished:
# Accept anything
next_state = self.copy()
elif self.sequence[self.state + 1] == token:
# Advance to the next token
next_state = OrderedConstraintState(self.sequence, self.state + 1)
elif self.sequence.endpoints[self.state]:
# Accept anything between constraints (*)
next_state = self.copy()
elif token == self.sequence[0]:
# Start over having generated the first token
next_state = OrderedConstraintState(self.sequence, 0)
else:
# Start over from the root
next_state = OrderedConstraintState(self.sequence, -1)
return next_state
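    # A hedged trace of advance (constraints made up for illustration): with
    # constraints [[3, 4], [5]] the packed sequence is [3, 4, 5] with
    # endpoints [False, True, True]. From the root (state -1), unrelated
    # tokens keep the root; advance(3) moves to state 0; an unrelated token
    # mid-constraint (endpoints[0] is False) resets to the root; 3, 4, 5 in
    # order walk to state 2 (finished), after which any token is accepted.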
# ==============================================================================
# File: DA-Transformer-main/fairseq/tokenizer.py
# ==============================================================================
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
SPACE_NORMALIZER = re.compile(r"\s+")
def tokenize_line(line):
line = SPACE_NORMALIZER.sub(" ", line)
line = line.strip()
return line.split()
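# Illustrative usage: tokenize_line("  hello \t world\n") -> ["hello", "world"]
# (runs of whitespace collapse to single spaces before splitting).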
# ==============================================================================
# File: DA-Transformer-main/fairseq/trainer.py
# ==============================================================================
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
import contextlib
import logging
import os
import sys
import time
from argparse import Namespace
from itertools import chain
from typing import Any, Dict, List
import torch
from omegaconf import OmegaConf
from fairseq import checkpoint_utils, models, optim, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.models.ema import build_ema
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
from fairseq.utils import safe_hasattr
logger = logging.getLogger(__name__)
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None):
if isinstance(cfg, Namespace):
logger.warning(
"argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf"
)
cfg = convert_namespace_to_omegaconf(cfg)
self.cfg = cfg
self.task = task
# catalog shared parameters
shared_params = _catalog_shared_params(model)
self.tpu = cfg.common.tpu
self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu
if self.cuda:
self.device = torch.device("cuda")
elif self.tpu:
self.device = utils.get_tpu_device()
else:
self.device = torch.device("cpu")
if self.is_fsdp:
import fairscale
if self.cfg.common.bf16:
raise ValueError(
"FullyShardedDataParallel is not compatible with --bf16 or "
"--memory-efficient-bf16"
)
if self.cfg.distributed_training.zero_sharding != "none":
raise ValueError(
"FullyShardedDataParallel is not compatible with --zero-sharding "
"option (it's already built in)"
)
if (
max(self.cfg.optimization.update_freq) > 1
and fairscale.__version__ < "0.4.0"
):
raise RuntimeError(
"Please update to fairscale 0.4.0 or newer when combining "
"--update-freq with FullyShardedDataParallel"
)
else:
if (
hasattr(self.cfg.distributed_training, "cpu_offload")
and self.cfg.distributed_training.cpu_offload
):
raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded")
# copy model and criterion to current device/dtype
self._criterion = criterion
self._model = model
if not self.is_fsdp:
if cfg.common.fp16:
assert not cfg.common.amp, "Cannot use fp16 and AMP together"
self._criterion = self._criterion.half()
self._model = self._model.half()
elif cfg.common.bf16:
self._criterion = self._criterion.to(dtype=torch.bfloat16)
self._model = self._model.to(dtype=torch.bfloat16)
elif cfg.common.amp:
self._amp_retries = 0
if (
not cfg.distributed_training.pipeline_model_parallel
# the DistributedFairseqModel wrapper will handle moving to device,
# so only handle cases which don't use the wrapper
and not self.use_distributed_wrapper
):
self._criterion = self._criterion.to(device=self.device)
self._model = self._model.to(device=self.device)
self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel
self.last_device = None
if self.cuda and self.pipeline_model_parallel:
self.last_device = torch.device(
cfg.distributed_training.pipeline_devices[-1]
)
# check that shared parameters are preserved after device transfer
for shared_param in shared_params:
ref = _get_module_by_path(self._model, shared_param[0])
for path in shared_param[1:]:
logger.info(
"detected shared parameter: {} <- {}".format(shared_param[0], path)
)
_set_module_by_path(self._model, path, ref)
self._dummy_batch = None # indicates we don't have a dummy batch at first
self._lr_scheduler = None
self._num_updates = 0
self._num_xla_compiles = 0 # for TPUs
self._optim_history = None
self._optimizer = None
self._warn_once = set()
self._wrapped_criterion = None
self._wrapped_model = None
self._ema = None
# TODO(myleott): support tpu
if self.cuda and self.data_parallel_world_size > 1:
self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
else:
self._grad_norm_buf = None
self.quantizer = quantizer
if self.quantizer is not None:
self.quantizer.set_trainer(self)
# get detailed cuda environment
if self.cuda:
self.cuda_env = utils.CudaEnvironment()
if self.data_parallel_world_size > 1:
self.cuda_env_arr = distributed_utils.all_gather_list(
self.cuda_env, group=distributed_utils.get_global_group()
)
else:
self.cuda_env_arr = [self.cuda_env]
if self.data_parallel_rank == 0:
utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)
else:
self.cuda_env = None
self.cuda_env_arr = None
metrics.log_start_time("wall", priority=790, round=0)
self._start_time = time.time()
self._previous_training_time = 0
self._cumulative_training_time = None
def reinitialize(self):
"""Reinitialize the Trainer, typically after model params change."""
self._lr_scheduler = None
self._optimizer = None
self._wrapped_criterion = None
self._wrapped_model = None
@property
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_process_group(self):
return distributed_utils.get_data_parallel_group()
@property
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
@property
def is_data_parallel_master(self):
# NOTE: this returns true for all model parallel replicas with data
# parallel rank 0
return self.data_parallel_rank == 0
@property
def use_distributed_wrapper(self) -> bool:
return (
self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf
) or (self.is_fsdp and self.cfg.distributed_training.cpu_offload)
@property
def should_save_checkpoint_on_current_rank(self) -> bool:
"""Indicates whether to save checkpoints on the current DDP rank."""
if (
self.is_fsdp and self.cfg.distributed_training.use_sharded_state
) or getattr(self.cfg.model, "base_layers", 0) > 0:
return True
else:
return self.is_data_parallel_master
@property
def always_call_state_dict_during_save_checkpoint(self) -> bool:
if self.is_fsdp and not self.cfg.distributed_training.use_sharded_state:
# FSDP calls communication collective when consolidating checkpoints
return True
else:
return False
@property
def checkpoint_suffix(self) -> str:
"""Suffix to add to the checkpoint file name."""
if self.is_fsdp and self.cfg.distributed_training.use_sharded_state:
return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format(
self.data_parallel_rank
)
else:
return self.cfg.checkpoint.checkpoint_suffix or ""
@property
def criterion(self):
if self._wrapped_criterion is None:
if utils.has_parameters(self._criterion) and self.use_distributed_wrapper:
self._wrapped_criterion = models.DistributedFairseqModel(
self.cfg.distributed_training,
self._criterion,
process_group=self.data_parallel_process_group,
device=self.device,
)
else:
self._wrapped_criterion = self._criterion
return self._wrapped_criterion
@property
def model(self):
if self._wrapped_model is None:
if self.use_distributed_wrapper:
self._wrapped_model = models.DistributedFairseqModel(
self.cfg.distributed_training,
self._model,
process_group=self.data_parallel_process_group,
device=self.device,
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def ema(self):
if self._ema is None:
self._build_ema()
return self._ema
def _build_ema(self):
if self.cfg.ema.store_ema:
self._ema = build_ema(self._model, self.cfg.ema, self.device)
logger.info("Exponential Moving Average Shadow Model is initialized.")
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
@property
def lr_scheduler(self):
if self._lr_scheduler is None:
self._build_optimizer() # this will initialize self._lr_scheduler
return self._lr_scheduler
def _build_optimizer(self):
params = list(
filter(
lambda p: p.requires_grad,
chain(self.model.parameters(), self.criterion.parameters()),
)
)
if self.is_fsdp and self.cfg.common.fp16:
# FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper,
# mostly for the grad scaling. But if we don't have the
# --memory-efficient-fp16 flag set, then we're effectively doing
# regular --fp16 and can allow the use of optimizers that would
# otherwise be unsupported by MemoryEfficientFP16Optimizer.
allow_unsupported = not self.cfg.common.memory_efficient_fp16
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.cfg, params, allow_unsupported=allow_unsupported
)
elif self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp:
if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:
logger.info(
"NOTE: your device does NOT support faster training with --fp16 or --amp, "
"please switch to FP32 which is likely to be faster"
)
if (
self.cfg.common.memory_efficient_fp16
or self.cfg.common.memory_efficient_bf16
):
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.cfg, params
)
elif self.cfg.common.amp:
self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params)
else:
self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params)
else:
if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:
logger.info(
"NOTE: your device may support faster training with --fp16 or --amp"
)
self._optimizer = optim.build_optimizer(self.cfg.optimizer, params)
if self.is_fsdp:
assert (
not self.cfg.optimization.use_bmuf
), "--ddp-backend=fully_sharded is not compatible with BMUF"
assert self._optimizer.supports_flat_params, (
"--ddp-backend=fully_sharded is only compatible with pointwise "
"optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). "
"However, the sharding will result in slightly different results when "
"using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)"
)
if self.cfg.optimization.use_bmuf:
self._optimizer = optim.FairseqBMUF(
self.cfg.bmuf,
self._optimizer,
)
if self.cfg.distributed_training.zero_sharding == "os":
if (
self.cfg.common.fp16
and not self.cfg.common.memory_efficient_fp16
and not self.cfg.common.memory_efficient_bf16
) and not self.cfg.common.fp16_no_flatten_grads:
raise ValueError(
"ZeRO is incomptabile with fp16 and flattened grads. "
"Please use --fp16-no-flatten-grads"
)
else:
optim.shard_(self._optimizer, self.data_parallel_process_group)
# We should initialize the learning rate scheduler immediately after
# building the optimizer, so that the initial learning rate is set.
self._lr_scheduler = lr_scheduler.build_lr_scheduler(
self.cfg.lr_scheduler,
self.optimizer,
)
self._lr_scheduler.step_update(0)
@property
def is_fsdp(self):
return self.cfg.distributed_training.ddp_backend == "fully_sharded"
def consolidate_optimizer(self):
"""For OSS, we need to consolidate the state dict."""
if self.cfg.checkpoint.no_save_optimizer_state:
return
self._gathered_optim_state = None
if hasattr(self.optimizer.optimizer, "consolidate_state_dict"):
self.optimizer.optimizer.consolidate_state_dict()
elif self.is_fsdp and not self.model.use_sharded_state:
st = self.model.gather_full_optim_state_dict(
self.optimizer
) # only returns on rank 0
self._gathered_optim_state = st
def state_dict(self):
state_dict = {
"args": None, # legacy
"cfg": (
OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True)
if OmegaConf.is_config(self.cfg)
else self.cfg
),
"model": self.model.state_dict(),
"criterion": (
self.criterion.state_dict()
if utils.has_parameters(self.criterion)
else None
),
"optimizer_history": (self._optim_history or [])
+ [
{
"criterion_name": self.get_criterion().__class__.__name__,
"optimizer_name": self.optimizer.__class__.__name__,
"lr_scheduler_state": self.lr_scheduler.state_dict(),
"num_updates": self.get_num_updates(),
}
],
"task_state": self.task.state_dict() if self.task is not None else {},
"extra_state": {
"metrics": metrics.state_dict(),
"previous_training_time": self.cumulative_training_time(),
},
}
if self.cfg.ema.store_ema:
# Save EMA model state as extra state
state_dict["extra_state"]["ema"] = self.ema.get_model().state_dict()
if self.cfg.ema.ema_fp32:
# Save EMA params in fp32
state_dict["extra_state"]["ema_fp32_params"] = self.ema.fp32_params
if not self.cfg.checkpoint.no_save_optimizer_state:
if self._gathered_optim_state is not None:
state_dict["last_optimizer_state"] = self._gathered_optim_state
self._gathered_optim_state = None
else:
state_dict["last_optimizer_state"] = self.optimizer.state_dict()
if self.is_fsdp:
# save meta data for recombining checkpoint upon loading
state_dict["fsdp_metadata"] = self.model.local_metadata_dict()
return state_dict
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
logger.info(f"Saving checkpoint to {os.path.abspath(filename)}")
# call state_dict on all ranks in case it needs internal communication
state_dict = utils.move_to_cpu(self.state_dict())
state_dict["extra_state"].update(extra_state)
if self.should_save_checkpoint_on_current_rank:
checkpoint_utils.torch_persistent_save(
state_dict,
filename,
async_write=self.cfg.checkpoint.write_checkpoints_asynchronously,
)
logger.info(f"Finished saving checkpoint to {os.path.abspath(filename)}")
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
"""
Load all training state from a checkpoint file.
rank = 0 will load the checkpoint, and then broadcast it to all
other ranks.
"""
extra_state, self._optim_history, last_optim_state = None, [], None
logger.info(f"Preparing to load checkpoint {filename}")
is_distributed = self.data_parallel_world_size > 1
bexists = PathManager.isfile(filename)
if bexists:
load_on_all_ranks = (
self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks
# TPUs don't support broadcast yet, so load checkpoints
# on every worker for now
or self.tpu
# FSDP requires loading checkpoint shards on all ranks
or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state)
or getattr(self.cfg.model, "base_layers", 0) > 0
)
if load_on_all_ranks or self.data_parallel_rank == 0:
state = checkpoint_utils.load_checkpoint_to_cpu(
filename, load_on_all_ranks=load_on_all_ranks
)
last_optim_state = state.get("last_optimizer_state", None)
# If doing zero_sharding, do not broadcast global optimizer
# state. Later we will broadcast sharded states to each rank
# to avoid memory from exploding.
if (
not load_on_all_ranks
and self.cfg.distributed_training.zero_sharding == "os"
and "last_optimizer_state" in state
and is_distributed
):
state["last_optimizer_state"] = "SHARDED"
else:
last_optim_state = None
state = None
if is_distributed and not load_on_all_ranks:
state = distributed_utils.broadcast_object(
state,
src_rank=0,
group=self.data_parallel_process_group,
dist_device=self.device,
)
if self.data_parallel_rank > 0:
last_optim_state = state.get("last_optimizer_state", None)
# load model parameters
try:
if (
"optimizer_history" in state
and len(state["optimizer_history"]) > 0
and "num_updates" in state["optimizer_history"][-1]
):
self.model.set_num_updates(
state["optimizer_history"][-1]["num_updates"]
)
                # This block implements AdaPrune for multi-head attention: it
                # removes redundant attention heads based on a provided
                # head-importance ranking.
                # For more info, please refer to the paper: https://openreview.net/forum?id=_CMSV7FTzGI
                # The MHA pruning procedure can be summarized as:
                # 1. Fine-tune the model (e.g. a RoBERTa encoder) on a dataset with regularization.
                # 2. After training, call _get_reserve_head_index and _adaptive_prune_heads to rank and keep the top-X most important heads.
                # 3. Use that ranking to prune a new RoBERTa encoder and save the pruned checkpoint manually.
                # 4. Fine-tune the new RoBERTa encoder from the checkpoint saved above.
                # To avoid registering a separate pruned variant of RoBERTa, the
                # --mha-heads-to-keep argument prunes the model into a version
                # that matches the pruned checkpoint.
if (
safe_hasattr(self.model, "args")
and safe_hasattr(self.model.args, "mha_heads_to_keep")
and self.model.args.mha_heads_to_keep != -1
):
logger.info(
f"Prune model: keep {self.model.args.mha_heads_to_keep} heads for each multihead attention module"
)
for layer in self.model.encoder.sentence_encoder.layers:
reserve_head_index = layer.self_attn._get_reserve_head_index(
num_heads_to_keep=self.model.args.mha_heads_to_keep
)
layer.self_attn._adaptive_prune_heads(
reserve_head_index=reserve_head_index
)
layer.self_attn._set_skip_embed_dim_check()
logger.info(self.model)
                # This block implements AdaPrune for feed-forward layers: it
                # removes redundant units in the feed-forward block of each
                # transformer layer based on their importance.
                # For more info, please refer to the paper: https://openreview.net/forum?id=_CMSV7FTzGI
                # The FFN pruning procedure can be summarized as:
                # 1. Fine-tune the model (e.g. a RoBERTa encoder) on a dataset with regularization.
                # 2. After training, call _get_fc_rank and _prune_fc_layer to rank and keep the top-X most important units.
                # 3. Use that ranking to prune a new RoBERTa encoder and save the pruned checkpoint manually.
                # 4. Fine-tune the new RoBERTa encoder from the checkpoint saved above.
                # To avoid registering a separate pruned variant of RoBERTa, the
                # --ffn-blocks-to-remove argument prunes the model into a version
                # that matches the pruned checkpoint.
if (
safe_hasattr(self.model, "args")
and safe_hasattr(self.model.args, "ffn_blocks_to_remove")
and self.model.args.ffn_blocks_to_remove != -1
):
logger.info(
f"Prune model: remove {self.model.args.ffn_blocks_to_remove} ffn blocks for each transformer layer"
)
for layer in self.model.encoder.sentence_encoder.layers:
remove_index = layer._get_fc_rank(
remove_num=self.model.args.ffn_blocks_to_remove
)
layer._prune_fc_layer(remove_index=remove_index)
logger.info(self.model)
self.model.load_state_dict(
state["model"], strict=True, model_cfg=self.cfg.model
)
# save memory for later steps
del state["model"]
if utils.has_parameters(self.get_criterion()):
self.get_criterion().load_state_dict(
state["criterion"], strict=True
)
del state["criterion"]
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
extra_state = state["extra_state"]
self._optim_history = state["optimizer_history"]
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert (
last_optim["criterion_name"] == self.get_criterion().__class__.__name__
), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}"
assert (
last_optim["optimizer_name"] == self.optimizer.__class__.__name__
), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). {last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}"
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"])
if self.is_fsdp and not self.model.use_sharded_state:
# if use_sharded_state, the last_optim_state is already sharded, skip this
last_optim_state = self.model.get_shard_from_optim_state_dict(
last_optim_state
)
elif not load_on_all_ranks and is_distributed:
last_optim_state = self.optimizer.broadcast_global_state_dict(
last_optim_state
)
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self.set_num_updates(last_optim["num_updates"])
if extra_state is not None:
itr_state = extra_state["train_iterator"]
epoch = itr_state["epoch"]
if "previous_training_time" in extra_state:
self._previous_training_time = extra_state["previous_training_time"]
self._start_time = time.time()
self.lr_step(epoch)
if (
itr_state.get("version", 1) >= 2
and itr_state["iterations_in_epoch"] == 0
):
# reset meters at start of epoch
reset_meters = True
if "metrics" in extra_state and not reset_meters:
metrics.load_state_dict(extra_state["metrics"])
# reset TimeMeters, since their start times don't make sense anymore
for meter in metrics.get_meters("default"):
if isinstance(meter, meters.TimeMeter):
meter.reset()
if self.cfg.ema.store_ema:
if "ema" not in extra_state:
                    logger.warning(
                        "EMA not found in checkpoint, but store_ema is True; "
                        "re-initializing EMA from the checkpoint model weights."
                    )
self.ema.restore(
state["model"], build_fp32_params=self.cfg.ema.ema_fp32
)
else:
logger.info("Loading EMA from checkpoint")
self.ema.restore(extra_state["ema"], build_fp32_params=False)
if self.cfg.ema.ema_fp32:
if "ema_fp32_params" in extra_state:
logger.info("Loading EMA fp32 params from checkpoint")
self.ema.build_fp32_params(extra_state["ema_fp32_params"])
else:
logger.info(
"Building EMA fp32 params from EMA model in checkpoint"
)
self.ema.build_fp32_params()
logger.info(
"Loaded checkpoint {} (epoch {} @ {} updates)".format(
filename, epoch, self.get_num_updates()
)
)
else:
logger.info("No existing checkpoint found {}".format(filename))
return extra_state
def get_train_iterator(
self,
epoch,
combine=True,
load_dataset=True,
data_selector=None,
shard_batch_itr=True,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over the training set for a given epoch."""
if load_dataset:
logger.info("loading train data for epoch {}".format(epoch))
self.task.load_dataset(
self.cfg.dataset.train_subset,
epoch=epoch,
combine=combine,
data_selector=data_selector,
tpu=self.tpu,
)
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.train_subset),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
self.cfg.dataset.max_tokens,
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=(self.cfg.common.seed + epoch)
if self.cfg.dataset.update_ordered_indices_seed
else self.cfg.common.seed,
num_shards=self.data_parallel_world_size if shard_batch_itr else 1,
shard_id=self.data_parallel_rank if shard_batch_itr else 0,
num_workers=self.cfg.dataset.num_workers,
epoch=epoch,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
skip_remainder_batch=self.cfg.optimization.skip_remainder_batch,
grouped_shuffling=self.cfg.dataset.grouped_shuffling,
update_epoch_batch_itr=self.cfg.dataset.update_epoch_batch_itr,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def get_valid_iterator(
self,
subset,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over given validation subset for a given epoch."""
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(subset),
max_tokens=self.cfg.dataset.max_tokens_valid,
max_sentences=self.cfg.dataset.batch_size_valid,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
),
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers,
# always pass a fixed "epoch" to keep validation data consistent
# across training epochs
epoch=1,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
skip_remainder_batch=False,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch."""
logger.info("begin training epoch {}".format(epoch))
self.lr_step_begin_epoch(epoch)
if self.quantizer is not None:
self.quantizer.begin_epoch(epoch)
# task specific setup per epoch
self.task.begin_epoch(epoch, self.get_model())
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("begin_epoch") # wait for all workers
xm.mark_step()
def begin_valid_epoch(self, epoch):
"""Called at the beginning of each validation epoch."""
# task specific setup per validation epoch
self.task.begin_valid_epoch(epoch, self.get_model())
def _split_batch(self, samples):
# split samples
if self.cfg.optimization.batch_split_by_src > 0:
new_samples = []
for sample in samples:
if 'net_input' not in sample:
new_samples.append(sample)
continue
batch_size, srclen = sample['net_input']['src_tokens'].shape
tgtlen = sample['target'].shape[1]
# split_num = bisect_right(self.cfg.optimization.split_batch_by_length, length)
split_num = batch_size * srclen // self.cfg.optimization.batch_split_by_src + 1
# print(split_num, srclen, tgtlen, file=sys.stderr, flush=True)
if split_num == 1:
new_samples.append(sample)
else:
new_samples += [{"net_input":{}} for _ in range(split_num)]
                    # sample id and target
for key, value in sample.items():
if torch.is_tensor(value):
chunks = torch.chunk(value, split_num)
for i, chunk in enumerate(chunks):
new_samples[-split_num + i][key] = chunk
# tensor in net_input
for key, value in sample['net_input'].items():
if torch.is_tensor(value):
chunks = torch.chunk(value, split_num)
for i, chunk in enumerate(chunks):
new_samples[-split_num + i]['net_input'][key] = chunk
else:
for i in range(split_num):
new_samples[-split_num + i]['net_input'][key] = value
# nsentences, ntokens
for i in range(split_num):
new_samples[-split_num + i]['nsentences'] = new_samples[-split_num + i]['id'].shape[0]
new_samples[-split_num + i]['ntokens'] = (new_samples[-split_num + i]['target'] != self.task.tgt_dict.pad()).sum().tolist()
return new_samples
return samples
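    # Worked example for _split_batch above (comment only; numbers are
    # assumed): with batch_split_by_src=300 and source tokens of shape
    # (8, 100), split_num = 8 * 100 // 300 + 1 = 3, so the sample is chunked
    # into three sub-batches and 'nsentences'/'ntokens' are recomputed per
    # chunk.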
def reset_dummy_batch(self, batch):
samples = self._split_batch([batch])
self._dummy_batch = samples[0]
@metrics.aggregate("train")
def train_step(self, samples, raise_oom=False):
"""Do forward, backward and parameter update."""
self._set_seed()
self.model.train()
self.criterion.train()
self.zero_grad()
metrics.log_start_time("train_wall", priority=800, round=0)
# If EMA is enabled through store_ema=True
# and task.uses_ema is True, pass the EMA model as a keyword
# argument to the task.
extra_kwargs = {}
if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False):
extra_kwargs["ema_model"] = self.ema.get_model()
# forward and backward pass
logging_outputs, sample_size, ooms = [], 0, 0
samples = self._split_batch(samples)
for i, sample in enumerate(samples): # delayed update loop
sample, is_dummy_batch = self._prepare_sample(sample)
def maybe_no_sync():
"""
Whenever *samples* contains more than one mini-batch, we
want to accumulate gradients locally and only call
all-reduce in the last backwards pass.
"""
if (
self.data_parallel_world_size > 1
and hasattr(self.model, "no_sync")
and i < len(samples) - 1
# The no_sync context manager results in increased memory
# usage with FSDP, since full-size gradients will be
# accumulated on each GPU. It's typically a better tradeoff
# to do the extra communication with FSDP.
and not self.is_fsdp
):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
try:
with maybe_no_sync():
# forward and backward
loss, sample_size_i, logging_output = self.task.train_step(
sample=sample,
model=self.model,
criterion=self.criterion,
optimizer=self.optimizer,
update_num=self.get_num_updates(),
ignore_grad=is_dummy_batch,
**extra_kwargs,
)
del loss
logging_outputs.append(logging_output)
sample_size += sample_size_i
# emptying the CUDA cache after the first step can
# reduce the chance of OOM
if self.cuda and self.get_num_updates() == 0:
torch.cuda.empty_cache()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if raise_oom:
raise e
logger.warning(
"attempting to recover from OOM in forward/backward pass"
)
ooms += 1
self.zero_grad()
if self.cuda:
torch.cuda.empty_cache()
if self.cfg.distributed_training.distributed_world_size == 1:
return None
else:
raise e
except Exception:
self.consolidate_optimizer()
self.save_checkpoint(
os.path.join(self.cfg.checkpoint.save_dir, "crash.pt"), {}
)
raise
if self.tpu and i < len(samples) - 1:
# tpu-comment: every XLA operation before marking step is
# appended to the IR graph, and processing too many batches
# before marking step can lead to OOM errors.
# To handle gradient accumulation use case, we explicitly
# mark step here for every forward pass without a backward pass
self._xla_markstep_and_send_to_cpu()
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
if torch.is_tensor(sample_size):
sample_size = sample_size.float()
else:
sample_size = float(sample_size)
# gather logging outputs from all replicas
if self._sync_stats():
train_time = self._local_cumulative_training_time()
(
logging_outputs,
(
sample_size,
ooms,
total_train_time,
),
) = self._aggregate_logging_outputs(
logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch
)
self._cumulative_training_time = (
total_train_time / self.data_parallel_world_size
)
overflow = False
try:
with torch.autograd.profiler.record_function("reduce-grads"):
# reduce gradients across workers
self.optimizer.all_reduce_grads(self.model)
if utils.has_parameters(self.criterion):
self.optimizer.all_reduce_grads(self.criterion)
with torch.autograd.profiler.record_function("multiply-grads"):
# multiply gradients by (data_parallel_size / sample_size) since
# DDP normalizes by the number of data parallel workers for
# improved fp16 precision.
# Thus we get (sum_of_gradients / sample_size) at the end.
# In case of fp16, this step also undoes loss scaling.
# (Debugging note: Some optimizers perform this scaling on the
# fly, so inspecting model.parameters() or optimizer.params may
# still show the original, unscaled gradients.)
numer = (
self.data_parallel_world_size
if not self.cfg.optimization.use_bmuf or self._sync_stats()
else 1
)
self.optimizer.multiply_grads(numer / (sample_size or 1.0))
# Note: (sample_size or 1.0) handles the case of a zero gradient, in a
# way that avoids CPU/device transfers in case sample_size is a GPU or
# TPU object. The assumption is that the gradient itself is also 0.
with torch.autograd.profiler.record_function("clip-grads"):
# clip grads
grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm)
# check that grad norms are consistent across workers
# on tpu check tensor is slow
if not self.tpu:
if (
not self.cfg.optimization.use_bmuf
and self.cfg.distributed_training.ddp_backend != "slowmo"
):
self._check_grad_norms(grad_norm)
if not torch.isfinite(grad_norm).all():
# in case of AMP, if gradients are Nan/Inf then
# optimizer step is still required
if self.cfg.common.amp:
overflow = True
else:
# check local gradnorm single GPU case, trigger NanDetector
raise FloatingPointError("gradients are Nan/Inf")
with torch.autograd.profiler.record_function("optimizer"):
# take an optimization step
self.task.optimizer_step(
self.optimizer, model=self.model, update_num=self.get_num_updates()
)
if self.cfg.common.amp and overflow:
if self._amp_retries == self.cfg.common.amp_batch_retries:
logger.info("AMP: skipping this batch.")
self._amp_retries = 0
else:
self._amp_retries += 1
return self.train_step(
samples, raise_oom
) # recursion to feed in same batch
except FloatingPointError:
self.consolidate_optimizer()
self.save_checkpoint(
os.path.join(self.cfg.checkpoint.save_dir, "crash.pt"), {}
)
# re-run the forward and backward pass with hooks attached to print
# out where it fails
self.zero_grad()
with NanDetector(self.get_model()):
for _, sample in enumerate(samples):
sample, _ = self._prepare_sample(sample)
self.task.train_step(
sample,
self.model,
self.criterion,
self.optimizer,
self.get_num_updates(),
ignore_grad=False,
**extra_kwargs,
)
raise
except OverflowError as e:
overflow = True
logger.info(
f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}"
)
grad_norm = torch.tensor(0.0).cuda()
self.zero_grad()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
logger.error("OOM during optimization, irrecoverable")
raise e
# Some distributed wrappers (e.g., SlowMo) need access to the optimizer
# after the step
if hasattr(self.model, "perform_slowmo"):
self.model.perform_slowmo(
self.optimizer.optimizer, getattr(self.optimizer, "fp32_params", None)
)
logging_output = None
if not overflow or self.cfg.distributed_training.ddp_backend == "slowmo":
self.set_num_updates(self.get_num_updates() + 1)
if self.cfg.ema.store_ema:
# Step EMA forward with new model.
self.ema.step(
self.get_model(),
self.get_num_updates(),
)
metrics.log_scalar(
"ema_decay",
self.ema.get_decay(),
priority=10000,
round=5,
weight=0,
)
if self.tpu:
import torch_xla.core.xla_model as xm
# mark step on TPUs
self._xla_markstep_and_send_to_cpu()
# only log stats every log_interval steps
# this causes wps to be misreported when log_interval > 1
logging_output = {}
if self.get_num_updates() % self.cfg.common.log_interval == 0:
# log memory usage
mem_info = xm.get_memory_info(self.device)
gb_free = mem_info["kb_free"] / 1024 / 1024
gb_total = mem_info["kb_total"] / 1024 / 1024
metrics.log_scalar(
"gb_free", gb_free, priority=1500, round=1, weight=0
)
metrics.log_scalar(
"gb_total", gb_total, priority=1600, round=1, weight=0
)
logging_outputs = self._xla_markstep_and_send_to_cpu(
logging_outputs
)
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm
)
# log whenever there's an XLA compilation, since these
# slow down training and may indicate opportunities for
# optimization
self._check_xla_compilation()
else:
if self.cuda and self.cuda_env is not None:
# log minimum free memory over the iteration
gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
torch.cuda.reset_peak_memory_stats()
gb_free = self.cuda_env.total_memory_in_GB - gb_used
metrics.log_scalar(
"gb_free", gb_free, priority=1500, round=1, weight=0
)
# log stats
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm
)
# clear CUDA cache to reduce memory fragmentation
if (
self.cuda
and self.cfg.common.empty_cache_freq > 0
and (
(self.get_num_updates() + self.cfg.common.empty_cache_freq - 1)
% self.cfg.common.empty_cache_freq
)
== 0
):
torch.cuda.empty_cache()
if self.cfg.common.fp16 or self.cfg.common.amp:
metrics.log_scalar(
"loss_scale",
(
self.optimizer.scaler.loss_scale
if self.cfg.common.fp16
else self.optimizer.scaler.get_scale()
),
priority=700,
round=4,
weight=0,
)
metrics.log_stop_time("train_wall")
return logging_output
@metrics.aggregate("valid")
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("valid_step") # wait for all workers
# If EMA is enabled through store_ema=True
# and task.uses_ema is True, pass the EMA model as a keyword
# argument to the task.
extra_kwargs = {}
if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False):
extra_kwargs["ema_model"] = self.ema.get_model()
with torch.no_grad():
self.model.eval()
self.criterion.eval()
sample, is_dummy_batch = self._prepare_sample(sample)
try:
_loss, sample_size, logging_output = self.task.valid_step(
sample, self.model, self.criterion, **extra_kwargs
)
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if not raise_oom:
logger.warning(
"ran out of memory in validation step, retrying batch"
)
for p in self.model.parameters():
if p.grad is not None:
p.grad = None # free some memory
if self.cuda:
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
raise e
logging_outputs = [logging_output]
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
# gather logging outputs from all replicas
if self.data_parallel_world_size > 1:
logging_outputs, (sample_size,) = self._aggregate_logging_outputs(
logging_outputs,
sample_size,
ignore=is_dummy_batch,
)
# log validation stats
if self.tpu:
logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)
logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
return logging_output
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step_begin_epoch(self, epoch):
"""Adjust the learning rate at the beginning of the epoch."""
self.lr_scheduler.step_begin_epoch(epoch)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate at the end of the epoch."""
self.lr_scheduler.step(epoch, val_loss)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step_update(self):
"""Update the learning rate after each update."""
new_lr = self.lr_scheduler.step_update(self.get_num_updates())
if isinstance(new_lr, dict):
for k, v in new_lr.items():
metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300)
new_lr = new_lr.get("default", next(iter(new_lr.values())))
else:
metrics.log_scalar("lr", new_lr, weight=0, priority=300)
return new_lr
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_criterion(self):
"""Get the (non-wrapped) criterion instance."""
return self._criterion
def get_meter(self, name):
"""[deprecated] Get a specific meter by name."""
from fairseq import meters
if "get_meter" not in self._warn_once:
self._warn_once.add("get_meter")
utils.deprecation_warning(
"Trainer.get_meter is deprecated. Please use fairseq.metrics instead."
)
train_meters = metrics.get_meters("train")
if train_meters is None:
train_meters = {}
if name == "train_loss" and "loss" in train_meters:
return train_meters["loss"]
elif name == "train_nll_loss":
# support for legacy train.py, which assumed this meter is
# always initialized
m = train_meters.get("nll_loss", None)
return m or meters.AverageMeter()
elif name == "wall":
# support for legacy train.py, which assumed this meter is
# always initialized
m = metrics.get_meter("default", "wall")
return m or meters.TimeMeter()
elif name == "wps":
m = metrics.get_meter("train", "wps")
return m or meters.TimeMeter()
elif name in {"valid_loss", "valid_nll_loss"}:
# support for legacy train.py, which assumed these meters
# are always initialized
k = name[len("valid_") :]
m = metrics.get_meter("valid", k)
return m or meters.AverageMeter()
elif name == "oom":
return meters.AverageMeter()
elif name in train_meters:
return train_meters[name]
return None
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
self.lr_step_update()
if self.quantizer:
self.quantizer.step_update(self._num_updates)
metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200)
def clip_grad_norm(self, clip_norm):
def agg_norm_fn(total_norm):
total_norm = total_norm.cuda().float() ** 2
total_norm = distributed_utils.all_reduce(
total_norm, group=self.data_parallel_process_group
)
return total_norm**0.5
should_agg_norm = self.is_fsdp and (
self.data_parallel_process_group is not None
or torch.distributed.is_initialized()
)
return self.optimizer.clip_grad_norm(
clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None
)
def cumulative_training_time(self):
if self._cumulative_training_time is None:
# single GPU
return self._local_cumulative_training_time()
else:
return self._cumulative_training_time
def _local_cumulative_training_time(self):
"""Aggregate training time in seconds."""
return time.time() - self._start_time + self._previous_training_time
def _fp_convert_sample(self, sample):
def apply_half(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.half)
return t
def apply_bfloat16(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.bfloat16)
return t
if self.cfg.common.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if self.cfg.common.bf16:
sample = utils.apply_to_sample(apply_bfloat16, sample)
return sample
def _prepare_sample(self, sample, is_dummy=False):
if sample == "DUMMY":
raise Exception(
"Trying to use an uninitialized 'dummy' batch. This usually indicates "
"that the total number of batches is smaller than the number of "
"participating GPUs. Try reducing the batch size or using fewer GPUs."
)
if sample is None or len(sample) == 0:
assert (
self._dummy_batch is not None and len(self._dummy_batch) > 0
), "Invalid dummy batch: {}".format(self._dummy_batch)
sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True)
return sample, True
        # Given that PCIe/NVLink bandwidth is significantly smaller than DRAM bandwidth,
# it makes sense to do the format conversion on the CPU and then transfer
# a smaller buffer to the device. This also saves GPU memory capacity.
if self.cfg.common.on_cpu_convert_precision:
sample = self._fp_convert_sample(sample)
if self.cuda:
if self.pipeline_model_parallel:
if "target" in sample:
sample["target"] = utils.move_to_cuda(
sample["target"], device=self.last_device
)
else:
sample = utils.move_to_cuda(sample)
elif self.tpu and is_dummy:
# the dummy batch may not be on the appropriate device
sample = utils.move_to_cuda(sample, device=self.device)
if not self.cfg.common.on_cpu_convert_precision:
sample = self._fp_convert_sample(sample)
if self._dummy_batch == "DUMMY":
self._dummy_batch = sample
return sample, False
def _set_seed(self):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.cfg.common.seed + self.get_num_updates()
utils.set_torch_seed(seed)
def _sync_stats(self):
        # Return True when using multiple GPUs with DDP, or when using BMUF on
        # multiple GPUs and this update is a BMUF sync step after the warmup
        # iterations have completed.
if self.data_parallel_world_size == 1:
return False
elif self.cfg.optimization.use_bmuf:
return (
self.get_num_updates() + 1
) % self.cfg.bmuf.global_sync_iter == 0 and (
self.get_num_updates() + 1
) > self.cfg.bmuf.warmup_iterations
else:
return True
def _log_oom(self, exc):
msg = "OOM: Ran out of memory with exception: {}".format(exc)
logger.warning(msg)
if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
for device_idx in range(torch.cuda.device_count()):
logger.warning(torch.cuda.memory_summary(device=device_idx))
sys.stderr.flush()
def _aggregate_logging_outputs(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):
return self._fast_stat_sync_sum(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
else:
return self._all_gather_list_sync(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
def _all_gather_list_sync(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. all_gather_list_sync is
suitable when logging outputs are complex types.
"""
if self.tpu:
raise NotImplementedError
if ignore:
logging_outputs = []
results = list(
zip(
*distributed_utils.all_gather_list(
[logging_outputs] + list(extra_stats_to_sum),
max_size=getattr(self.cfg.common, "all_gather_list_size", 16384),
group=self.data_parallel_process_group,
)
)
)
logging_outputs, extra_stats_to_sum = results[0], results[1:]
logging_outputs = list(chain.from_iterable(logging_outputs))
extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]
return logging_outputs, extra_stats_to_sum
def _fast_stat_sync_sum(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. fast_stat_sync_sum is
faster than all_gather_list_sync, but is only suitable when
logging outputs are scalars and can be summed. Note that
*logging_outputs* cannot contain any nested dicts/lists.
"""
data = {}
for i, stat in enumerate(extra_stats_to_sum):
data["extra_stats_" + str(i)] = stat
if len(logging_outputs) > 0:
log_keys = list(logging_outputs[0].keys())
for k in log_keys:
if not ignore:
v = sum(log[k] for log in logging_outputs if k in log)
else:
v = logging_outputs[0][k]
v = torch.zeros_like(v) if torch.is_tensor(v) else 0
data["logging_outputs_" + k] = v
else:
log_keys = None
data = distributed_utils.all_reduce_dict(
data, device=self.device, group=self.data_parallel_process_group
)
extra_stats_to_sum = [
data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum))
]
if log_keys is not None:
logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}]
else:
logging_outputs = []
return logging_outputs, extra_stats_to_sum
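    # Worked example for _fast_stat_sync_sum above (comment only; numbers are
    # assumed): two workers logging {"loss": 2.0, "ntokens": 100} and
    # {"loss": 4.0, "ntokens": 300} are all-reduced into the single output
    # [{"loss": 6.0, "ntokens": 400}]; *extra_stats_to_sum entries (e.g.
    # sample_size, ooms) are summed element-wise the same way.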
def _check_grad_norms(self, grad_norm):
"""Check that grad norms are consistent across workers."""
if self._grad_norm_buf is not None:
self._grad_norm_buf.zero_()
self._grad_norm_buf[self.data_parallel_rank] = grad_norm
distributed_utils.all_reduce(
self._grad_norm_buf, group=self.data_parallel_process_group
)
def is_consistent(tensor):
max_abs_diff = torch.max(torch.abs(tensor - tensor[0]))
return (
(
torch.isfinite(tensor).all()
and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()
)
or (self.cfg.common.amp and not torch.isfinite(tensor).all())
# in case of amp non-finite grads are fine
)
if not is_consistent(self._grad_norm_buf):
pretty_detail = "\n".join(
"rank {:3d} = {:.8f}".format(r, n)
for r, n in enumerate(self._grad_norm_buf.tolist())
)
error_detail = "grad_norm across the workers:\n{}\n".format(
pretty_detail
)
# use FloatingPointError to trigger NanDetector
raise FloatingPointError(
"Fatal error: gradients are inconsistent between workers. "
"Try --ddp-backend=legacy_ddp. "
"Or are you mixing up different generation of GPUs in training?"
+ "\n"
+ "-" * 80
+ "\n{}\n".format(error_detail)
+ "-" * 80
)
def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):
if grad_norm is not None and (
not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm)
):
metrics.log_speed("ups", 1.0, priority=100, round=2)
metrics.log_scalar("gnorm", grad_norm, priority=400, round=3)
if self.cfg.optimization.clip_norm > 0:
metrics.log_scalar(
"clip",
torch.where(
grad_norm > self.cfg.optimization.clip_norm,
grad_norm.new_tensor(100),
grad_norm.new_tensor(0),
),
priority=500,
round=1,
)
with metrics.aggregate() as agg:
if logging_outputs is not None:
self.task.reduce_metrics(logging_outputs, self.get_criterion())
del logging_outputs
# extra warning for criterions that don't properly log a loss value
if "loss" not in agg:
if "loss" not in self._warn_once:
self._warn_once.add("loss")
logger.warning(
"Criterion.reduce_metrics did not log a 'loss' value, "
"which may break some functionality"
)
metrics.log_scalar("loss", -1)
# support legacy interface
if self.tpu:
logging_output = {}
else:
logging_output = agg.get_smoothed_values()
logging_output["sample_size"] = sample_size
for key_to_delete in ["ppl", "wps", "wpb", "bsz"]:
if key_to_delete in logging_output:
del logging_output[key_to_delete]
return logging_output
def _check_xla_compilation(self):
import torch_xla.debug.metrics as met
compile_stats = met.metric_data("CompileTime")
if compile_stats is None:
return
num_xla_compiles = compile_stats[0]
if num_xla_compiles > self._num_xla_compiles:
logger.warning(
"XLA compilation detected on device #{}; too many of these can lead "
"to slow training, but we expect a few in the beginning".format(
self.cfg.distributed_training.distributed_rank
)
)
self._num_xla_compiles = num_xla_compiles
def _xla_markstep_and_send_to_cpu(self, data=None):
import torch_xla.core.xla_model as xm
xm.mark_step()
if data is not None:
from fairseq.utils import xla_device_to_cpu
return xla_device_to_cpu(data)
def _catalog_shared_params(module, memo=None, prefix=""):
if memo is None:
first_call = True
memo = {}
else:
first_call = False
for name, param in module._parameters.items():
param_prefix = prefix + ("." if prefix else "") + name
if param not in memo:
memo[param] = []
memo[param].append(param_prefix)
for name, m in module._modules.items():
if m is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
_catalog_shared_params(m, memo, submodule_prefix)
if first_call:
return [x for x in memo.values() if len(x) > 1]
def _get_module_by_path(module, path):
path = path.split(".")
for name in path:
module = getattr(module, name)
return module
def _set_module_by_path(module, path, value):
path = path.split(".")
for name in path[:-1]:
module = getattr(module, name)
setattr(module, path[-1], value)
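# Illustrative sketch, not part of the original file: _catalog_shared_params
# reports every parameter reachable under more than one attribute path (e.g.
# tied input/output embeddings), which is how shared parameters are detected.
def _shared_params_demo():
    import torch.nn as nn  # local import; this module already imports torch
    m = nn.Module()
    m.embed = nn.Linear(4, 4)
    m.out = m.embed  # tie the module: its parameters are reachable twice
    assert _catalog_shared_params(m) == [
        ["embed.weight", "out.weight"],
        ["embed.bias", "out.bias"],
    ]
    assert _get_module_by_path(m, "embed") is m.embed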
# DA-Transformer-main/fairseq/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
import contextlib
import copy
import importlib
import logging
import os
import sys
import warnings
from itertools import accumulate
from typing import TYPE_CHECKING, Callable, Dict, List, Optional
import torch
import torch.nn.functional as F
from torch import Tensor
if TYPE_CHECKING:
from fairseq.modules.multihead_attention import MultiheadAttention
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
logger = logging.getLogger(__name__)
MANIFOLD_PATH_SEP = "|"
class FileContentsAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(FileContentsAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
from fairseq.file_io import PathManager
if PathManager.isfile(values):
with PathManager.open(values) as f:
argument = f.read().strip()
else:
argument = values
setattr(namespace, self.dest, argument)
def split_paths(paths: str, separator=os.pathsep) -> List[str]:
return (
paths.split(separator) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP)
)
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
from fairseq import checkpoint_utils
deprecation_warning(
"utils.load_ensemble_for_inference is deprecated. "
"Please use checkpoint_utils.load_model_ensemble instead."
)
return checkpoint_utils.load_model_ensemble(
filenames, arg_overrides=model_arg_overrides, task=task
)
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, collections.OrderedDict):
# OrderedDict has attributes that needs to be preserved
od = collections.OrderedDict(
(key, _apply(value)) for key, value in x.items()
)
od.__dict__ = x.__dict__
return od
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
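# Illustrative sketch, not part of the original file: how apply_to_sample
# recurses through a typical nested sample. The sample layout below is an
# assumption for demonstration purposes.
def _apply_to_sample_demo():
    sample = {
        "net_input": {"src_tokens": torch.zeros(2, 5, dtype=torch.long)},
        "target": torch.zeros(2, 7, dtype=torch.long),
        "ntokens": 14,  # non-tensor leaves are returned unchanged
    }
    halved = apply_to_sample(lambda t: t[:1], sample)
    assert halved["net_input"]["src_tokens"].shape == (1, 5)
    assert halved["ntokens"] == 14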
def move_to_cuda(sample, device=None):
device = device or torch.cuda.current_device()
def _move_to_cuda(tensor):
# non_blocking is ignored if tensor is not pinned, so we can always set
# to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
return tensor.to(device=device, non_blocking=True)
return apply_to_sample(_move_to_cuda, sample)
def move_to_cpu(sample):
def _move_to_cpu(tensor):
# PyTorch has poor support for half tensors (float16) on CPU.
# Move any such tensors to float32.
if tensor.dtype in {torch.bfloat16, torch.float16}:
tensor = tensor.to(dtype=torch.float32)
return tensor.cpu()
return apply_to_sample(_move_to_cpu, sample)
def move_to_tpu(sample):
import torch_xla.core.xla_model as xm
device = xm.xla_device()
def _move_to_tpu(tensor):
return tensor.to(device)
return apply_to_sample(_move_to_tpu, sample)
def get_incremental_state(
module: "MultiheadAttention",
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
return module.get_incremental_state(incremental_state, key)
def set_incremental_state(
module: "MultiheadAttention",
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
result = module.set_incremental_state(incremental_state, key, value)
if result is not None:
incremental_state = result
return incremental_state
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str) and len(replace_unk) > 0:
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, "r") as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor(
[float(weight) for weight in pieces[1:]]
)
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return " ".join(hypo_tokens)
def post_process_prediction(
hypo_tokens,
src_str,
alignment,
align_dict,
tgt_dict,
remove_bpe=None,
extra_symbols_to_ignore=None,
):
hypo_str = tgt_dict.string(
hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore
)
if align_dict is not None:
hypo_str = replace_unk(
hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()
)
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
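# Illustrative sketch, not part of the original file: position numbers start
# at padding_idx + 1 while padding slots keep padding_idx itself.
def _make_positions_demo():
    pad = 1  # assumed padding index
    tokens = torch.tensor([[5, 6, 7, pad, pad]])
    assert make_positions(tokens, padding_idx=pad).tolist() == [[2, 3, 4, 1, 1]]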
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, "buf"):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
buffered_arange.buf.resize_(max)
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(
src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False
):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
buffered = torch.empty(0).long()
if max_len > 0:
torch.arange(max_len, out=buffered)
range = buffered.type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
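# Illustrative sketch, not part of the original file: moving left padding to
# the right without changing token order.
def _convert_padding_direction_demo():
    pad = 1  # assumed padding index
    left_padded = torch.tensor([[pad, pad, 5, 6, 7]])
    right_padded = convert_padding_direction(left_padded, pad, left_to_right=True)
    assert right_padded.tolist() == [[5, 6, 7, pad, pad]]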
def item(tensor):
# tpu-comment: making this a no-op for xla devices.
if torch.is_tensor(tensor) and tensor.device.type == "xla":
return tensor.detach()
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:
per_device_grads = {}
norms = []
for grad in grads:
device = grad.device
cur_device_grads = per_device_grads.get(device)
if cur_device_grads is None:
cur_device_grads = []
per_device_grads[device] = cur_device_grads
cur_device_grads.append(grad)
for device in per_device_grads.keys():
cur_device_grads = per_device_grads[device]
if device.type == "cuda":
# TODO(msb) return has_inf
has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
with torch.cuda.device(device):
norm = multi_tensor_l2norm(
chunk_size, has_inf, [cur_device_grads], False
)
norms.append(norm[0].to(torch.cuda.current_device()))
else:
norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
total_norm = torch.norm(torch.stack(norms))
return total_norm
@torch.no_grad()
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
def grad_exists(p):
return p is not None and getattr(p, "grad", None) is not None
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
grads = [
p.grad.detach() for p in params if grad_exists(p) and not hasattr(p, "expert")
]
expert_grads = [
p.grad.detach() for p in params if grad_exists(p) and hasattr(p, "expert")
]
if len(grads) == 0:
if len(params) > 0:
return params[0].new_tensor(0.0)
else:
return torch.tensor(0.0)
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
device = torch.cuda.current_device()
elif grads[0].device.type == "xla":
device = grads[0].device
else:
device = torch.device("cpu")
total_norm = torch.norm(
torch.stack(
[torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
)
)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads + expert_grads:
g.mul_(clip_coef)
return total_norm
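# Illustrative sketch, not part of the original file: clip_grad_norm_ returns
# the pre-clipping norm and rescales gradients in place when max_norm > 0.
def _clip_grad_norm_demo():
    p = torch.nn.Parameter(torch.ones(4))
    p.grad = torch.full((4,), 2.0)  # total norm = sqrt(4 * 2^2) = 4.0
    total = clip_grad_norm_([p], max_norm=1.0)
    assert abs(total.item() - 4.0) < 1e-4  # returns the unclipped norm
    assert p.grad.norm().item() <= 1.0 + 1e-4  # grads rescaled to max_norm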
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return tuple([arg_number] * len(arg_structure))
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
max_positions, arg = _match_types(max_positions, arg)
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
max_positions = map_value_update(max_positions, arg)
else:
max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
return max_positions
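# Illustrative sketch, not part of the original file: a scalar budget is
# broadcast against tuple constraints and the element-wise minimum wins.
def _resolve_max_positions_demo():
    # task allows (1024, 1024), model allows (512, 2048), dataset caps at 600
    assert resolve_max_positions((1024, 1024), (512, 2048), 600) == (512, 600)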
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path) and not os.path.isfile(
os.path.dirname(module_path)
):
fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
else:
fairseq_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
else:
raise FileNotFoundError(module_path)
# ensure that user modules are only imported once
import_user_module.memo = getattr(import_user_module, "memo", set())
if module_path not in import_user_module.memo:
import_user_module.memo.add(module_path)
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
tasks_path = os.path.join(module_path, "tasks")
if os.path.exists(tasks_path):
from fairseq.tasks import import_tasks
import_tasks(tasks_path, f"{module_name}.tasks")
models_path = os.path.join(module_path, "models")
if os.path.exists(models_path):
from fairseq.models import import_models
import_models(models_path, f"{module_name}.models")
else:
raise ImportError(
"Failed to import --user-dir={} because the corresponding module name "
"({}) is not globally unique. Please rename the directory to "
"something unique and try again.".format(module_path, module_name)
)
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.log_softmax(x.float(), dim=dim)
else:
return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss, round=2, base=2):
from fairseq.logging.meters import safe_round
if loss is None:
return 0.0
try:
return safe_round(base**loss, round)
except OverflowError:
return float("inf")
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def relu_squared(x: torch.Tensor):
return F.relu(x).pow(2)
def get_activation_fn(activation: str) -> Callable:
"""Returns the activation function corresponding to `activation`"""
from fairseq.modules import gelu, gelu_accurate
if activation == "relu":
return F.relu
elif activation == "relu_squared":
return relu_squared
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
deprecation_warning(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
elif activation == "swish":
        return torch.nn.SiLU()  # instantiate so the returned object applies the activation
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
    return [
        "relu",
        "relu_squared",
        "gelu",
        "gelu_fast",  # deprecated
        "gelu_accurate",
        "tanh",
        "linear",
        "swish",
    ]
@contextlib.contextmanager
def model_eval(model):
is_training = model.training
model.eval()
yield
model.train(is_training)
def has_parameters(module):
try:
next(module.parameters())
return True
except StopIteration:
return False
def get_rng_state():
state = {"torch_rng_state": torch.get_rng_state()}
if xm is not None:
state["xla_rng_state"] = xm.get_rng_state()
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(state):
torch.set_rng_state(state["torch_rng_state"])
if xm is not None:
xm.set_rng_state(state["xla_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
class set_torch_seed(object):
def __init__(self, seed):
assert isinstance(seed, int)
self.rng_state = get_rng_state()
torch.manual_seed(seed)
if xm is not None:
xm.set_rng_state(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def __enter__(self):
return self
def __exit__(self, *exc):
set_rng_state(self.rng_state)
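# Illustrative sketch, not part of the original file: used as a context
# manager, set_torch_seed restores the previous RNG state on exit, so the
# surrounding randomness is unaffected and seeded sampling is reproducible.
def _set_torch_seed_demo():
    with set_torch_seed(7):
        a = torch.rand(3)
    with set_torch_seed(7):
        b = torch.rand(3)
    assert torch.equal(a, b)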
def parse_alignment(line):
"""
    Parses a single line from the alignment file.
Args:
line (str): String containing the alignment of the format:
<src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
<src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.
Returns:
torch.IntTensor: packed alignments of shape (2 * m).
"""
alignments = line.strip().split()
parsed_alignment = torch.IntTensor(2 * len(alignments))
for idx, alignment in enumerate(alignments):
src_idx, tgt_idx = alignment.split("-")
parsed_alignment[2 * idx] = int(src_idx)
parsed_alignment[2 * idx + 1] = int(tgt_idx)
return parsed_alignment
def get_token_to_word_mapping(tokens, exclude_list):
n = len(tokens)
word_start = [int(token not in exclude_list) for token in tokens]
word_idx = list(accumulate(word_start))
token_to_word = {i: word_idx[i] for i in range(n)}
return token_to_word
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = (
((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1)
)
src_invalid = (
((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1)
)
src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
alignment = []
if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
attn_valid = attn[tgt_valid]
attn_valid[:, src_invalid] = float("-inf")
_, src_indices = attn_valid.max(dim=1)
for tgt_idx, src_idx in zip(tgt_valid, src_indices):
alignment.append(
(
src_token_to_word[src_idx.item()] - 1,
tgt_token_to_word[tgt_idx.item()] - 1,
)
)
return alignment
def extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = ((tgt_sent != pad)).nonzero(as_tuple=False)
src_valid = ((src_sent != pad)).nonzero(as_tuple=False).squeeze(dim=-1)
alignment = []
if len(tgt_valid) != 0 and len(src_valid) != 0:
attn_valid = attn[tgt_valid, src_valid]
alignment = [
["{:.6f}".format(p) for p in src_probs.tolist()] for src_probs in attn_valid
]
return alignment
def new_arange(x, *size):
"""
Return a Tensor of `size` filled with a range function on the device of x.
If size is empty, using the size of the variable x.
"""
if len(size) == 0:
size = x.size()
return torch.arange(size[-1], device=x.device).expand(*size).contiguous()
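# Illustrative sketch, not part of the original file: the range runs along the
# last dimension and is broadcast over the leading ones.
def _new_arange_demo():
    x = torch.zeros(2, 3)
    assert new_arange(x).tolist() == [[0, 1, 2], [0, 1, 2]]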
def get_tpu_device():
return xm.xla_device()
def tpu_data_loader(itr):
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
from fairseq.data import iterators
xm.rendezvous("tpu_data_loader") # wait for all workers
xm.mark_step()
device = xm.xla_device()
return iterators.CountingIterator(
pl.ParallelLoader(itr, [device]).per_device_loader(device),
start=getattr(itr, "n", 0),
total=len(itr),
)
def is_xla_tensor(tensor):
return torch.is_tensor(tensor) and tensor.device.type == "xla"
def index_put(tensor, indices, value):
if is_xla_tensor(tensor):
for _ in range(indices.dim(), tensor.dim()):
indices = indices.unsqueeze(-1)
if indices.size(-1) < tensor.size(-1):
indices = indices.expand_as(tensor)
tensor = torch.mul(tensor, ~indices) + torch.mul(value, indices)
else:
tensor[indices] = value
return tensor
def xla_device_to_cpu(dat):
import torch_xla.core.xla_model as xm
return xm._maybe_convert_to_cpu(dat)
class CudaEnvironment(object):
def __init__(self):
cur_device = torch.cuda.current_device()
prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device))
self.name = prop.name
self.major = prop.major
self.minor = prop.minor
self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024
@staticmethod
def pretty_print_cuda_env_list(cuda_env_list):
"""
        Given a list of CudaEnvironments, pretty-print them.
"""
num_workers = len(cuda_env_list)
center = "CUDA enviroments for all {} workers".format(num_workers)
banner_len = 40 - len(center) // 2
first_line = "*" * banner_len + center + "*" * banner_len
logger.info(first_line)
for r, env in enumerate(cuda_env_list):
logger.info(
"rank {:3d}: ".format(r)
+ "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor)
+ "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB)
+ "name = {:40s}".format(env.name)
)
logger.info(first_line)
def csv_str_list(x):
return x.split(",")
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
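# Illustrative sketch, not part of the original file: string inputs are
# evaluated into lists, while scalar inputs fall back to a one-element list
# via the TypeError branch.
def _eval_str_list_demo():
    assert eval_str_list("[0.1, 0.2]") == [0.1, 0.2]
    assert eval_str_list(3, type=int) == [3]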
def eval_str_dict(x, type=dict):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
return x
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
def reset_logging():
root = logging.getLogger()
for handler in root.handlers:
root.removeHandler(handler)
root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
root.addHandler(handler)
def safe_getattr(obj, k, default=None):
"""Returns obj[k] if it exists and is not None, otherwise returns default."""
from omegaconf import OmegaConf
if OmegaConf.is_config(obj):
return obj[k] if k in obj and obj[k] is not None else default
return getattr(obj, k, default)
def safe_hasattr(obj, k):
"""Returns True if the given key exists and is not None."""
return getattr(obj, k, None) is not None
null | DA-Transformer-main/fairseq/benchmark/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# import models/tasks to register them
from . import dummy_dataset, dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa
| 303 | 37 | 85 | py |
null | DA-Transformer-main/fairseq/benchmark/dummy_dataset.py | import numpy as np
from fairseq.data import FairseqDataset
class DummyDataset(FairseqDataset):
def __init__(self, batch, num_items, item_size):
super().__init__()
self.batch = batch
self.num_items = num_items
self.item_size = item_size
def __getitem__(self, index):
return index
def __len__(self):
return self.num_items
def collater(self, samples):
return self.batch
@property
def sizes(self):
return np.array([self.item_size] * self.num_items)
def num_tokens(self, index):
return self.item_size
def size(self, index):
return self.item_size
def ordered_indices(self):
return np.arange(self.num_items)
@property
def supports_prefetch(self):
return False
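# Hedged usage sketch: every index collates to the same pre-built batch, so the
# data pipeline adds no overhead when benchmarking model throughput, e.g.
#   ds = DummyDataset(batch={"id": 1}, num_items=1000, item_size=16)
#   ds.collater([0, 1, 2])  # -> {"id": 1}, whatever the sampled indices are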
| 803 | 20.72973 | 58 | py |
null | DA-Transformer-main/fairseq/benchmark/dummy_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
import torch
from .dummy_dataset import DummyDataset
from fairseq.data import Dictionary
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class DummyLMConfig(FairseqDataclass):
dict_size: int = 49996
dataset_size: int = 100000
tokens_per_sample: int = field(
default=512, metadata={"help": "max sequence length"}
)
add_bos_token: bool = False
batch_size: Optional[int] = II("dataset.batch_size")
max_tokens: Optional[int] = II("dataset.max_tokens")
max_target_positions: int = II("task.tokens_per_sample")
@register_task("dummy_lm", dataclass=DummyLMConfig)
class DummyLMTask(FairseqTask):
def __init__(self, cfg: DummyLMConfig):
super().__init__(cfg)
# load dictionary
self.dictionary = Dictionary()
for i in range(cfg.dict_size):
self.dictionary.add_symbol("word{}".format(i))
self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8
logger.info("dictionary: {} types".format(len(self.dictionary)))
seq = torch.arange(cfg.tokens_per_sample + 1) + self.dictionary.pad() + 1
self.dummy_src = seq[:-1]
self.dummy_tgt = seq[1:]
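        # The two buffers above are the same token ramp shifted by one
        # position: the standard next-token language-modeling setup.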
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if self.cfg.batch_size is not None:
bsz = self.cfg.batch_size
else:
bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample)
self.datasets[split] = DummyDataset(
{
"id": 1,
"net_input": {
"src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]),
"src_lengths": torch.full(
(bsz,), self.cfg.tokens_per_sample, dtype=torch.long
),
},
"target": torch.stack([self.dummy_tgt for _ in range(bsz)]),
"nsentences": bsz,
"ntokens": bsz * self.cfg.tokens_per_sample,
},
num_items=self.cfg.dataset_size,
item_size=self.cfg.tokens_per_sample,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| 2,757 | 31.833333 | 84 | py |
null | DA-Transformer-main/fairseq/benchmark/dummy_masked_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
import torch
from omegaconf import II
from .dummy_dataset import DummyDataset
from fairseq.data import Dictionary
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
logger = logging.getLogger(__name__)
@dataclass
class DummyMaskedLMConfig(FairseqDataclass):
dict_size: int = 49996
dataset_size: int = 100000
tokens_per_sample: int = field(
default=512,
metadata={
"help": "max number of total tokens over all"
" segments per sample for BERT dataset"
},
)
batch_size: Optional[int] = II("dataset.batch_size")
max_tokens: Optional[int] = II("dataset.max_tokens")
max_target_positions: int = II("task.tokens_per_sample")
@register_task("dummy_masked_lm", dataclass=DummyMaskedLMConfig)
class DummyMaskedLMTask(FairseqTask):
def __init__(self, cfg: DummyMaskedLMConfig):
super().__init__(cfg)
self.dictionary = Dictionary()
for i in range(cfg.dict_size):
self.dictionary.add_symbol("word{}".format(i))
logger.info("dictionary: {} types".format(len(self.dictionary)))
# add mask token
self.mask_idx = self.dictionary.add_symbol("<mask>")
self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8
mask_idx = 0
pad_idx = 1
seq = torch.arange(cfg.tokens_per_sample) + pad_idx + 1
mask = torch.arange(2, cfg.tokens_per_sample, 7) # ~15%
src = seq.clone()
src[mask] = mask_idx
tgt = torch.full_like(seq, pad_idx)
tgt[mask] = seq[mask]
self.dummy_src = src
self.dummy_tgt = tgt
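        # tgt is pad everywhere except the masked positions, so a criterion
        # that ignores padding computes the loss only on the masked tokens.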
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if self.cfg.batch_size is not None:
bsz = self.cfg.batch_size
else:
bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample)
self.datasets[split] = DummyDataset(
{
"id": 1,
"net_input": {
"src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]),
"src_lengths": torch.full(
(bsz,), self.cfg.tokens_per_sample, dtype=torch.long
),
},
"target": torch.stack([self.dummy_tgt for _ in range(bsz)]),
"nsentences": bsz,
"ntokens": bsz * self.cfg.tokens_per_sample,
},
num_items=self.cfg.dataset_size,
item_size=self.cfg.tokens_per_sample,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| 3,123 | 31.884211 | 84 | py |
null | DA-Transformer-main/fairseq/benchmark/dummy_model.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
from fairseq.data import Dictionary
from fairseq.models import (
FairseqDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
@register_model("dummy_model")
class DummyModel(FairseqLanguageModel):
def __init__(self, args, encoder):
super().__init__(encoder)
self.args = args
@staticmethod
def add_args(parser):
parser.add_argument("--num-layers", type=int, default=24)
parser.add_argument("--embed-dim", type=int, default=1024)
@classmethod
def build_model(cls, args, task):
encoder = DummyEncoder(
num_embed=len(task.target_dictionary),
embed_dim=args.embed_dim,
num_layers=args.num_layers,
)
return cls(args, encoder)
def forward(self, src_tokens, masked_tokens=None, **kwargs):
return self.decoder(src_tokens, masked_tokens=masked_tokens)
class DummyEncoder(FairseqDecoder):
def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24):
super().__init__(Dictionary())
self.embed = nn.Embedding(
num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0
)
self.layers_a = nn.ModuleList(
[
nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, 3 * embed_dim), # q, k, v input projection
nn.Linear(3 * embed_dim, embed_dim), # skip self-attention
nn.Linear(embed_dim, embed_dim), # output projection
nn.Dropout(),
)
for i in range(num_layers)
]
)
self.layers_b = nn.ModuleList(
[
nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, 4 * embed_dim), # FFN
nn.ReLU(),
nn.Linear(4 * embed_dim, embed_dim), # FFN
nn.Dropout(0.1),
)
for i in range(num_layers)
]
)
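        # layers_a mirrors the projection cost of a self-attention block
        # (without computing attention, per the inline comment); layers_b is
        # the position-wise FFN. Together they approximate a transformer
        # layer's parameter count for benchmarking purposes.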
self.out_proj = nn.Linear(embed_dim, num_embed)
def forward(self, tokens, masked_tokens=None):
x = self.embed(tokens)
for layer_a, layer_b in zip(self.layers_a, self.layers_b):
x = x + layer_a(x)
x = x + layer_b(x)
x = self.out_proj(x)
if masked_tokens is not None:
x = x[masked_tokens]
return (x,)
def max_positions(self):
return 1024
def get_normalized_probs(self, net_output, log_probs, sample=None):
logits = net_output[0].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
@register_model_architecture("dummy_model", "dummy_model")
def base_architecture(args):
pass
| 3,090 | 30.865979 | 84 | py |
null | DA-Transformer-main/fairseq/benchmark/dummy_mt.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import Dictionary, FairseqDataset
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("dummy_mt")
class DummyMTTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("--dict-size", default=49996, type=int)
parser.add_argument("--dataset-size", default=100000, type=int)
parser.add_argument("--src-len", default=30, type=int)
parser.add_argument("--tgt-len", default=30, type=int)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
dictionary.pad_to_multiple_(8) # often faster if divisible by 8
self.dummy_src = torch.arange(args.src_len + 1) + dictionary.pad() + 1
self.dummy_tgt = torch.arange(args.tgt_len + 1) + dictionary.pad() + 1
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
dictionary = Dictionary()
for i in range(args.dict_size):
dictionary.add_symbol("word{}".format(i))
logger.info("dictionary: {} types".format(len(dictionary)))
args.max_source_positions = args.src_len + dictionary.pad() + 2
args.max_target_positions = args.tgt_len + dictionary.pad() + 2
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
item_size = max(self.args.src_len, self.args.tgt_len)
if self.args.batch_size is not None:
bsz = self.args.batch_size
else:
bsz = max(1, self.args.max_tokens // item_size)
tgt = torch.stack([self.dummy_tgt for _ in range(bsz)])
self.datasets[split] = DummyDataset(
{
"id": 1,
"net_input": {
"src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]),
"src_lengths": torch.full(
(bsz,), self.args.src_len, dtype=torch.long
),
"prev_output_tokens": tgt.clone(),
},
"target": tgt,
"nsentences": bsz,
"ntokens": bsz * self.args.tgt_len,
},
num_items=self.args.dataset_size,
item_size=item_size,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class DummyDataset(FairseqDataset):
def __init__(self, batch, num_items, item_size):
super().__init__()
self.batch = batch
self.num_items = num_items
self.item_size = item_size
def __getitem__(self, index):
return index
def __len__(self):
return self.num_items
def collater(self, samples):
return self.batch
@property
def sizes(self):
return np.array([self.item_size] * self.num_items)
def num_tokens(self, index):
return self.item_size
def size(self, index):
return self.item_size
def ordered_indices(self):
return np.arange(self.num_items)
@property
def supports_prefetch(self):
return False
| 3,677 | 29.65 | 84 | py |
null | DA-Transformer-main/fairseq/clib/cuda/ngram_repeat_block_cuda.cpp | /*
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
*/
#include <torch/extension.h>
#include <vector>
/*
CPP Binding for CUDA OP
*/
// CUDA forward declarations
torch::Tensor ngram_repeat_block_cuda_forward(
torch::Tensor tokens,
torch::Tensor lprobs,
int bsz,
int step,
int beam_size,
int no_repeat_ngram_size);
#define CHECK_CUDA(x) \
TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) \
TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
// Input check and call to CUDA OP
// Backward method not required
torch::Tensor ngram_repeat_block_forward(
torch::Tensor tokens,
torch::Tensor lprobs,
int bsz,
int step,
int beam_size,
int no_repeat_ngram_size) {
CHECK_INPUT(tokens);
CHECK_INPUT(lprobs);
assert(bsz > 0);
assert(step >= 0);
assert(beam_size > 0);
assert(no_repeat_ngram_size > 0);
return ngram_repeat_block_cuda_forward(
tokens, lprobs, bsz, step, beam_size, no_repeat_ngram_size);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def(
"forward",
&ngram_repeat_block_forward,
"No Repeat Ngram Block forward (CUDA)");
}
| 1,262 | 21.553571 | 66 | cpp |
null | DA-Transformer-main/fairseq/clib/libbase/balanced_assignment.cpp | /**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE file in the root directory of this source tree.
*/
/*
C++ code for solving the linear assignment problem.
Based on the Auction Algorithm from
https://dspace.mit.edu/bitstream/handle/1721.1/3265/P-2108-26912652.pdf and the
implementation from: https://github.com/bkj/auction-lap Adapted to be more
efficient when each worker is looking for k jobs instead of 1.
*/
#include <torch/extension.h>
#include <iostream>
using namespace torch::indexing;
torch::Tensor balanced_assignment(torch::Tensor job_and_worker_to_score) {
int max_iterations = 100;
torch::Tensor epsilon =
(job_and_worker_to_score.max() - job_and_worker_to_score.min()) / 50;
epsilon.clamp_min_(1e-04);
torch::Tensor worker_and_job_to_score =
job_and_worker_to_score.detach().transpose(0, 1).contiguous();
int num_workers = worker_and_job_to_score.size(0);
int num_jobs = worker_and_job_to_score.size(1);
auto device = worker_and_job_to_score.device();
int jobs_per_worker = num_jobs / num_workers;
torch::Tensor value = worker_and_job_to_score.clone();
int counter = 0;
torch::Tensor max_value = worker_and_job_to_score.max();
torch::Tensor bid_indices;
torch::Tensor cost = worker_and_job_to_score.new_zeros({1, num_jobs});
torch::Tensor bids =
worker_and_job_to_score.new_empty({num_workers, num_jobs});
torch::Tensor bid_increments =
worker_and_job_to_score.new_empty({num_workers, jobs_per_worker});
torch::Tensor top_values =
worker_and_job_to_score.new_empty({num_workers, jobs_per_worker + 1});
torch::Tensor high_bids = worker_and_job_to_score.new_empty({num_jobs});
torch::Tensor top_index = top_values.to(torch::kLong);
torch::Tensor high_bidders = top_index.new_empty({num_jobs});
torch::Tensor have_bids = high_bidders.to(torch::kBool);
torch::Tensor jobs_indices =
torch::arange({num_jobs}, torch::dtype(torch::kLong).device(device));
torch::Tensor true_tensor =
torch::ones({1}, torch::dtype(torch::kBool).device(device));
while (true) {
bids.zero_();
torch::topk_out(top_values, top_index, value, jobs_per_worker + 1, 1);
// Each worker bids the difference in value between that job and the k+1th
// job
torch::sub_out(
bid_increments,
top_values.index({Slice(None, None), Slice(0, jobs_per_worker)}),
top_values.index({Slice(None, None), jobs_per_worker}).unsqueeze(1));
bid_increments.add_(epsilon);
bids.scatter_(
1,
top_index.index({Slice(None, None), Slice(0, jobs_per_worker)}),
bid_increments);
if (counter < max_iterations && counter > 0) {
// Put in a minimal bid to retain items from the last round if no-one else
// bids for them this round
bids.view(-1).index_put_({bid_indices}, epsilon);
}
// Find the highest bidding worker per job
torch::max_out(high_bids, high_bidders, bids, 0);
torch::gt_out(have_bids, high_bids, 0);
if (have_bids.all().item<bool>()) {
// All jobs were bid for
break;
}
// Make popular items more expensive
cost.add_(high_bids);
torch::sub_out(value, worker_and_job_to_score, cost);
bid_indices = ((high_bidders * num_jobs) + jobs_indices).index({have_bids});
if (counter < max_iterations) {
// Make sure that this item will be in the winning worker's top-k next
// time.
value.view(-1).index_put_({bid_indices}, max_value);
} else {
// Suboptimal approximation that converges quickly from current solution
value.view(-1).index_put_(
{bid_indices}, worker_and_job_to_score.view(-1).index({bid_indices}));
}
counter += 1;
}
return top_index.index({Slice(None, None), Slice(0, jobs_per_worker)})
.reshape(-1);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("balanced_assignment", &balanced_assignment, "Balanced Assignment");
}
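// Hedged usage sketch from Python, after building this file as a torch
// extension (the module name depends on the build setup):
//   assignment = libbase.balanced_assignment(scores)
// where `scores` is a [num_jobs, num_workers] tensor and num_jobs is a
// multiple of num_workers; the result lists, for each worker in order, the
// jobs_per_worker job indices assigned to it, flattened to length num_jobs.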
| 4,016 | 35.518182 | 80 | cpp |
null | DA-Transformer-main/fairseq/clib/libbleu/libbleu.cpp | /**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <array>
#include <cstdio>
#include <cstring>
#include <map>
// NOLINTNEXTLINE
typedef struct {
size_t reflen;
size_t predlen;
size_t match1;
size_t count1;
size_t match2;
size_t count2;
size_t match3;
size_t count3;
size_t match4;
size_t count4;
} bleu_stat;
// left trim (remove pad)
void bleu_ltrim(size_t* len, int** sent, int pad) {
size_t start = 0;
while (start < *len) {
if (*(*sent + start) != pad) {
break;
}
start++;
}
*sent += start;
*len -= start;
}
// right trim (remove trailing eos and pad)
void bleu_rtrim(size_t* len, int** sent, int pad, int eos) {
size_t end = *len - 1;
while (end > 0) {
if (*(*sent + end) != eos && *(*sent + end) != pad) {
break;
}
end--;
}
*len = end + 1;
}
// left and right trim
void bleu_trim(size_t* len, int** sent, int pad, int eos) {
bleu_ltrim(len, sent, pad);
bleu_rtrim(len, sent, pad, eos);
}
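// 64-bit FNV-1a hash over the raw bytes of an n-gram window.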
size_t bleu_hash(int len, int* data) {
size_t h = 14695981039346656037ul;
size_t prime = 0x100000001b3;
char* b = (char*)data;
size_t blen = sizeof(int) * len;
while (blen-- > 0) {
h ^= *b++;
h *= prime;
}
return h;
}
void bleu_addngram(
size_t* ntotal,
size_t* nmatch,
size_t n,
size_t reflen,
int* ref,
size_t predlen,
int* pred) {
if (predlen < n) {
return;
}
predlen = predlen - n + 1;
(*ntotal) += predlen;
if (reflen < n) {
return;
}
reflen = reflen - n + 1;
std::map<size_t, size_t> count;
while (predlen > 0) {
size_t w = bleu_hash(n, pred++);
count[w]++;
predlen--;
}
while (reflen > 0) {
size_t w = bleu_hash(n, ref++);
if (count[w] > 0) {
(*nmatch)++;
count[w] -= 1;
}
reflen--;
}
}
extern "C" {
#ifdef _WIN64
__declspec(dllexport)
#endif
void bleu_zero_init(bleu_stat* stat) {
std::memset(stat, 0, sizeof(bleu_stat));
}
#ifdef _WIN64
__declspec(dllexport)
#endif
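// Presumably an add-one smoothed initialization: the higher-order n-gram
// counts and matches start at 1 (so BLEU stays defined with no matches),
// while the unigram statistics remain zero.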
void bleu_one_init(bleu_stat* stat) {
bleu_zero_init(stat);
stat->count1 = 0;
stat->count2 = 1;
stat->count3 = 1;
stat->count4 = 1;
stat->match1 = 0;
stat->match2 = 1;
stat->match3 = 1;
stat->match4 = 1;
}
#ifdef _WIN64
__declspec(dllexport)
#endif
void bleu_add(
bleu_stat* stat,
size_t reflen,
int* ref,
size_t predlen,
int* pred,
int pad,
int eos) {
bleu_trim(&reflen, &ref, pad, eos);
bleu_trim(&predlen, &pred, pad, eos);
stat->reflen += reflen;
stat->predlen += predlen;
bleu_addngram(&stat->count1, &stat->match1, 1, reflen, ref, predlen, pred);
bleu_addngram(&stat->count2, &stat->match2, 2, reflen, ref, predlen, pred);
bleu_addngram(&stat->count3, &stat->match3, 3, reflen, ref, predlen, pred);
bleu_addngram(&stat->count4, &stat->match4, 4, reflen, ref, predlen, pred);
}
}
| 3,019 | 18.113924 | 77 | cpp |
null | DA-Transformer-main/fairseq/clib/libbleu/module.cpp | /**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <Python.h>
static PyMethodDef method_def[] = {{NULL, NULL, 0, NULL}}; // NOLINT
static struct PyModuleDef module_def = {
PyModuleDef_HEAD_INIT,
"libbleu", /* name of module */
// NOLINTNEXTLINE
NULL, /* module documentation, may be NULL */
-1, /* size of per-interpreter state of the module,
or -1 if the module keeps state in global variables. */
method_def}; // NOLINT
#if PY_MAJOR_VERSION == 2
PyMODINIT_FUNC init_libbleu()
#else
PyMODINIT_FUNC PyInit_libbleu()
#endif
{
PyObject* m = PyModule_Create(&module_def);
if (!m) {
return NULL;
}
return m;
}
| 814 | 22.970588 | 68 | cpp |
null | DA-Transformer-main/fairseq/clib/libnat/edit_dist.cpp | /**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <pybind11/detail/common.h>
#include <pybind11/pybind11.h>
#include <torch/torch.h> // @manual=//caffe2:torch_extension
#include <algorithm>
#include <cstdint>
#include <iosfwd>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <vector>
using namespace ::std;
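// Dynamic-programming edit distance: d[i][j] is the distance between the
// first i tokens of x and the first j tokens of y, with insertion/deletion
// cost 1 and substitution cost 2 (i.e. a substitution counts as one deletion
// plus one insertion).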
vector<vector<uint32_t>> edit_distance2_with_dp(
vector<uint32_t>& x,
vector<uint32_t>& y) {
uint32_t lx = x.size();
uint32_t ly = y.size();
vector<vector<uint32_t>> d(lx + 1, vector<uint32_t>(ly + 1));
for (uint32_t i = 0; i < lx + 1; i++) {
d[i][0] = i;
}
for (uint32_t j = 0; j < ly + 1; j++) {
d[0][j] = j;
}
for (uint32_t i = 1; i < lx + 1; i++) {
for (uint32_t j = 1; j < ly + 1; j++) {
d[i][j] =
min(min(d[i - 1][j], d[i][j - 1]) + 1,
d[i - 1][j - 1] + 2 * (x.at(i - 1) == y.at(j - 1) ? 0 : 1));
}
}
return d;
}
vector<vector<uint32_t>> edit_distance2_backtracking(
vector<vector<uint32_t>>& d,
vector<uint32_t>& x,
vector<uint32_t>& y,
uint32_t terminal_symbol) {
vector<uint32_t> seq;
vector<vector<uint32_t>> edit_seqs(x.size() + 2, vector<uint32_t>());
  /*
  edit_seqs:
  cells 0 .. x.size() hold the insertion sequences;
  the last cell holds the keep/delete sequence (0 = keep, 1 = delete).
  */
if (x.size() == 0) {
edit_seqs.at(0) = y;
return edit_seqs;
}
uint32_t i = d.size() - 1;
uint32_t j = d.at(0).size() - 1;
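  // i and j are unsigned, so the >= 0 tests below are vacuous; the loop
  // exits through the explicit break once both indices reach 0.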
while ((i >= 0) && (j >= 0)) {
if ((i == 0) && (j == 0)) {
break;
}
if ((j > 0) && (d.at(i).at(j - 1) < d.at(i).at(j))) {
seq.push_back(1); // insert
seq.push_back(y.at(j - 1));
j--;
} else if ((i > 0) && (d.at(i - 1).at(j) < d.at(i).at(j))) {
seq.push_back(2); // delete
seq.push_back(x.at(i - 1));
i--;
} else {
seq.push_back(3); // keep
seq.push_back(x.at(i - 1));
i--;
j--;
}
}
uint32_t prev_op, op, s, word;
prev_op = 0, s = 0;
for (uint32_t k = 0; k < seq.size() / 2; k++) {
op = seq.at(seq.size() - 2 * k - 2);
word = seq.at(seq.size() - 2 * k - 1);
if (prev_op != 1) {
s++;
}
if (op == 1) // insert
{
edit_seqs.at(s - 1).push_back(word);
} else if (op == 2) // delete
{
edit_seqs.at(x.size() + 1).push_back(1);
} else {
edit_seqs.at(x.size() + 1).push_back(0);
}
prev_op = op;
}
for (uint32_t k = 0; k < edit_seqs.size(); k++) {
if (edit_seqs[k].size() == 0) {
edit_seqs[k].push_back(terminal_symbol);
}
}
return edit_seqs;
}
vector<vector<uint32_t>> edit_distance2_backtracking_with_delete(
vector<vector<uint32_t>>& d,
vector<uint32_t>& x,
vector<uint32_t>& y,
uint32_t terminal_symbol,
uint32_t deletion_symbol) {
vector<uint32_t> seq;
vector<vector<uint32_t>> edit_seqs(x.size() + 1, vector<uint32_t>());
  /*
  edit_seqs:
  cells 0 .. x.size() hold the insertion sequences;
  deletions are encoded in place with `deletion_symbol`.
  */
if (x.size() == 0) {
edit_seqs.at(0) = y;
return edit_seqs;
}
uint32_t i = d.size() - 1;
uint32_t j = d.at(0).size() - 1;
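  // As above: i and j are unsigned, so the loop exits via the explicit break
  // once both indices reach 0.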
while ((i >= 0) && (j >= 0)) {
if ((i == 0) && (j == 0)) {
break;
}
if ((j > 0) && (d.at(i).at(j - 1) < d.at(i).at(j))) {
seq.push_back(1); // insert
seq.push_back(y.at(j - 1));
j--;
} else if ((i > 0) && (d.at(i - 1).at(j) < d.at(i).at(j))) {
seq.push_back(2); // delete
seq.push_back(x.at(i - 1));
i--;
} else {
seq.push_back(3); // keep
seq.push_back(x.at(i - 1));
i--;
j--;
}
}
uint32_t prev_op, op, s, word;
prev_op = 0, s = 0;
for (uint32_t k = 0; k < seq.size() / 2; k++) {
op = seq.at(seq.size() - 2 * k - 2);
word = seq.at(seq.size() - 2 * k - 1);
if (prev_op != 1) {
s++;
}
if (op == 1) // insert
{
edit_seqs.at(s - 1).push_back(word);
} else if (op == 2) // delete
{
edit_seqs.at(s - 1).push_back(deletion_symbol);
}
prev_op = op;
}
for (uint32_t k = 0; k < edit_seqs.size(); k++) {
if (edit_seqs.at(k).size() == 0) {
edit_seqs.at(k).push_back(terminal_symbol);
}
}
return edit_seqs;
}
vector<uint32_t> compute_ed2(
vector<vector<uint32_t>>& xs,
vector<vector<uint32_t>>& ys) {
vector<uint32_t> distances(xs.size());
for (uint32_t i = 0; i < xs.size(); i++) {
vector<vector<uint32_t>> d = edit_distance2_with_dp(xs.at(i), ys.at(i));
distances.at(i) = d.at(xs.at(i).size()).at(ys.at(i).size());
}
return distances;
}
vector<vector<vector<uint32_t>>> suggested_ed2_path(
vector<vector<uint32_t>>& xs,
vector<vector<uint32_t>>& ys,
uint32_t terminal_symbol) {
vector<vector<vector<uint32_t>>> seq(xs.size());
for (uint32_t i = 0; i < xs.size(); i++) {
vector<vector<uint32_t>> d = edit_distance2_with_dp(xs.at(i), ys.at(i));
seq.at(i) =
edit_distance2_backtracking(d, xs.at(i), ys.at(i), terminal_symbol);
}
return seq;
}
vector<vector<vector<uint32_t>>> suggested_ed2_path_with_delete(
vector<vector<uint32_t>>& xs,
vector<vector<uint32_t>>& ys,
uint32_t terminal_symbol,
uint32_t deletion_symbol) {
vector<vector<vector<uint32_t>>> seq(xs.size());
for (uint32_t i = 0; i < xs.size(); i++) {
vector<vector<uint32_t>> d = edit_distance2_with_dp(xs.at(i), ys.at(i));
seq.at(i) = edit_distance2_backtracking_with_delete(
d, xs.at(i), ys.at(i), terminal_symbol, deletion_symbol);
}
return seq;
}
PYBIND11_MODULE(libnat, m) {
m.def("compute_ed2", &compute_ed2, "compute_ed2");
m.def("suggested_ed2_path", &suggested_ed2_path, "suggested_ed2_path");
m.def(
"suggested_ed2_path_with_delete",
&suggested_ed2_path_with_delete,
"suggested_ed2_path_with_delete");
}
| 5,958 | 24.685345 | 76 | cpp |
null | DA-Transformer-main/fairseq/clib/libnat_cuda/binding.cpp | /**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE file in the root directory of this source tree.
*/
/*
This code is partially adpoted from
https://github.com/1ytic/pytorch-edit-distance
*/
#include <torch/types.h>
#include "edit_dist.h"
#ifndef TORCH_CHECK
#define TORCH_CHECK AT_CHECK
#endif
#define CHECK_CUDA(x) \
TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) \
TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
torch::Tensor LevenshteinDistance(
torch::Tensor source,
torch::Tensor target,
torch::Tensor source_length,
torch::Tensor target_length) {
CHECK_INPUT(source);
CHECK_INPUT(target);
CHECK_INPUT(source_length);
CHECK_INPUT(target_length);
return LevenshteinDistanceCuda(source, target, source_length, target_length);
}
torch::Tensor GenerateDeletionLabel(
torch::Tensor source,
torch::Tensor operations) {
CHECK_INPUT(source);
CHECK_INPUT(operations);
return GenerateDeletionLabelCuda(source, operations);
}
std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabel(
torch::Tensor target,
torch::Tensor operations) {
CHECK_INPUT(target);
CHECK_INPUT(operations);
return GenerateInsertionLabelCuda(target, operations);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("levenshtein_distance", &LevenshteinDistance, "Levenshtein distance");
m.def(
"generate_deletion_labels",
&GenerateDeletionLabel,
"Generate Deletion Label");
m.def(
"generate_insertion_labels",
&GenerateInsertionLabel,
"Generate Insertion Label");
}
| 1,769 | 25.029412 | 79 | cpp |
null | DA-Transformer-main/fairseq/clib/libnat_cuda/edit_dist.h | /**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <torch/extension.h>
torch::Tensor LevenshteinDistanceCuda(
torch::Tensor source,
torch::Tensor target,
torch::Tensor source_length,
torch::Tensor target_length);
torch::Tensor GenerateDeletionLabelCuda(
torch::Tensor source,
torch::Tensor operations);
std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda(
torch::Tensor source,
torch::Tensor operations);
| 627 | 23.153846 | 67 | h |
null | DA-Transformer-main/fairseq/config/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 177 | 34.6 | 65 | py |
null | DA-Transformer-main/fairseq/config/config.yaml | # @package _group_
hydra:
run:
dir: .
defaults:
- _self_
- task: null
- model: null
- criterion: cross_entropy
- optimizer: null
- lr_scheduler: fixed
- bpe: null
- tokenizer: null
- scoring: null
- generation: null
- common_eval: null
- eval_lm: null
| 308 | 14.45 | 30 | yaml |
null | DA-Transformer-main/fairseq/config/model/transformer_lm/transformer_lm_baevski_gbw.yaml | # @package _group_
activation_fn: "relu"
dropout: 0.1
attention_dropout: 0.1
activation_dropout: 0.0
relu_dropout: 0.0
decoder_embed_dim: 512
decoder_output_dim: 512
decoder_input_dim: 512
decoder_ffn_embed_dim: 4096
decoder_layers: 12
decoder_attention_heads: 16
decoder_normalize_before: true
no_decoder_final_norm: true
adaptive_softmax_cutoff: null
adaptive_softmax_dropout: 0
adaptive_softmax_factor: 4
no_token_positional_embeddings: false
share_decoder_input_output_embed: false
character_embeddings: false
character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
character_embedding_dim: 4
char_embedder_highway_layers: 2
adaptive_input: false
adaptive_input_factor: 4
adaptive_input_cutoff: null
tie_adaptive_weights: false
tie_adaptive_proj: false
decoder_learned_pos: false
decoder_layerdrop: 0
decoder_layers_to_keep: null
layernorm_embedding: false
no_scale_embedding: false
quant_noise_pq: 0
quant_noise_pq_block_size: 8
quant_noise_scalar: 0
| 991 | 25.810811 | 90 | yaml |
null | DA-Transformer-main/fairseq/config/model/transformer_lm/transformer_lm_baevski_wiki103.yaml | # @package _group_
activation_fn: "relu"
dropout: 0.3
attention_dropout: 0.1
activation_dropout: 0.1
relu_dropout: 0.1
decoder_embed_dim: 1024
decoder_output_dim: 1024
decoder_input_dim: 1024
decoder_ffn_embed_dim: 4096
decoder_layers: 16
decoder_attention_heads: 8
decoder_normalize_before: true
no_decoder_final_norm: true
adaptive_softmax_cutoff: "20000,60000"
adaptive_softmax_dropout: 0.2
adaptive_softmax_factor: 4
no_token_positional_embeddings: false
share_decoder_input_output_embed: false
character_embeddings: false
character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
character_embedding_dim: 4
char_embedder_highway_layers: 2
adaptive_input: true
adaptive_input_factor: 4
adaptive_input_cutoff: "20000,60000"
tie_adaptive_weights: true
tie_adaptive_proj: true
decoder_learned_pos: false
decoder_layerdrop: 0
decoder_layers_to_keep: null
layernorm_embedding: false
no_scale_embedding: false
quant_noise_pq: 0
quant_noise_pq_block_size: 8
quant_noise_scalar: 0
| 1,010 | 26.324324 | 90 | yaml |
null | DA-Transformer-main/fairseq/config/model/transformer_lm/transformer_lm_big.yaml | # @package _group_
activation_fn: "relu"
dropout: 0.1
attention_dropout: 0.0
activation_dropout: 0.0
relu_dropout: 0.0
decoder_embed_dim: 1024
decoder_output_dim: 1024
decoder_input_dim: 1024
decoder_ffn_embed_dim: 4096
decoder_layers: 12
decoder_attention_heads: 16
decoder_normalize_before: true
no_decoder_final_norm: false
adaptive_softmax_cutoff: null
adaptive_softmax_dropout: 0
adaptive_softmax_factor: 4
no_token_positional_embeddings: false
share_decoder_input_output_embed: false
character_embeddings: false
character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
character_embedding_dim: 4
char_embedder_highway_layers: 2
adaptive_input: false
adaptive_input_factor: 4
adaptive_input_cutoff: null
tie_adaptive_weights: false
tie_adaptive_proj: false
decoder_learned_pos: false
decoder_layerdrop: 0
decoder_layers_to_keep: null
layernorm_embedding: false
no_scale_embedding: false
quant_noise_pq: 0
quant_noise_pq_block_size: 8
quant_noise_scalar: 0
| 995 | 25.918919 | 90 | yaml |
null | DA-Transformer-main/fairseq/config/model/transformer_lm/transformer_lm_gbw.yaml | # @package _group_
activation_fn: "relu"
dropout: 0.1
attention_dropout: 0.1
activation_dropout: 0.0
relu_dropout: 0.0
decoder_embed_dim: 512
decoder_output_dim: 512
decoder_input_dim: 512
decoder_ffn_embed_dim: 4096
decoder_layers: 12
decoder_attention_heads: 16
decoder_normalize_before: true
no_decoder_final_norm: true
adaptive_softmax_cutoff: null
adaptive_softmax_dropout: 0
adaptive_softmax_factor: 4
no_token_positional_embeddings: false
share_decoder_input_output_embed: false
character_embeddings: false
character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
character_embedding_dim: 4
char_embedder_highway_layers: 2
adaptive_input: false
adaptive_input_factor: 4
adaptive_input_cutoff: null
tie_adaptive_weights: false
tie_adaptive_proj: false
decoder_learned_pos: false
decoder_layerdrop: 0
decoder_layers_to_keep: null
layernorm_embedding: false
no_scale_embedding: false
quant_noise_pq: 0
quant_noise_pq_block_size: 8
quant_noise_scalar: 0
| 991 | 25.810811 | 90 | yaml |