repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
gunpowder | gunpowder-master/docs/build/conf.py | # -*- coding: utf-8 -*-
#
# gunpowder documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 30 12:59:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'jupyter_sphinx'
]
def setup(app):
app.add_css_file('custom.css')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gunpowder'
author = u'Jan Funke, Will Patton, Renate Krause, Julia Buhmann, Rodrigo Ceballos Lentini, William Grisaitis, Chris Barnes, Caroline Malin-Mayor, Larissa Heinrich, Philipp Hanslovsky, Sherry Ding, Andrew Champion, Arlo Sheridan, Constantin Pape'
copyright = u'2020, ' + author
here = os.path.abspath(os.path.dirname(__file__))
version_info = {}
with open(os.path.join(here, '..', '..', 'gunpowder', 'version_info.py')) as fp:
exec(fp.read(), version_info)
gp_version = version_info['_version']
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'{}.{}'.format(gp_version.major(), gp_version.minor())
# The full version, including alpha/beta/rc tags.
release = str(gp_version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme_path = ['_themes']
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'logo_only': True
}
html_logo = 'gunpowder.svg'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'gunpowderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'gunpowder.tex', u'gunpowder Documentation',
u'Jan Funke', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gunpowder', u'gunpowder Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'gunpowder', u'gunpowder Documentation',
author, 'gunpowder', 'One line description of project.',
'Miscellaneous'),
]
| 5,592 | 30.24581 | 245 | py |
gunpowder | gunpowder-master/docs/build/_themes/sphinx_rtd_theme/__init__.py | """Sphinx ReadTheDocs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
from os import path
__version__ = '0.2.5b2'
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
return cur_dir
# See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
def setup(app):
app.add_html_theme('sphinx_rtd_theme', path.abspath(path.dirname(__file__)))
| 522 | 25.15 | 96 | py |
treelstm.pytorch | treelstm.pytorch-master/main.py | from __future__ import division
from __future__ import print_function
import os
import random
import logging
import torch
import torch.nn as nn
import torch.optim as optim
# IMPORT CONSTANTS
from treelstm import Constants
# NEURAL NETWORK MODULES/LAYERS
from treelstm import SimilarityTreeLSTM
# DATA HANDLING CLASSES
from treelstm import Vocab
# DATASET CLASS FOR SICK DATASET
from treelstm import SICKDataset
# METRICS CLASS FOR EVALUATION
from treelstm import Metrics
# UTILITY FUNCTIONS
from treelstm import utils
# TRAIN AND TEST HELPER FUNCTIONS
from treelstm import Trainer
# CONFIG PARSER
from config import parse_args
# MAIN BLOCK
def main():
global args
args = parse_args()
# global logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
# file logger
fh = logging.FileHandler(os.path.join(args.save, args.expname)+'.log', mode='w')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
# console logger
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# argument validation
args.cuda = args.cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if args.cuda else "cpu")
if args.sparse and args.wd != 0:
logger.error('Sparsity and weight decay are incompatible, pick one!')
exit()
logger.debug(args)
torch.manual_seed(args.seed)
random.seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
if not os.path.exists(args.save):
os.makedirs(args.save)
train_dir = os.path.join(args.data, 'train/')
dev_dir = os.path.join(args.data, 'dev/')
test_dir = os.path.join(args.data, 'test/')
# write unique words from all token files
sick_vocab_file = os.path.join(args.data, 'sick.vocab')
if not os.path.isfile(sick_vocab_file):
token_files_b = [os.path.join(split, 'b.toks') for split in [train_dir, dev_dir, test_dir]]
token_files_a = [os.path.join(split, 'a.toks') for split in [train_dir, dev_dir, test_dir]]
token_files = token_files_a + token_files_b
sick_vocab_file = os.path.join(args.data, 'sick.vocab')
utils.build_vocab(token_files, sick_vocab_file)
# get vocab object from vocab file previously written
vocab = Vocab(filename=sick_vocab_file,
data=[Constants.PAD_WORD, Constants.UNK_WORD,
Constants.BOS_WORD, Constants.EOS_WORD])
logger.debug('==> SICK vocabulary size : %d ' % vocab.size())
# load SICK dataset splits
train_file = os.path.join(args.data, 'sick_train.pth')
if os.path.isfile(train_file):
train_dataset = torch.load(train_file)
else:
train_dataset = SICKDataset(train_dir, vocab, args.num_classes)
torch.save(train_dataset, train_file)
logger.debug('==> Size of train data : %d ' % len(train_dataset))
dev_file = os.path.join(args.data, 'sick_dev.pth')
if os.path.isfile(dev_file):
dev_dataset = torch.load(dev_file)
else:
dev_dataset = SICKDataset(dev_dir, vocab, args.num_classes)
torch.save(dev_dataset, dev_file)
logger.debug('==> Size of dev data : %d ' % len(dev_dataset))
test_file = os.path.join(args.data, 'sick_test.pth')
if os.path.isfile(test_file):
test_dataset = torch.load(test_file)
else:
test_dataset = SICKDataset(test_dir, vocab, args.num_classes)
torch.save(test_dataset, test_file)
logger.debug('==> Size of test data : %d ' % len(test_dataset))
# initialize model, criterion/loss_function, optimizer
model = SimilarityTreeLSTM(
vocab.size(),
args.input_dim,
args.mem_dim,
args.hidden_dim,
args.num_classes,
args.sparse,
args.freeze_embed)
criterion = nn.KLDivLoss()
# for words common to dataset vocab and GLOVE, use GLOVE vectors
# for other words in dataset vocab, use random normal vectors
emb_file = os.path.join(args.data, 'sick_embed.pth')
if os.path.isfile(emb_file):
emb = torch.load(emb_file)
else:
# load glove embeddings and vocab
glove_vocab, glove_emb = utils.load_word_vectors(
os.path.join(args.glove, 'glove.840B.300d'))
logger.debug('==> GLOVE vocabulary size: %d ' % glove_vocab.size())
emb = torch.zeros(vocab.size(), glove_emb.size(1), dtype=torch.float, device=device)
emb.normal_(0, 0.05)
# zero out the embeddings for padding and other special words if they are absent in vocab
for idx, item in enumerate([Constants.PAD_WORD, Constants.UNK_WORD,
Constants.BOS_WORD, Constants.EOS_WORD]):
emb[idx].zero_()
for word in vocab.labelToIdx.keys():
if glove_vocab.getIndex(word):
emb[vocab.getIndex(word)] = glove_emb[glove_vocab.getIndex(word)]
torch.save(emb, emb_file)
# plug these into embedding matrix inside model
model.emb.weight.data.copy_(emb)
model.to(device), criterion.to(device)
if args.optim == 'adam':
optimizer = optim.Adam(filter(lambda p: p.requires_grad,
model.parameters()), lr=args.lr, weight_decay=args.wd)
elif args.optim == 'adagrad':
optimizer = optim.Adagrad(filter(lambda p: p.requires_grad,
model.parameters()), lr=args.lr, weight_decay=args.wd)
elif args.optim == 'sgd':
optimizer = optim.SGD(filter(lambda p: p.requires_grad,
model.parameters()), lr=args.lr, weight_decay=args.wd)
metrics = Metrics(args.num_classes)
# create trainer object for training and testing
trainer = Trainer(args, model, criterion, optimizer, device)
best = -float('inf')
for epoch in range(args.epochs):
train_loss = trainer.train(train_dataset)
train_loss, train_pred = trainer.test(train_dataset)
dev_loss, dev_pred = trainer.test(dev_dataset)
test_loss, test_pred = trainer.test(test_dataset)
train_pearson = metrics.pearson(train_pred, train_dataset.labels)
train_mse = metrics.mse(train_pred, train_dataset.labels)
logger.info('==> Epoch {}, Train \tLoss: {}\tPearson: {}\tMSE: {}'.format(
epoch, train_loss, train_pearson, train_mse))
dev_pearson = metrics.pearson(dev_pred, dev_dataset.labels)
dev_mse = metrics.mse(dev_pred, dev_dataset.labels)
logger.info('==> Epoch {}, Dev \tLoss: {}\tPearson: {}\tMSE: {}'.format(
epoch, dev_loss, dev_pearson, dev_mse))
test_pearson = metrics.pearson(test_pred, test_dataset.labels)
test_mse = metrics.mse(test_pred, test_dataset.labels)
logger.info('==> Epoch {}, Test \tLoss: {}\tPearson: {}\tMSE: {}'.format(
epoch, test_loss, test_pearson, test_mse))
if best < test_pearson:
best = test_pearson
checkpoint = {
'model': trainer.model.state_dict(),
'optim': trainer.optimizer,
'pearson': test_pearson, 'mse': test_mse,
'args': args, 'epoch': epoch
}
logger.debug('==> New optimum found, checkpointing everything now...')
torch.save(checkpoint, '%s.pt' % os.path.join(args.save, args.expname))
if __name__ == "__main__":
main()
| 7,610 | 39.484043 | 99 | py |
treelstm.pytorch | treelstm.pytorch-master/config.py | import argparse
def parse_args():
parser = argparse.ArgumentParser(
description='PyTorch TreeLSTM for Sentence Similarity on Dependency Trees')
# data arguments
parser.add_argument('--data', default='data/sick/',
help='path to dataset')
parser.add_argument('--glove', default='data/glove/',
help='directory with GLOVE embeddings')
parser.add_argument('--save', default='checkpoints/',
help='directory to save checkpoints in')
parser.add_argument('--expname', type=str, default='test',
help='Name to identify experiment')
# model arguments
parser.add_argument('--input_dim', default=300, type=int,
help='Size of input word vector')
parser.add_argument('--mem_dim', default=150, type=int,
help='Size of TreeLSTM cell state')
parser.add_argument('--hidden_dim', default=50, type=int,
help='Size of classifier MLP')
parser.add_argument('--num_classes', default=5, type=int,
help='Number of classes in dataset')
parser.add_argument('--freeze_embed', action='store_true',
help='Freeze word embeddings')
# training arguments
parser.add_argument('--epochs', default=15, type=int,
help='number of total epochs to run')
parser.add_argument('--batchsize', default=25, type=int,
help='batchsize for optimizer updates')
parser.add_argument('--lr', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--wd', default=1e-4, type=float,
help='weight decay (default: 1e-4)')
parser.add_argument('--sparse', action='store_true',
help='Enable sparsity for embeddings, \
incompatible with weight decay')
parser.add_argument('--optim', default='adagrad',
help='optimizer (default: adagrad)')
# miscellaneous options
parser.add_argument('--seed', default=123, type=int,
help='random seed (default: 123)')
cuda_parser = parser.add_mutually_exclusive_group(required=False)
cuda_parser.add_argument('--cuda', dest='cuda', action='store_true')
cuda_parser.add_argument('--no-cuda', dest='cuda', action='store_false')
parser.set_defaults(cuda=True)
args = parser.parse_args()
return args
| 2,532 | 48.666667 | 83 | py |
treelstm.pytorch | treelstm.pytorch-master/scripts/download.py | """
Downloads the following:
- Stanford parser
- Stanford POS tagger
- Glove vectors
- SICK dataset (semantic relatedness task)
"""
from __future__ import print_function
import urllib2
import sys
import os
import zipfile
def download(url, dirpath):
filename = url.split('/')[-1]
filepath = os.path.join(dirpath, filename)
try:
u = urllib2.urlopen(url)
except Exception as e:
print("URL %s failed to open" % url)
raise Exception
try:
f = open(filepath, 'wb')
except Exception as e:
print("Cannot write %s" % filepath)
raise Exception
try:
filesize = int(u.info().getheaders("Content-Length")[0])
except Exception as e:
print("URL %s failed to report length" % url)
raise Exception
print("Downloading: %s Bytes: %s" % (filename, filesize))
downloaded = 0
block_sz = 8192
status_width = 70
while True:
buf = u.read(block_sz)
if not buf:
print('')
break
else:
print('', end='\r')
downloaded += len(buf)
f.write(buf)
status = (("[%-" + str(status_width + 1) + "s] %3.2f%%") %
('=' * int(downloaded / filesize * status_width) + '>',
downloaded * 100. / filesize))
print(status, end='')
sys.stdout.flush()
f.close()
return filepath
def unzip(filepath):
print("Extracting: " + filepath)
dirpath = os.path.dirname(filepath)
with zipfile.ZipFile(filepath) as zf:
zf.extractall(dirpath)
os.remove(filepath)
def download_tagger(dirpath):
tagger_dir = 'stanford-tagger'
if os.path.exists(os.path.join(dirpath, tagger_dir)):
print('Found Stanford POS Tagger - skip')
return
url = 'http://nlp.stanford.edu/software/stanford-postagger-2015-01-29.zip'
filepath = download(url, dirpath)
zip_dir = ''
with zipfile.ZipFile(filepath) as zf:
zip_dir = zf.namelist()[0]
zf.extractall(dirpath)
os.remove(filepath)
os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, tagger_dir))
def download_parser(dirpath):
parser_dir = 'stanford-parser'
if os.path.exists(os.path.join(dirpath, parser_dir)):
print('Found Stanford Parser - skip')
return
url = 'http://nlp.stanford.edu/software/stanford-parser-full-2015-01-29.zip'
filepath = download(url, dirpath)
zip_dir = ''
with zipfile.ZipFile(filepath) as zf:
zip_dir = zf.namelist()[0]
zf.extractall(dirpath)
os.remove(filepath)
os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, parser_dir))
def download_wordvecs(dirpath):
if os.path.exists(dirpath):
print('Found Glove vectors - skip')
return
else:
os.makedirs(dirpath)
url = 'http://www-nlp.stanford.edu/data/glove.840B.300d.zip'
unzip(download(url, dirpath))
def download_sick(dirpath):
if os.path.exists(dirpath):
print('Found SICK dataset - skip')
return
else:
os.makedirs(dirpath)
train_url = 'http://alt.qcri.org/semeval2014/task1/data/uploads/sick_train.zip'
trial_url = 'http://alt.qcri.org/semeval2014/task1/data/uploads/sick_trial.zip'
test_url = 'http://alt.qcri.org/semeval2014/task1/data/uploads/sick_test_annotated.zip'
unzip(download(train_url, dirpath))
unzip(download(trial_url, dirpath))
unzip(download(test_url, dirpath))
if __name__ == '__main__':
base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# data
data_dir = os.path.join(base_dir, 'data')
wordvec_dir = os.path.join(data_dir, 'glove')
sick_dir = os.path.join(data_dir, 'sick')
# libraries
lib_dir = os.path.join(base_dir, 'lib')
# download dependencies
download_tagger(lib_dir)
download_parser(lib_dir)
download_wordvecs(wordvec_dir)
download_sick(sick_dir)
| 3,938 | 28.177778 | 91 | py |
treelstm.pytorch | treelstm.pytorch-master/scripts/preprocess-sick.py | """
Preprocessing script for SICK data.
"""
import os
import glob
def make_dirs(dirs):
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
def dependency_parse(filepath, cp='', tokenize=True):
print('\nDependency parsing ' + filepath)
dirpath = os.path.dirname(filepath)
filepre = os.path.splitext(os.path.basename(filepath))[0]
tokpath = os.path.join(dirpath, filepre + '.toks')
parentpath = os.path.join(dirpath, filepre + '.parents')
relpath = os.path.join(dirpath, filepre + '.rels')
tokenize_flag = '-tokenize - ' if tokenize else ''
cmd = ('java -cp %s DependencyParse -tokpath %s -parentpath %s -relpath %s %s < %s'
% (cp, tokpath, parentpath, relpath, tokenize_flag, filepath))
os.system(cmd)
def constituency_parse(filepath, cp='', tokenize=True):
dirpath = os.path.dirname(filepath)
filepre = os.path.splitext(os.path.basename(filepath))[0]
tokpath = os.path.join(dirpath, filepre + '.toks')
parentpath = os.path.join(dirpath, filepre + '.cparents')
tokenize_flag = '-tokenize - ' if tokenize else ''
cmd = ('java -cp %s ConstituencyParse -tokpath %s -parentpath %s %s < %s'
% (cp, tokpath, parentpath, tokenize_flag, filepath))
os.system(cmd)
def build_vocab(filepaths, dst_path, lowercase=True):
vocab = set()
for filepath in filepaths:
with open(filepath) as f:
for line in f:
if lowercase:
line = line.lower()
vocab |= set(line.split())
with open(dst_path, 'w') as f:
for w in sorted(vocab):
f.write(w + '\n')
def split(filepath, dst_dir):
with open(filepath) as datafile, \
open(os.path.join(dst_dir, 'a.txt'), 'w') as afile, \
open(os.path.join(dst_dir, 'b.txt'), 'w') as bfile, \
open(os.path.join(dst_dir, 'id.txt'), 'w') as idfile, \
open(os.path.join(dst_dir, 'sim.txt'), 'w') as simfile:
datafile.readline()
for line in datafile:
i, a, b, sim, ent = line.strip().split('\t')
idfile.write(i + '\n')
afile.write(a + '\n')
bfile.write(b + '\n')
simfile.write(sim + '\n')
def parse(dirpath, cp=''):
dependency_parse(os.path.join(dirpath, 'a.txt'), cp=cp, tokenize=True)
dependency_parse(os.path.join(dirpath, 'b.txt'), cp=cp, tokenize=True)
constituency_parse(os.path.join(dirpath, 'a.txt'), cp=cp, tokenize=True)
constituency_parse(os.path.join(dirpath, 'b.txt'), cp=cp, tokenize=True)
if __name__ == '__main__':
print('=' * 80)
print('Preprocessing SICK dataset')
print('=' * 80)
base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(base_dir, 'data')
sick_dir = os.path.join(data_dir, 'sick')
lib_dir = os.path.join(base_dir, 'lib')
train_dir = os.path.join(sick_dir, 'train')
dev_dir = os.path.join(sick_dir, 'dev')
test_dir = os.path.join(sick_dir, 'test')
make_dirs([train_dir, dev_dir, test_dir])
# java classpath for calling Stanford parser
classpath = ':'.join([
lib_dir,
os.path.join(lib_dir, 'stanford-parser/stanford-parser.jar'),
os.path.join(lib_dir, 'stanford-parser/stanford-parser-3.5.1-models.jar')])
# split into separate files
split(os.path.join(sick_dir, 'SICK_train.txt'), train_dir)
split(os.path.join(sick_dir, 'SICK_trial.txt'), dev_dir)
split(os.path.join(sick_dir, 'SICK_test_annotated.txt'), test_dir)
# parse sentences
parse(train_dir, cp=classpath)
parse(dev_dir, cp=classpath)
parse(test_dir, cp=classpath)
# get vocabulary
build_vocab(
glob.glob(os.path.join(sick_dir, '*/*.toks')),
os.path.join(sick_dir, 'vocab.txt'))
build_vocab(
glob.glob(os.path.join(sick_dir, '*/*.toks')),
os.path.join(sick_dir, 'vocab-cased.txt'),
lowercase=False)
| 3,975 | 34.185841 | 87 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/tree.py | # tree object from stanfordnlp/treelstm
class Tree(object):
def __init__(self):
self.parent = None
self.num_children = 0
self.children = list()
def add_child(self, child):
child.parent = self
self.num_children += 1
self.children.append(child)
def size(self):
if getattr(self, '_size'):
return self._size
count = 1
for i in range(self.num_children):
count += self.children[i].size()
self._size = count
return self._size
def depth(self):
if getattr(self, '_depth'):
return self._depth
count = 0
if self.num_children > 0:
for i in range(self.num_children):
child_depth = self.children[i].depth()
if child_depth > count:
count = child_depth
count += 1
self._depth = count
return self._depth
| 946 | 26.852941 | 54 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/Constants.py | PAD = 0
UNK = 1
BOS = 2
EOS = 3
PAD_WORD = '<blank>'
UNK_WORD = '<unk>'
BOS_WORD = '<s>'
EOS_WORD = '</s>'
| 108 | 9.9 | 20 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/utils.py | from __future__ import division
from __future__ import print_function
import os
import math
import torch
from .vocab import Vocab
# loading GLOVE word vectors
# if .pth file is found, will load that
# else will load from .txt file & save
def load_word_vectors(path):
if os.path.isfile(path + '.pth') and os.path.isfile(path + '.vocab'):
print('==> File found, loading to memory')
vectors = torch.load(path + '.pth')
vocab = Vocab(filename=path + '.vocab')
return vocab, vectors
# saved file not found, read from txt file
# and create tensors for word vectors
print('==> File not found, preparing, be patient')
count = sum(1 for line in open(path + '.txt', 'r', encoding='utf8', errors='ignore'))
with open(path + '.txt', 'r') as f:
contents = f.readline().rstrip('\n').split(' ')
dim = len(contents[1:])
words = [None] * (count)
vectors = torch.zeros(count, dim, dtype=torch.float, device='cpu')
with open(path + '.txt', 'r', encoding='utf8', errors='ignore') as f:
idx = 0
for line in f:
contents = line.rstrip('\n').split(' ')
words[idx] = contents[0]
values = list(map(float, contents[1:]))
vectors[idx] = torch.tensor(values, dtype=torch.float, device='cpu')
idx += 1
with open(path + '.vocab', 'w', encoding='utf8', errors='ignore') as f:
for word in words:
f.write(word + '\n')
vocab = Vocab(filename=path + '.vocab')
torch.save(vectors, path + '.pth')
return vocab, vectors
# write unique words from a set of files to a new file
def build_vocab(filenames, vocabfile):
vocab = set()
for filename in filenames:
with open(filename, 'r') as f:
for line in f:
tokens = line.rstrip('\n').split(' ')
vocab |= set(tokens)
with open(vocabfile, 'w') as f:
for token in sorted(vocab):
f.write(token + '\n')
# mapping from scalar to vector
def map_label_to_target(label, num_classes):
target = torch.zeros(1, num_classes, dtype=torch.float, device='cpu')
ceil = int(math.ceil(label))
floor = int(math.floor(label))
if ceil == floor:
target[0, floor-1] = 1
else:
target[0, floor-1] = ceil - label
target[0, ceil-1] = label - floor
return target
| 2,376 | 32.957143 | 89 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from . import Constants
# module for childsumtreelstm
class ChildSumTreeLSTM(nn.Module):
def __init__(self, in_dim, mem_dim):
super(ChildSumTreeLSTM, self).__init__()
self.in_dim = in_dim
self.mem_dim = mem_dim
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
def node_forward(self, inputs, child_c, child_h):
child_h_sum = torch.sum(child_h, dim=0, keepdim=True)
iou = self.ioux(inputs) + self.iouh(child_h_sum)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = F.sigmoid(i), F.sigmoid(o), F.tanh(u)
f = F.sigmoid(
self.fh(child_h) +
self.fx(inputs).repeat(len(child_h), 1)
)
fc = torch.mul(f, child_c)
c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)
h = torch.mul(o, F.tanh(c))
return c, h
def forward(self, tree, inputs):
for idx in range(tree.num_children):
self.forward(tree.children[idx], inputs)
if tree.num_children == 0:
child_c = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()
child_h = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()
else:
child_c, child_h = zip(* map(lambda x: x.state, tree.children))
child_c, child_h = torch.cat(child_c, dim=0), torch.cat(child_h, dim=0)
tree.state = self.node_forward(inputs[tree.idx], child_c, child_h)
return tree.state
# module for distance-angle similarity
class Similarity(nn.Module):
def __init__(self, mem_dim, hidden_dim, num_classes):
super(Similarity, self).__init__()
self.mem_dim = mem_dim
self.hidden_dim = hidden_dim
self.num_classes = num_classes
self.wh = nn.Linear(2 * self.mem_dim, self.hidden_dim)
self.wp = nn.Linear(self.hidden_dim, self.num_classes)
def forward(self, lvec, rvec):
mult_dist = torch.mul(lvec, rvec)
abs_dist = torch.abs(torch.add(lvec, -rvec))
vec_dist = torch.cat((mult_dist, abs_dist), 1)
out = F.sigmoid(self.wh(vec_dist))
out = F.log_softmax(self.wp(out), dim=1)
return out
# putting the whole model together
class SimilarityTreeLSTM(nn.Module):
def __init__(self, vocab_size, in_dim, mem_dim, hidden_dim, num_classes, sparsity, freeze):
super(SimilarityTreeLSTM, self).__init__()
self.emb = nn.Embedding(vocab_size, in_dim, padding_idx=Constants.PAD, sparse=sparsity)
if freeze:
self.emb.weight.requires_grad = False
self.childsumtreelstm = ChildSumTreeLSTM(in_dim, mem_dim)
self.similarity = Similarity(mem_dim, hidden_dim, num_classes)
def forward(self, ltree, linputs, rtree, rinputs):
linputs = self.emb(linputs)
rinputs = self.emb(rinputs)
lstate, lhidden = self.childsumtreelstm(ltree, linputs)
rstate, rhidden = self.childsumtreelstm(rtree, rinputs)
output = self.similarity(lstate, rstate)
return output
| 3,286 | 36.352273 | 95 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/dataset.py | import os
from tqdm import tqdm
from copy import deepcopy
import torch
import torch.utils.data as data
from . import Constants
from .tree import Tree
# Dataset class for SICK dataset
class SICKDataset(data.Dataset):
def __init__(self, path, vocab, num_classes):
super(SICKDataset, self).__init__()
self.vocab = vocab
self.num_classes = num_classes
self.lsentences = self.read_sentences(os.path.join(path, 'a.toks'))
self.rsentences = self.read_sentences(os.path.join(path, 'b.toks'))
self.ltrees = self.read_trees(os.path.join(path, 'a.parents'))
self.rtrees = self.read_trees(os.path.join(path, 'b.parents'))
self.labels = self.read_labels(os.path.join(path, 'sim.txt'))
self.size = self.labels.size(0)
def __len__(self):
return self.size
def __getitem__(self, index):
ltree = deepcopy(self.ltrees[index])
rtree = deepcopy(self.rtrees[index])
lsent = deepcopy(self.lsentences[index])
rsent = deepcopy(self.rsentences[index])
label = deepcopy(self.labels[index])
return (ltree, lsent, rtree, rsent, label)
def read_sentences(self, filename):
with open(filename, 'r') as f:
sentences = [self.read_sentence(line) for line in tqdm(f.readlines())]
return sentences
def read_sentence(self, line):
indices = self.vocab.convertToIdx(line.split(), Constants.UNK_WORD)
return torch.tensor(indices, dtype=torch.long, device='cpu')
def read_trees(self, filename):
with open(filename, 'r') as f:
trees = [self.read_tree(line) for line in tqdm(f.readlines())]
return trees
def read_tree(self, line):
parents = list(map(int, line.split()))
trees = dict()
root = None
for i in range(1, len(parents) + 1):
if i - 1 not in trees.keys() and parents[i - 1] != -1:
idx = i
prev = None
while True:
parent = parents[idx - 1]
if parent == -1:
break
tree = Tree()
if prev is not None:
tree.add_child(prev)
trees[idx - 1] = tree
tree.idx = idx - 1
if parent - 1 in trees.keys():
trees[parent - 1].add_child(tree)
break
elif parent == 0:
root = tree
break
else:
prev = tree
idx = parent
return root
def read_labels(self, filename):
with open(filename, 'r') as f:
labels = list(map(lambda x: float(x), f.readlines()))
labels = torch.tensor(labels, dtype=torch.float, device='cpu')
return labels
| 2,927 | 32.655172 | 82 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/vocab.py | # vocab object from harvardnlp/opennmt-py
class Vocab(object):
def __init__(self, filename=None, data=None, lower=False):
self.idxToLabel = {}
self.labelToIdx = {}
self.lower = lower
# Special entries will not be pruned.
self.special = []
if data is not None:
self.addSpecials(data)
if filename is not None:
self.loadFile(filename)
def size(self):
return len(self.idxToLabel)
# Load entries from a file.
def loadFile(self, filename):
idx = 0
for line in open(filename, 'r', encoding='utf8', errors='ignore'):
token = line.rstrip('\n')
self.add(token)
idx += 1
def getIndex(self, key, default=None):
key = key.lower() if self.lower else key
try:
return self.labelToIdx[key]
except KeyError:
return default
def getLabel(self, idx, default=None):
try:
return self.idxToLabel[idx]
except KeyError:
return default
# Mark this `label` and `idx` as special
def addSpecial(self, label, idx=None):
idx = self.add(label)
self.special += [idx]
# Mark all labels in `labels` as specials
def addSpecials(self, labels):
for label in labels:
self.addSpecial(label)
# Add `label` in the dictionary. Use `idx` as its index if given.
def add(self, label):
label = label.lower() if self.lower else label
if label in self.labelToIdx:
idx = self.labelToIdx[label]
else:
idx = len(self.idxToLabel)
self.idxToLabel[idx] = label
self.labelToIdx[label] = idx
return idx
# Convert `labels` to indices. Use `unkWord` if not found.
# Optionally insert `bosWord` at the beginning and `eosWord` at the .
def convertToIdx(self, labels, unkWord, bosWord=None, eosWord=None):
vec = []
if bosWord is not None:
vec += [self.getIndex(bosWord)]
unk = self.getIndex(unkWord)
vec += [self.getIndex(label, default=unk) for label in labels]
if eosWord is not None:
vec += [self.getIndex(eosWord)]
return vec
# Convert `idx` to labels. If index `stop` is reached, convert it and return.
def convertToLabels(self, idx, stop):
labels = []
for i in idx:
labels += [self.getLabel(i)]
if i == stop:
break
return labels
| 2,537 | 28.172414 | 81 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/metrics.py | from copy import deepcopy
import torch
class Metrics():
def __init__(self, num_classes):
self.num_classes = num_classes
def pearson(self, predictions, labels):
x = deepcopy(predictions)
y = deepcopy(labels)
x = (x - x.mean()) / x.std()
y = (y - y.mean()) / y.std()
return torch.mean(torch.mul(x, y))
def mse(self, predictions, labels):
x = deepcopy(predictions)
y = deepcopy(labels)
return torch.mean((x - y) ** 2)
| 504 | 23.047619 | 43 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/__init__.py | from . import Constants
from .dataset import SICKDataset
from .metrics import Metrics
from .model import SimilarityTreeLSTM
from .trainer import Trainer
from .tree import Tree
from . import utils
from .vocab import Vocab
__all__ = [Constants, SICKDataset, Metrics, SimilarityTreeLSTM, Trainer, Tree, Vocab, utils]
| 315 | 27.727273 | 92 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/trainer.py | from tqdm import tqdm
import torch
from . import utils
class Trainer(object):
def __init__(self, args, model, criterion, optimizer, device):
super(Trainer, self).__init__()
self.args = args
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.device = device
self.epoch = 0
# helper function for training
def train(self, dataset):
self.model.train()
self.optimizer.zero_grad()
total_loss = 0.0
indices = torch.randperm(len(dataset), dtype=torch.long, device='cpu')
for idx in tqdm(range(len(dataset)), desc='Training epoch ' + str(self.epoch + 1) + ''):
ltree, linput, rtree, rinput, label = dataset[indices[idx]]
target = utils.map_label_to_target(label, dataset.num_classes)
linput, rinput = linput.to(self.device), rinput.to(self.device)
target = target.to(self.device)
output = self.model(ltree, linput, rtree, rinput)
loss = self.criterion(output, target)
total_loss += loss.item()
loss.backward()
if idx % self.args.batchsize == 0 and idx > 0:
self.optimizer.step()
self.optimizer.zero_grad()
self.epoch += 1
return total_loss / len(dataset)
# helper function for testing
def test(self, dataset):
self.model.eval()
with torch.no_grad():
total_loss = 0.0
predictions = torch.zeros(len(dataset), dtype=torch.float, device='cpu')
indices = torch.arange(1, dataset.num_classes + 1, dtype=torch.float, device='cpu')
for idx in tqdm(range(len(dataset)), desc='Testing epoch ' + str(self.epoch) + ''):
ltree, linput, rtree, rinput, label = dataset[idx]
target = utils.map_label_to_target(label, dataset.num_classes)
linput, rinput = linput.to(self.device), rinput.to(self.device)
target = target.to(self.device)
output = self.model(ltree, linput, rtree, rinput)
loss = self.criterion(output, target)
total_loss += loss.item()
output = output.squeeze().to('cpu')
predictions[idx] = torch.dot(indices, torch.exp(output))
return total_loss / len(dataset), predictions
| 2,384 | 40.842105 | 96 | py |
FastVae_Gpu | FastVae_Gpu-main/run_mm.py | from dataloader import RecData, UserItemData
from sampler_gpu_mm import SamplerBase, PopularSampler, MidxUniform, MidxUniPop
import torch
import torch.optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from vae_models import BaseVAE, VAE_Sampler
import argparse
import numpy as np
from utils import Eval
import utils
import logging
import datetime
import os
import time
import gc
def evaluate(model, train_mat, test_mat, logger, device):
logger.info("Start evaluation")
model.eval()
with torch.no_grad():
user_num, item_num = train_mat.shape
user_emb = get_user_embs(train_mat, model, device)
item_emb = model._get_item_emb()
user_emb = user_emb.cpu().data
item_emb = item_emb.cpu().data
users = np.random.choice(user_num, min(user_num, 5000), False)
m = Eval.evaluate_item(train_mat[users, :], test_mat[users, :], user_emb[users, :], item_emb, topk=50)
return m
def get_user_embs(data_mat, model, device):
data = UserItemData(data_mat, train_flag=False)
dataloader = DataLoader(data, batch_size=config.batch_size_u, num_workers=config.num_workers, pin_memory=False, shuffle=False, collate_fn=utils.custom_collate_)
user_lst = []
for e in dataloader:
user_his = e
user_emb = model._get_user_emb(user_his.to(device))
user_lst.append(user_emb)
return torch.cat(user_lst, dim=0)
def train_model(model, train_mat, test_mat, config, logger):
optimizer = utils_optim(config.learning_rate, model, config.weight_decay)
scheduler = StepLR(optimizer, config.step_size, config.gamma)
device = torch.device(config.device)
train_data = UserItemData(train_mat)
train_dataloader = DataLoader(train_data, batch_size=config.batch_size, num_workers=config.num_workers, pin_memory=True, shuffle=True, collate_fn=utils.custom_collate_)
initial_list = []
training_list = []
sampling_list = []
inference_list = []
cal_loss_list = []
for epoch in range(config.epoch):
loss_ , kld_loss = 0.0, 0.0
logger.info("Epoch %d"%epoch)
if epoch > 0:
del sampler
if config.sampler > 2:
del item_emb
infer_total_time = 0.0
sample_total_time = 0.0
loss_total_time = 0.0
t0 = time.time()
if config.sampler > 0:
if config.sampler == 1:
sampler = SamplerBase(train_mat.shape[1] * config.multi, config.sample_num, device)
elif config.sampler == 2:
pop_count = np.squeeze(train_mat.sum(axis=0).A)
pop_count = np.r_[pop_count, np.ones(train_mat.shape[1] * (config.multi -1))]
sampler = PopularSampler(pop_count, config.sample_num, device)
elif config.sampler == 3:
item_emb = model._get_item_emb().detach()
sampler = MidxUniform(item_emb, config.sample_num, device, config.cluster_num)
elif config.sampler == 4:
item_emb = model._get_item_emb().detach()
pop_count = np.squeeze(train_mat.sum(axis=0).A)
pop_count = np.r_[pop_count, np.ones(train_mat.shape[1] * (config.multi -1))]
sampler = MidxUniPop(item_emb, config.sample_num, device, config.cluster_num, pop_count)
t1 = time.time()
for batch_idx, data in enumerate(train_dataloader):
model.train()
if config.sampler > 0 :
sampler.train()
else:
sampler = None
pos_id = data
pos_id = pos_id.to(device)
optimizer.zero_grad()
tt0 = time.time()
mu, logvar, loss, sample_time, loss_time = model(pos_id, sampler)
tt1 = time.time()
sample_total_time += sample_time
infer_total_time += tt1 - tt0
loss_total_time += loss_time
kl_divergence = model.kl_loss(mu, logvar, config.anneal, reduction=config.reduction)/config.batch_size
loss_ += loss.item()
kld_loss += kl_divergence.item()
loss += kl_divergence.item()
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
optimizer.step()
# break
# torch.cuda.empty_cache()
t2= time.time()
logger.info('--loss : %.2f, kl_dis : %.2f, total : %.2f '% (loss_, kld_loss, loss_ + kld_loss))
torch.cuda.empty_cache()
scheduler.step()
gc.collect()
initial_list.append(t1 - t0)
training_list.append(t2 - t1)
sampling_list.append(sample_total_time)
inference_list.append(infer_total_time)
cal_loss_list.append(loss_total_time)
if (epoch % 10) == 0:
result = evaluate(model, train_mat, test_mat, logger, device)
logger.info('***************Eval_Res : NDCG@5,10,50 %.6f, %.6f, %.6f'%(result['item_ndcg'][4], result['item_ndcg'][9], result['item_ndcg'][49]))
logger.info('***************Eval_Res : RECALL@5,10,50 %.6f, %.6f, %.6f'%(result['item_recall'][4], result['item_recall'][9], result['item_recall'][49]))
logger.info(' Initial Time : {}'.format(np.mean(initial_list)))
logger.info(' Sampling Time : {}'.format(np.mean(sampling_list)))
logger.info(' Inference Time: {}'.format(np.mean(inference_list)))
logger.info(' Calc Loss Time: {}'.format(np.mean(cal_loss_list)))
logger.info(' Training Time (One epoch, including the dataIO, sampling, inference and backward time) : {}'.format(np.mean(training_list)))
def utils_optim(learning_rate, model, w):
if config.optim=='adam':
return torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=w)
elif config.optim=='sgd':
return torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=w)
else:
raise ValueError('Unkown optimizer!')
def main(config, logger=None):
device = torch.device(config.device)
data = RecData(config.data_dir, config.data)
train_mat, test_mat = data.get_data(config.ratio)
user_num, item_num = train_mat.shape
logging.info('The shape of datasets: %d, %d'%(user_num, item_num))
assert config.sample_num < item_num
if config.model == 'vae' and config.sampler == 0:
model = BaseVAE(item_num * config.multi, config.dim)
elif config.model == 'vae' and config.sampler > 0:
model = VAE_Sampler(item_num * config.multi, config.dim)
else:
raise ValueError('Not supported model name!!!')
model = model.to(device)
train_model(model, train_mat, test_mat, config, logger)
return evaluate(model, train_mat, test_mat, logger, device)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Initialize Parameters!')
parser.add_argument('-data', default='ml10M', type=str, help='path of datafile')
parser.add_argument('-d', '--dim', default=[200, 32], type=int, nargs='+', help='the dimenson of the latent vector for student model')
parser.add_argument('-s','--sample_num', default=500, type=int, help='the number of sampled items')
parser.add_argument('--subspace_num', default=2, type=int, help='the number of splitted sub space')
parser.add_argument('--cluster_num', default=16, type=int, help='the number of cluster centroids')
parser.add_argument('-b', '--batch_size', default=256, type=int, help='the batch size for training')
parser.add_argument('-e','--epoch', default=2, type=int, help='the number of epoches')
parser.add_argument('-o','--optim', default='adam', type=str, help='the optimizer for training')
parser.add_argument('-lr', '--learning_rate', default=0.001, type=float, help='the learning rate for training')
parser.add_argument('--seed', default=20, type=int, help='random seed values')
parser.add_argument('--ratio', default=0.8, type=float, help='the spilit ratio of dataset for train and test')
parser.add_argument('--log_path', default='logs_test', type=str, help='the path for log files')
parser.add_argument('--num_workers', default=16, type=int, help='the number of workers for dataloader')
parser.add_argument('--data_dir', default='datasets', type=str, help='the dir of datafiles')
parser.add_argument('--device', default='cuda', type=str, help='device for training, cuda or gpu')
parser.add_argument('--model', default='vae', type=str, help='model name')
parser.add_argument('--sampler', default=4, type=int, help='the sampler, 0 : no sampler, 1: uniform, 2: popular, 3: MidxUni, 4: MidxPop')
parser.add_argument('--fix_seed', default=True, type=bool, help='whether to fix the seed values')
parser.add_argument('--step_size', default=5, type=int, help='step size for learning rate discount')
parser.add_argument('--gamma', default=0.95, type=float, help='discout for lr')
parser.add_argument('--anneal', default=1.0, type=float, help='parameters for kl loss')
parser.add_argument('--batch_size_u', default=128, type=int, help='batch size user for inference')
parser.add_argument('--reduction', default=False, type=bool, help='loss if reduction')
parser.add_argument('-w', '--weight_decay', default=1e-3, type=float, help='weight decay for the optimizer' )
parser.add_argument('--multi', default=1, type=int, help='the number of extended items')
config = parser.parse_args()
import os
if not os.path.exists(config.log_path):
os.makedirs(config.log_path)
alg = config.model
sampler = str(config.sampler) + '_' + str(config.multi) + 'x'
ISOTIMEFORMAT = '%m%d-%H%M%S'
timestamp = str(datetime.datetime.now().strftime(ISOTIMEFORMAT))
loglogs = '_'.join((config.data, sampler, timestamp))
log_file_name = os.path.join(config.log_path, loglogs)
logger = utils.get_logger(log_file_name)
# os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
logger.info(config)
if config.fix_seed:
utils.setup_seed(config.seed)
import time
t0 = time.time()
m = main(config, logger)
t1 = time.time()
logger.info('Eval_Res : NDCG@5,10,50 %.6f, %.6f, %.6f'%(m['item_ndcg'][4], m['item_ndcg'][9], m['item_ndcg'][49]))
logger.info('Eval_Res : RECALL@5,10,50 %.6f, %.6f, %.6f'%(m['item_recall'][4], m['item_recall'][9], m['item_recall'][49]))
logger.info("Finish")
svmat_name = log_file_name + '.mat'
logger.info('Total Running Time: {}'.format(t1-t0) ) | 10,590 | 45.862832 | 172 | py |
FastVae_Gpu | FastVae_Gpu-main/vae_models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import time
class BaseVAE(nn.Module):
def __init__(self, num_item, dims, active='relu', dropout=0.5):
"""
dims is a list for latent dims
"""
super(BaseVAE, self).__init__()
self.num_item = num_item
self.dims = dims
assert len(dims) == 2, 'Not supported dims'
self.encode_layer_0 = nn.Embedding(self.num_item + 1, dims[0], padding_idx=0)
self.encode_layer_1 = nn.Linear(dims[0], dims[1] * 2)
self.decode_layer_0 = nn.Linear(dims[1], dims[0])
# self._Item_Embeddings = nn.Embedding(self.num_item + 1, dims[0], padding_idx=0)
self._Item_Embeddings = nn.Linear(dims[0], self.num_item + 1)
self.dropout = nn.Dropout(dropout)
if active == 'relu':
self.act = F.relu
elif active == 'tanh':
self.act == F.tanh
elif active == 'sigmoid':
self.act == F.sigmoid
else:
raise ValueError('Not supported active function')
def encode(self, item_id):
# item_id is padded
count_nonzero = item_id.count_nonzero(dim=1).unsqueeze(-1) # batch_user * 1
user_embs = self.encode_layer_0(item_id) # batch_user * dims
user_embs = torch.sum(user_embs, dim=1) / count_nonzero.pow(0.5)
user_embs = self.dropout(user_embs)
h = self.act(user_embs)
h = self.encode_layer_1(h)
mu, logvar = h[:, :self.dims[1]], h[:, self.dims[1]:]
return mu, logvar
def decode(self, user_emb_encode, items):
user_emb = self.decode_layer_0(user_emb_encode)
# item_embs = self._Item_Embeddings(items)
# item_embs = F.normalize(item_embs)
return self._Item_Embeddings(user_emb)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, pos_items, sampler=None):
mu, logvar = self.encode(pos_items)
z = self.reparameterize(mu, logvar)
items = torch.arange(self.num_item + 1, device=z.device)
self.pos_items = pos_items
part_rats = self.decode(z, items)
t00 = time.time()
loss = self.loss_function(part_rats)
t11 = time.time()
return mu, logvar, loss, 0.0, t11-t00
def kl_loss(self, mu, log_var, anneal=1.0, reduction=False):
if reduction is True:
return -anneal * 0.5 * torch.mean(torch.sum(1 + log_var - mu.pow(2) - log_var.exp(), dim = 1), dim = 0)
else:
return -anneal * 0.5 * torch.sum(torch.sum(1 + log_var - mu.pow(2) - log_var.exp(), dim = 1), dim = 0)
def loss_function(self, part_rats, prob_neg=None, pos_rats=None, prob_pos=None, reduction=False):
# max_v, _ = torch.max(part_rats, dim=-1)
# chuli = part_rats - max_v.unsqueeze(-1)
# logits = chuli - torch.log(torch.sum(torch.exp(chuli), dim=-1)).unsqueeze(-1)
logits = F.log_softmax(part_rats, dim=-1)
idx_mtx = (self.pos_items > 0).double()
if reduction is True:
return -torch.sum(torch.gather(logits, 1, self.pos_items) * idx_mtx, dim=-1).mean()
else:
return -torch.sum(torch.gather(logits, 1, self.pos_items)* idx_mtx, dim=-1).sum()
def _get_user_emb(self, user_his):
user_emb, _ = self.encode(user_his)
return self.decode_layer_0(user_emb)
def _get_item_emb(self):
return self._Item_Embeddings.weight[1:]
class VAE_Sampler(BaseVAE):
def __init__(self, num_item, dims, active='relu', dropout=0.5):
super(VAE_Sampler, self).__init__(num_item, dims, active=active, dropout=dropout)
self._Item_Embeddings = nn.Embedding(self.num_item + 1, dims[0], padding_idx=0)
def decode(self, user_emb_encode, items):
user_emb = self.decode_layer_0(user_emb_encode)
item_embs = self._Item_Embeddings(items)
# return torch.matmul(user_emb.view(user_emb.shape[0], 1, -1), item_embs.transpose(1,2)).squeeze(1)
# return user_emb.unsqueeze(1).bmm(item_embs.transpose(1,2)).squeeze(1)
return (user_emb.unsqueeze(1) * item_embs).sum(-1)
# return (user_emb.view(user_emb.shape[0], 1, -1) * item_embs).sum(-1)
# return torch.einsum('ijk,ik->ij', item_embs, user_emb)
def forward(self, pos_items, sampler):
mu, logvar = self.encode(pos_items)
z = self.reparameterize(mu, logvar)
user_emb = self.decode_layer_0(z)
with torch.no_grad():
t0 = time.time()
pos_prob, neg_items, neg_prob = sampler(user_emb, pos_items)
t1 = time.time()
pos_items_emb = self._Item_Embeddings(pos_items)
neg_items_emb = self._Item_Embeddings(neg_items)
pos_rat = (user_emb.unsqueeze(1) * pos_items_emb).sum(-1)
neg_rat = (user_emb.unsqueeze(1) * neg_items_emb).sum(-1)
t00 = time.time()
loss = self.loss_function(neg_rat, neg_prob, pos_rat, pos_prob)
t11 = time.time()
return mu, logvar, loss, t1 - t0, t11 - t00
def loss_function(self, part_rats, log_prob_neg=None, pos_rats=None, log_prob_pos=None, reduction=False):
idx_mtx = (pos_rats != 0).double()
new_pos = pos_rats - log_prob_pos.detach()
new_neg = part_rats - log_prob_neg.detach()
# parts_log_sum_exp = torch.logsumexp(new_neg, dim=-1).unsqueeze(-1)
# final = torch.log( torch.exp(new_pos) + torch.exp(parts_log_sum_exp))
parts_sum_exp = torch.sum(torch.exp(new_neg), dim=-1).unsqueeze(-1)
final = torch.log(torch.exp(new_pos) + parts_sum_exp)
if reduction is True:
return torch.sum((- new_pos + final) * idx_mtx, dim=-1 ).mean()
else:
return torch.sum((- new_pos + final) * idx_mtx, dim=-1 ).sum()
# idx_mtx = (pos_rats != 0).double()
# new_pos = pos_rats - log_prob_pos.detach()
# new_neg = part_rats - log_prob_neg.detach()
# # new_pos[pos_rats==0] = -np.inf
# logits = torch.log_softmax(torch.cat([new_pos, new_neg], dim=-1), dim=-1)
# num_pos_item = pos_rats.shape[1]
# if reduction is True:
# return -torch.sum( logits[:, :num_pos_item] * idx_mtx, dim=-1).mean()
# else:
# return -torch.sum( logits * idx_mtx, dim=-1).sum()
| 6,596 | 38.740964 | 115 | py |
FastVae_Gpu | FastVae_Gpu-main/dataloader.py | import pandas as pd
from torch.utils.data import IterableDataset, Dataset
import torch
from torch.utils.data import Dataset, IterableDataset, DataLoader
import scipy.io as sci
import scipy as sp
import random
import numpy as np
import math
import os
class RecData(object):
def __init__(self, dir, file_name):
file_name = file_name + 'data.mat'
# file_name = file_name + 'data.txt'
self.file_name = os.path.join(dir, file_name)
def get_data(self,ratio):
mat = self.load_file(filename=self.file_name)
train_mat, test_mat = self.split_matrix(mat, ratio)
return train_mat, test_mat
def load_file(self,filename=''):
# if file_name.endswith('.mat'):
# return sci.loadmat(file_name)['data']
# else:
# raise ValueError('not supported file type')
if filename.endswith('.mat'):
return sci.loadmat(filename)['data']
elif filename.endswith('.txt') or filename.endswith('.tsv'):
sep = '\t'
elif filename.endswith('.csv'):
sep = ','
else:
raise ValueError('not supported file type')
max_user = -1
max_item = -1
row_idx = []
col_idx = []
data = []
for line in open(filename):
user, item, rating = line.strip().split(sep)
user, item, rating = int(user) -1, int(item)-1, float(rating)
row_idx.append(user)
col_idx.append(item)
data.append(rating)
if user > max_user:
max_user = user
if item > max_item:
max_item = item
return sp.sparse.csc_matrix((data, (row_idx, col_idx)), (max_user+1, max_item+1))
def split_matrix(self, mat, ratio=0.8):
mat = mat.tocsr() #按行读取,即每一行为一个用户
m,n = mat.shape
train_data_indices = []
train_indptr = [0] * (m+1)
test_data_indices = []
test_indptr = [0] * (m+1)
for i in range(m):
row = [(mat.indices[j], mat.data[j]) for j in range(mat.indptr[i], mat.indptr[i+1])]
train_idx = random.sample(range(len(row)), round(ratio * len(row)))
train_binary_idx = np.full(len(row), False)
train_binary_idx[train_idx] = True
test_idx = (~train_binary_idx).nonzero()[0]
for idx in train_idx:
train_data_indices.append(row[idx])
train_indptr[i+1] = len(train_data_indices)
for idx in test_idx:
test_data_indices.append(row[idx])
test_indptr[i+1] = len(test_data_indices)
[train_indices, train_data] = zip(*train_data_indices)
[test_indices, test_data] = zip(*test_data_indices)
train_mat = sp.sparse.csr_matrix((train_data, train_indices, train_indptr), (m,n))
test_mat = sp.sparse.csr_matrix((test_data, test_indices, test_indptr), (m,n))
return train_mat, test_mat
class UserItemData(Dataset):
def __init__(self, train_mat, train_flag=True):
super(UserItemData, self).__init__()
self.train = train_mat
if train_flag is True:
self.users = np.random.permutation(self.train.shape[0])
else:
self.users = np.arange(self.train.shape[0])
def __len__(self):
return self.train.shape[0]
def __getitem__(self, idx):
# return self.user[idx], self.item[idx]
pos_idx = self.train[self.users[idx]].nonzero()[1]
return pos_idx | 3,526 | 35.739583 | 96 | py |
FastVae_Gpu | FastVae_Gpu-main/utils.py | import scipy as sp
import scipy.sparse as ss
import scipy.io as sio
import random
import numpy as np
from typing import List
import logging
import torch
import math
from torch.nn.utils.rnn import pad_sequence
def get_logger(filename, verbosity=1, name=None):
filename = filename + '.txt'
level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
formatter = logging.Formatter(
"[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
)
logger = logging.getLogger(name)
logger.setLevel(level_dict[verbosity])
fh = logging.FileHandler(filename, "w")
fh.setFormatter(formatter)
logger.addHandler(fh)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
def setup_seed(seed):
import os
os.environ['PYTHONHASHSEED']=str(seed)
import random
random.seed(seed)
np.random.seed(seed)
import torch
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
def get_max_length(x):
return max(x, key=lambda x: x.shape[0]).shape[0]
def pad_sequence_int(seq):
def _pad(_it, _max_len):
return np.concatenate(( _it + 1, np.zeros(_max_len - len(_it), dtype=np.int32) ))
return [_pad(it, get_max_length(seq)) for it in seq]
def custom_collate_(batch):
return torch.LongTensor(pad_sequence_int(batch))
class Eval:
@staticmethod
def evaluate_item(train:ss.csr_matrix, test:ss.csr_matrix, user:np.ndarray, item:np.ndarray, topk:int=50, cutoff:int=50):
train = train.tocsr()
test = test.tocsr()
idx = np.squeeze((test.sum(axis=1) > 0).A)
train = train[idx, :]
test = test[idx, :]
user = user[idx, :]
N = train.shape[1]
cand_count = N - train.sum(axis=1)
if topk <0:
mat_rank = Eval.predict(train, test, user, item)
else:
mat_rank = Eval.topk_search_(train, test, user, item, topk)
return Eval.compute_item_metric(test, mat_rank, cand_count, cutoff)
@staticmethod
def compute_item_metric(test:ss.csr_matrix, mat_rank:ss.csr_matrix, cand_count:np.ndarray, cutoff:int=200):
rel_count = (test !=0).sum(axis=1)
istopk = mat_rank.max() < test.shape[1] * 0.5
recall, precision, map = Eval.compute_recall_precision(mat_rank, rel_count, cutoff)
ndcg = Eval.compute_ndcg(test, mat_rank, cutoff)
if not istopk:
auc, mpr = Eval.compute_auc(mat_rank, rel_count, cand_count)
return {'item_recall': recall, 'item_prec': precision, 'item_map': map, 'item_ndcg': ndcg, 'item_mpr':mpr, 'item_auc':auc}
else:
return {'item_recall':recall, 'item_prec':precision, 'item_map': map, 'item_ndcg':ndcg}
@staticmethod
def compute_ndcg(test, mat_rank, cutoff):
M, _ = test.shape
mat_rank_ = mat_rank.tocoo()
user, item, rank = mat_rank_.row, mat_rank_.col, mat_rank_.data
score = np.squeeze(test[(user, item)].A) / np.log2(rank + 2)
dcg_score = ss.csr_matrix((score, (user, rank)), shape=test.shape)
dcg = np.cumsum(dcg_score[:, :cutoff].todense(), axis=1)
dcg = np.c_[dcg, dcg_score.sum(axis=1)]
idcg = np.zeros((M, cutoff+1))
for i in range(M):
r = test.data[test.indptr[i]:test.indptr[i+1]]
idcg_ = np.cumsum(-np.sort(-r) / np.log2(np.array(range(len(r)))+2))
if cutoff > len(r):
idcg[i,:] = np.r_[idcg_, np.tile(idcg_[-1], cutoff+1-len(r))]
else:
idcg[i,:] = np.r_[idcg_[:cutoff], idcg_[-1]]
ndcg = dcg / idcg
ndcg = np.mean(ndcg, axis=0)
return np.squeeze(ndcg.A)
@staticmethod
def compute_recall_precision(mat_rank, user_count, cutoff):
user_count = user_count.A.T
M, _ = mat_rank.shape
mat_rank_ = mat_rank.tocoo()
user, rank = mat_rank_.row, mat_rank_.data
user_rank = ss.csr_matrix((np.ones_like(user), (user, rank)), shape=mat_rank.shape)
user_rank = user_rank[:,:cutoff].todense()
user_count_inv = ss.diags(1/user_count, [0])
cum = np.cumsum(user_rank, axis=1)
recall = np.mean(user_count_inv * cum, axis=0)
prec_cum = cum * ss.diags(1/np.array(range(1,cutoff+1)), 0)
prec = np.mean(prec_cum, axis=0)
div = np.minimum(np.tile(range(1, cutoff+1), (M, 1)), np.tile(user_count.T, (1, cutoff)))
map = np.mean(np.divide(np.cumsum(np.multiply(prec_cum,user_rank), axis=1), div), axis=0)
return np.squeeze(recall.A), np.squeeze(prec.A), np.squeeze(map.A)
@staticmethod
def compute_auc(mat_rank, rel_count, cand_count):
rel_count = rel_count.A
cand_count = cand_count.A
tmp = mat_rank.sum(axis=1)
mpr = np.mean(tmp / cand_count / rel_count)
auc_vec = rel_count * cand_count - tmp - rel_count - rel_count * (rel_count - 1) / 2
auc_vec = auc_vec / ((cand_count - rel_count) * rel_count)
auc = np.mean(auc_vec)
return auc, mpr
@staticmethod
def evaluate_item_with_code(train:ss.csr_matrix, test:ss.csr_matrix, user:np.ndarray, item_code: np.ndarray, item_center: List[np.ndarray], topk=200, cutoff=200):
train = train.tocsr()
test = test.tocsr()
#result1 = Eval.topk_search_with_code(train, user, item_code, item_center, topk)
result = Eval.topk_search_with_code_fast(train, user, item_code, item_center, topk)
return Eval.evaluate_topk(train, test, result, cutoff)
#@staticmethod
#def topk_search_approximate(train:ss.csr_matrix, user:np.ndarray, item_code: np.ndarray, item_center: List[np.ndarray]):
@staticmethod
def topk_search_with_code_fast(train:ss.csr_matrix, user:np.ndarray, item_code: np.ndarray, item_center: List[np.ndarray], topk=200):
M, _ = train.shape
traind = [train.indices[train.indptr[i]:train.indptr[i + 1]].tolist() for i in range(M)]
center = np.concatenate(item_center)
#result = uts.topk_search_with_code(traind, user, item_code, center, topk)
#return result.reshape([M, topk])
return None
@staticmethod
def topk_search_with_code(train:ss.csr_matrix, user:np.ndarray, item_code: np.ndarray, item_center: List[np.ndarray], topk=200):
item_center = np.stack(item_center, 0) # m x K x D
M = train.shape[0]
result = np.zeros((M, topk), dtype=np.int)
for i in range(M):
E = train.indices[train.indptr[i]:train.indptr[i + 1]]
center_score = np.tensordot(item_center, user[i,:], [-1, -1]) # m x K
#pred = uts.fetch_score(item_code, center_score)
#pred[E] = -np.inf
#idx = np.argpartition(pred, -topk)[-topk:]
#result[i, :] = idx[np.argsort(-pred[idx])]
return result
@staticmethod
def item_reranking(topk_item: np.ndarray, score_func):
M, K = topk_item.shape
result = np.zeros_like(topk_item)
for i in range(M):
score_item = [(topk_item[i, k], score_func(i, topk_item[i, k])) for k in range(K)]
result[i,:] = [a for (a, b) in sorted(score_item, key=lambda x: -x[1])]
return result
@staticmethod
def evaluate_topk(train:ss.csr_matrix, test:ss.csr_matrix, topk_item:np.ndarray, cutoff:int=200):
train = train.tocsr()
test = test.tocsr()
result = topk_item
N = train.shape[1]
cand_count = N - train.sum(axis=1)
M = test.shape[0]
uir = []
for i in range(M):
R = set(test.indices[test.indptr[i]:test.indptr[i+1]])
for k in range(result.shape[1]):
if result[i,k] in R:
uir.append((i, result[i,k], k))
user_id, item_id, rank = zip(*uir)
mat_rank = ss.csr_matrix((rank, (user_id, item_id)), shape=test.shape)
return Eval.compute_item_metric(test, mat_rank, cand_count, cutoff)
@staticmethod
def topk_search(train:ss.csr_matrix, user:np.ndarray, item:np.ndarray, topk:int=200)->np.ndarray:
train = train.tocsr()
M, _ = train.shape
item_t = item.T
result = np.zeros((M, topk), dtype=np.int)
for i in range(M):
E = train.indices[train.indptr[i]:train.indptr[i+1]]
pred = np.matmul(user[i,:], item_t)
#pred = np.tensordot(user[i,:], item, [0,-1])
pred[E] = -np.inf
idx = np.argpartition(pred, -topk)[-topk:]
result[i,:] = idx[np.argsort(-pred[idx])]
return result
@staticmethod
def topk_search_(train:ss.csr_matrix, test:ss.csr_matrix, user:np.ndarray, item:np.ndarray, topk:int=200)->ss.csr_matrix:
M, _ = train.shape
#traind = [train.indices[train.indptr[i]:train.indptr[i + 1]].tolist() for i in range(M)]
#result = uts.topk_search(traind, user, item, topk).reshape([M, topk])
result = Eval.topk_search(train, user, item, topk)
uir = []
for i in range(M):
R = set(test.indices[test.indptr[i]:test.indptr[i+1]])
for k in range(topk):
if result[i,k] in R:
uir.append((i, result[i,k], k))
user_id, item_id, rank = zip(*uir)
mat_rank = ss.csr_matrix((rank, (user_id, item_id)), shape=test.shape)
return mat_rank
#user_id, rank = result.nonzero()
#item_id = result[(user_id, rank)]
#mat_rank = sp.csr_matrix((rank, (user_id, item_id)), shape=test.shape)
#return mat_rank.multiply(test !=0)
@staticmethod
def predict(train:ss.csr_matrix, test:ss.csr_matrix, user:np.ndarray, item:np.ndarray)->ss.csr_matrix:
M, _ = train.shape
item_t = item.T
full_rank = np.zeros_like(test.data)
for i in range(M):
E = train.indices[train.indptr[i]:train.indptr[i+1]]
R = test.indices[test.indptr[i]:test.indptr[i+1]]
U = user[i,:]
pred = np.matmul(U, item_t)
pred[E] = -np.inf
idx = np.argsort(-pred)
rank = np.zeros_like(idx)
rank[idx] = range(len(idx))
full_rank[test.indptr[i]:test.indptr[i+1]] = rank[R]
mat_rank = ss.csr_matrix((full_rank, test.indices, test.indptr), shape=test.shape)
return mat_rank
@staticmethod
def format(metric:dict):
list_str = []
for k, v in metric.items():
if 'ndcg' in k:
m_str = '{0:11}:[{1}, {2:.4f}]'.format(k ,', '.join('{:.4f}'.format(e) for e in v[(10-1)::10]), v[-1])
elif not isinstance(v, np.ndarray):
m_str = '{0:11}:{1:.4f}'.format(k , v)
else:
m_str = '{0:11}:[{1}]'.format(k ,', '.join('{:.4f}'.format(e) for e in v[(10-1)::10]))
list_str.append(m_str)
return '\n'.join(list_str)
| 10,977 | 41.550388 | 166 | py |
FastVae_Gpu | FastVae_Gpu-main/sampler_gpu_mm.py | # The cluster algorithmn(K-means) is implemented on the GPU
from operator import imod, neg
from numpy.core.numeric import indices
import scipy.sparse as sps
from sklearn import cluster
from sklearn.cluster import KMeans
import torch
import numpy as np
import torch.nn as nn
from torch._C import device, dtype
def kmeans(X, K_or_center, max_iter=300, verbose=False):
N = X.size(0)
if isinstance(K_or_center, int) is True:
K = K_or_center
C = X[torch.randperm(N, device=X.device)[:K]]
else:
K = K_or_center.size(0)
C = K_or_center
prev_loss = np.inf
for iter in range(max_iter):
dist = torch.sum(X * X, dim=-1, keepdim=True) - 2 * (X @ C.T) + torch.sum(C * C, dim=-1).unsqueeze(0)
assign = dist.argmin(-1)
assign_m = torch.zeros(N, K, device=X.device)
assign_m[(range(N), assign)] = 1
loss = torch.sum(torch.square(X - C[assign,:])).item()
if verbose:
print(f'step:{iter:<3d}, loss:{loss:.3f}')
if (prev_loss - loss) < prev_loss * 1e-6:
break
prev_loss = loss
cluster_count = assign_m.sum(0)
C = (assign_m.T @ X) / cluster_count.unsqueeze(-1)
empty_idx = cluster_count<.5
ndead = empty_idx.sum().item()
C[empty_idx] = X[torch.randperm(N, device=X.device)[:ndead]]
return C, assign, assign_m, loss
def construct_index(cd01, K):
# Stable is availabel in PyTorch 1.9. Earlier version is not supported.
cd01, indices = torch.sort(cd01, stable=True)
# save the indices according to the cluster
cluster, count = torch.unique_consecutive(cd01, return_counts=True)
count_all = torch.zeros(K**2 + 1, dtype=torch.long, device=cd01.device)
count_all[cluster + 1] = count
indptr = count_all.cumsum(dim=-1)
return indices, indptr
class SamplerBase(nn.Module):
"""
Uniformly Sample negative items for each query.
"""
def __init__(self, num_items, num_neg, device, **kwargs):
super().__init__()
self.num_items = num_items
self.num_neg = num_neg
self.device = device
def forward(self, query, pos_items=None, padding=0):
"""
Input
query: torch.tensor
Sequential models:
query: (B,L,D), pos_items : (B, L)
Normal models:
query: (B,D), pos_items: (B,L)
Output
pos_prob(None if no pos_items), neg_items, neg_prob
pos_items.shape == pos_prob.shape
neg_items.shape == neg_prob.shape
Sequential models:
neg_items: (B,L,N)
Normal
"""
assert padding == 0
num_queries = np.prod(query.shape[:-1]) # for sequential models the number of queries is the B x L
neg_items = torch.randint(1, self.num_items + 1, size=(num_queries, self.num_neg), device=self.device)
neg_items = neg_items.view(*query.shape[:-1], -1)
neg_prob = -torch.log(self.num_items * torch.ones_like(neg_items, dtype=torch.float))
if pos_items is not None:
pos_prob = -torch.log(self.num_items * torch.ones_like(pos_items, dtype=torch.float))
return pos_prob, neg_items, neg_prob
return None, neg_items, neg_prob
class PopularSampler(SamplerBase):
def __init__(self, pop_count, num_neg, device, mode=0, **kwargs):
super().__init__(pop_count.shape[0], num_neg, device)
pop_count = torch.from_numpy(pop_count).to(self.device)
if mode == 0:
pop_count = torch.log(pop_count + 1)
elif mode == 1:
pop_count = torch.log(pop_count) + 1e-16
elif mode == 2:
pop_count = pop_count**0.75
pop_count = torch.cat([torch.zeros(1, device=self.device), pop_count])
self.pop_prob = pop_count / pop_count.sum()
self.table = torch.cumsum(self.pop_prob, -1)
self.pop_prob[0] = torch.ones(1, device=self.device)
def forward(self, query, pos_items=None, padding=0):
assert padding == 0
num_queris = np.prod(query.shape[:-1])
seeds = torch.rand(num_queris, self.num_neg, device=self.device)
neg_items = torch.searchsorted(self.table, seeds)
neg_items = neg_items.view(*query.shape[:-1], -1)
neg_prob = torch.log(self.pop_prob[neg_items])
if pos_items is not None:
pos_prob = torch.log(self.pop_prob[pos_items])
return pos_prob, neg_items, neg_prob
return None, neg_items, neg_prob
class MidxUniform(SamplerBase):
"""
Midx Sampler with Uniform Variant
"""
def __init__(self, item_embs:torch.tensor, num_neg, device, num_cluster, item_pop:torch.tensor = None, **kwargs):
super().__init__(item_embs.shape[0], num_neg, device)
if isinstance(num_cluster, int) is True:
self.K = num_cluster
else:
self.K = num_cluster.size(0)
embs1, embs2 = torch.chunk(item_embs, 2, dim=-1)
self.c0, cd0, cd0m, _ = kmeans(embs1, num_cluster)
self.c1, cd1, cd1m, _ = kmeans(embs2, num_cluster)
self.c0_ = torch.cat([torch.zeros(1, self.c0.size(1), device=self.device), self.c0], dim=0) ## for retreival probability, considering padding
self.c1_ = torch.cat([torch.zeros(1, self.c1.size(1), device=self.device), self.c1], dim=0) ## for retreival probability, considering padding
self.cd0 = torch.cat([torch.tensor([-1]).to(self.device), cd0], dim=0) + 1 ## for retreival probability, considering padding
self.cd1 = torch.cat([torch.tensor([-1]).to(self.device), cd1], dim=0) + 1 ## for retreival probability, considering padding
cd01 = cd0 * self.K + cd1
self.indices, self.indptr = construct_index(cd01, self.K)
if item_pop is None:
self.wkk = cd0m.T @ cd1m
else:
self.wkk = cd0m.T @ (cd1m * item_pop.view(-1, 1))
def forward(self, query, pos_items=None, padding=0):
assert padding == 0
q0, q1 = query.view(-1, query.size(-1)).chunk(2, dim=-1)
r1 = q1 @ self.c1.T
r1s = torch.softmax(r1, dim=-1) # num_q x K1
r0 = q0 @ self.c0.T
r0s = torch.softmax(r0, dim=-1) # num_q x K0
s0 = (r1s @ self.wkk.T) * r0s # num_q x K0 | wkk: K0 x K1
k0 = torch.multinomial(s0, self.num_neg, replacement=True) # num_q x neg
p0 = torch.gather(r0, -1, k0) # num_q * neg
subwkk = self.wkk[k0, :] # num_q x neg x K1
s1 = subwkk * r1s.unsqueeze(1) # num_q x neg x K1
k1 = torch.multinomial(s1.view(-1, s1.size(-1)), 1).squeeze(-1).view(*s1.shape[:-1]) # num_q x neg
p1 = torch.gather(r1, -1, k1) # num_q x neg
k01 = k0 * self.K + k1 # num_q x neg
p01 = p0 + p1
neg_items, neg_prob = self.sample_item(k01, p01)
if pos_items is not None:
pos_prop = self.compute_item_p(query, pos_items)
return pos_prop, neg_items.view(*query.shape[:-1], -1), neg_prob.view(*query.shape[:-1], -1)
return None, neg_items.view(*query.shape[:-1], -1), neg_prob.view(*query.shape[:-1], -1)
def sample_item(self, k01, p01):
item_cnt = self.indptr[k01 + 1] - self.indptr[k01] # num_q x neg, the number of items
item_idx = torch.floor(item_cnt * torch.rand_like(item_cnt, dtype=torch.float32, device=self.device)).long() # num_q x neg
neg_items = self.indices[item_idx + self.indptr[k01]] + 1
neg_prob = p01
return neg_items, neg_prob
def compute_item_p(self, query, pos_items):
# query: B x L x D, pos_items: B x L || query: B x D, pos_item: B x L1 || assume padding=0
k0 = self.cd0[pos_items] # B x L || B x L1
k1 = self.cd1[pos_items] # B x L || B x L1
c0 = self.c0_[k0, :] # B x L x D || B x L1 x D
c1 = self.c1_[k1, :] # B x L x D || B x L1 x D
q0, q1 = query.chunk(2, dim=-1) # B x L x D || B x D
if query.dim() == pos_items.dim():
r = (torch.bmm(c0, q0.unsqueeze(-1)) + torch.bmm(c1, q1.unsqueeze(-1))).squeeze(-1) # B x L1
else:
r = torch.sum(c0 * q0, dim=-1) + torch.sum(c1 * q1, dim=-1) # B x L
return r
class MidxUniPop(MidxUniform):
"""
Popularity sampling for the final items
"""
def __init__(self, item_embs: np.ndarray, num_neg, device, num_cluster, pop_count, mode=1, **kwargs):
if mode == 0:
pop_count = np.log(pop_count + 1)
elif mode == 1:
pop_count = np.log(pop_count + 1) + 1e-6
elif mode == 2:
pop_count = pop_count**0.75
pop_count = torch.tensor(pop_count, dtype=torch.float32, device=device)
super(MidxUniPop, self).__init__(item_embs, num_neg, device, num_cluster, pop_count)
self.p = torch.cat([torch.ones(1, device=self.device), pop_count], dim=0) # this is similar, to avoid log 0 !!! in case of zero padding
self.cp = pop_count[self.indices]
for c in range(self.K**2):
start, end = self.indptr[c], self.indptr[c+1]
if end > start:
cumsum = self.cp[start:end].cumsum(-1)
self.cp[start:end] = cumsum / cumsum[-1]
def forward(self, query, pos_items=None, padding=0):
return super().forward(query, pos_items=pos_items, padding=padding)
def sample_item(self, k01, p01):
# k01 num_q x neg, p01 num_q x neg
start = self.indptr[k01]
last = self.indptr[k01 + 1] - 1
count = last - start + 1
maxlen = count.max()
# print(maxlen)
fullrange = start.unsqueeze(-1) + torch.arange(maxlen, device=self.device).reshape(1, 1, maxlen) # num_q x neg x maxlen
fullrange = torch.minimum(fullrange, last.unsqueeze(-1))
item_idx = torch.searchsorted(self.cp[fullrange], torch.rand_like(start, dtype=torch.float32, device=self.device).unsqueeze(-1)).squeeze(-1) ## num_q x neg
item_idx = torch.minimum(item_idx, last)
neg_items = self.indices[item_idx + self.indptr[k01]] + 1
# neg_probs = self.p[item_idx + self.indptr[k01] + 1] # plus 1 due to considering padding, since p include num_items + 1 entries
neg_probs = self.p[neg_items]
return neg_items, p01 + torch.log(neg_probs)
def compute_item_p(self, query, pos_items):
r = super().compute_item_p(query, pos_items)
p_r = self.p[pos_items]
return r + torch.log(p_r)
if __name__ == '__main__':
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "5"
device = 'cuda'
from dataloader import RecData
from utils import setup_seed
setup_seed(10)
data = RecData('datasets', 'amazoni')
train, test = data.get_data(0.8)
dim = 200
user_num, num_items = train.shape
num_neg = 2000
num_cluster = 32
max_iter = 200
# item_embs = np.random.randn(num_items, dim)
item_embs = torch.randn(num_items,dim, device=device) * 0.1
pop_count = np.squeeze(train.sum(axis=0).A)
device = torch.device(device)
# sampler0 = SamplerBase(num_items, num_neg, device)
# sampler1 = PopularSampler(pop_count, num_neg, device)
sampler2 = MidxUniform(item_embs, num_neg, device, num_cluster)
sampler3 = MidxUniPop(item_embs, num_neg, device, num_cluster, pop_count)
batch_size = 1
query = torch.randn(batch_size, dim, device=device) * 0.1
# pop_item = torch.randint(0, num_items+1, size=(batch_size))
# sampler0(query, pop_item)
# sampler1(query, pop_item)
# sampler2(query, pop_item)
# sampler3(query, pop_item)
count_tensor = torch.zeros(num_items, dtype=torch.long, device=device)
for i in range(max_iter):
_, neg_items, _ = sampler2(query)
# _, neg_items, _ = sampler3(query)
ids, counts = torch.unique(neg_items -1, return_counts=True)
count_tensor[ids] += counts
# print(count_tensor.max(), counts.max())
count_t = count_tensor / count_tensor.sum(-1)
exact_prob = torch.softmax( torch.matmul(query, item_embs.T), dim=-1).squeeze()
# =========================================
# Plot prob
item_ids = pop_count.argsort()
exact_prob = exact_prob.cpu().data.numpy()
count_prob = count_t.cpu().data.numpy()
import matplotlib.pyplot as plt
plt.plot(exact_prob[item_ids].cumsum(), label='Softmax', linewidth=4.0)
plt.plot(count_prob[item_ids].cumsum(), label='Midx_Uni')
plt.legend()
plt.savefig('amazoni_check.jpg')
| 12,583 | 40.668874 | 163 | py |
KSTER | KSTER-main/setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
with open("requirements.txt", encoding="utf-8") as req_fp:
install_requires = req_fp.readlines()
setup(
name='joeynmt',
version='1.2',
description='Minimalist NMT for educational purposes',
author='Jasmijn Bastings and Julia Kreutzer',
url='https://github.com/joeynmt/joeynmt',
license='Apache License',
install_requires=install_requires,
packages=find_packages(exclude=[]),
python_requires='>=3.5',
project_urls={
'Documentation': 'http://joeynmt.readthedocs.io/en/latest/',
'Source': 'https://github.com/joeynmt/joeynmt',
'Tracker': 'https://github.com/joeynmt/joeynmt/issues',
},
entry_points={
'console_scripts': [
],
}
)
| 790 | 28.296296 | 68 | py |
KSTER | KSTER-main/test/__init__.py | 0 | 0 | 0 | py | |
KSTER | KSTER-main/test/unit/test_vocabulary.py | import unittest
import os
from joeynmt.vocabulary import Vocabulary
class TestVocabulary(unittest.TestCase):
def setUp(self):
self.file = "test/data/toy/train.de"
sent = "Die Wahrheit ist, dass die Titanic – obwohl sie alle " \
"Kinokassenrekorde bricht – nicht gerade die aufregendste " \
"Geschichte vom Meer ist."
self.word_list = sent.split() # only unique tokens
self.char_list = list(sent)
self.temp_file_char = "tmp.src.char"
self.temp_file_word = "tmp.src.word"
self.word_vocab = Vocabulary(tokens=sorted(list(set(self.word_list))))
self.char_vocab = Vocabulary(tokens=sorted(list(set(self.char_list))))
def testVocabularyFromList(self):
self.assertEqual(len(self.word_vocab)-len(self.word_vocab.specials),
len(set(self.word_list)))
self.assertEqual(len(self.char_vocab)-len(self.char_vocab.specials),
len(set(self.char_list)))
expected_char_itos = ['<unk>', '<pad>', '<s>', '</s>',
' ', ',', '.', 'D', 'G', 'K', 'M', 'T', 'W', 'a',
'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l',
'm', 'n', 'o', 'r', 's', 't', 'u', 'v', 'w', '–']
self.assertEqual(self.char_vocab.itos, expected_char_itos)
expected_word_itos = ['<unk>', '<pad>', '<s>', '</s>',
'Die', 'Geschichte', 'Kinokassenrekorde', 'Meer',
'Titanic', 'Wahrheit', 'alle', 'aufregendste',
'bricht', 'dass', 'die', 'gerade', 'ist,', 'ist.',
'nicht', 'obwohl', 'sie', 'vom', '–']
self.assertEqual(self.word_vocab.itos, expected_word_itos)
def testVocabularyFromFile(self):
# write vocabs to file and create new ones from those files
self.word_vocab.to_file(self.temp_file_word)
self.char_vocab.to_file(self.temp_file_char)
word_vocab2 = Vocabulary(file=self.temp_file_word)
char_vocab2 = Vocabulary(file=self.temp_file_char)
self.assertEqual(self.word_vocab.itos, word_vocab2.itos)
self.assertEqual(self.char_vocab.itos, char_vocab2.itos)
os.remove(self.temp_file_char)
os.remove(self.temp_file_word)
def testIsUnk(self):
self.assertTrue(self.word_vocab.is_unk("BLA"))
self.assertFalse(self.word_vocab.is_unk("Die"))
self.assertTrue(self.char_vocab.is_unk("x"))
self.assertFalse(self.char_vocab.is_unk("d"))
| 2,599 | 47.148148 | 80 | py |
KSTER | KSTER-main/test/unit/test_data.py | import unittest
import numpy as np
from joeynmt.data import MonoDataset, TranslationDataset, load_data, \
make_data_iter
class TestData(unittest.TestCase):
def setUp(self):
self.train_path = "test/data/toy/train"
self.dev_path = "test/data/toy/dev"
self.test_path = "test/data/toy/test"
self.levels = ["char", "word"] # bpe is equivalently processed to word
self.max_sent_length = 10
# minimal data config
self.data_cfg = {"src": "de", "trg": "en", "train": self.train_path,
"dev": self.dev_path, "level": "word",
"lowercase": False,
"max_sent_length": self.max_sent_length}
def testIteratorBatchType(self):
current_cfg = self.data_cfg.copy()
# load toy data
train_data, dev_data, test_data, src_vocab, trg_vocab = \
load_data(current_cfg)
# make batches by number of sentences
train_iter = iter(make_data_iter(
train_data, batch_size=10, batch_type="sentence"))
batch = next(train_iter)
self.assertEqual(batch.src[0].shape[0], 10)
self.assertEqual(batch.trg[0].shape[0], 10)
# make batches by number of tokens
train_iter = iter(make_data_iter(
train_data, batch_size=100, batch_type="token"))
_ = next(train_iter) # skip a batch
_ = next(train_iter) # skip another batch
batch = next(train_iter)
self.assertEqual(batch.src[0].shape[0], 8)
self.assertEqual(np.prod(batch.src[0].shape), 88)
self.assertLessEqual(np.prod(batch.src[0].shape), 100)
def testDataLoading(self):
# test all combinations of configuration settings
for test_path in [None, self.test_path]:
for level in self.levels:
for lowercase in [True, False]:
current_cfg = self.data_cfg.copy()
current_cfg["level"] = level
current_cfg["lowercase"] = lowercase
if test_path is not None:
current_cfg["test"] = test_path
# load the data
train_data, dev_data, test_data, src_vocab, trg_vocab = \
load_data(current_cfg)
self.assertIs(type(train_data), TranslationDataset)
self.assertIs(type(dev_data), TranslationDataset)
if test_path is not None:
# test has no target side
self.assertIs(type(test_data), MonoDataset)
# check the number of examples loaded
if level == "char":
# training set is filtered to max_sent_length
expected_train_len = 5
else:
expected_train_len = 382
expected_testdev_len = 20 # dev and test have the same len
self.assertEqual(len(train_data), expected_train_len)
self.assertEqual(len(dev_data), expected_testdev_len)
if test_path is None:
self.assertIsNone(test_data)
else:
self.assertEqual(len(test_data), expected_testdev_len)
# check the segmentation: src and trg attributes are lists
self.assertIs(type(train_data.examples[0].src), list)
self.assertIs(type(train_data.examples[0].trg), list)
self.assertIs(type(dev_data.examples[0].src), list)
self.assertIs(type(dev_data.examples[0].trg), list)
if test_path is not None:
self.assertIs(type(test_data.examples[0].src), list)
self.assertFalse(hasattr(test_data.examples[0], "trg"))
# check the length filtering of the training examples
self.assertFalse(any([len(ex.src) > self.max_sent_length for
ex in train_data.examples]))
self.assertFalse(any([len(ex.trg) > self.max_sent_length for
ex in train_data.examples]))
# check the lowercasing
if lowercase:
self.assertTrue(
all([" ".join(ex.src).lower() == " ".join(ex.src)
for ex in train_data.examples]))
self.assertTrue(
all([" ".join(ex.src).lower() == " ".join(ex.src)
for ex in dev_data.examples]))
self.assertTrue(
all([" ".join(ex.trg).lower() == " ".join(ex.trg)
for ex in train_data.examples]))
self.assertTrue(
all([" ".join(ex.trg).lower() == " ".join(ex.trg)
for ex in dev_data.examples]))
if test_path is not None:
self.assertTrue(
all([" ".join(ex.src).lower() == " ".join(
ex.src) for ex in test_data.examples]))
# check the first example from the training set
expected_srcs = {"char": "Danke.",
"word": "David Gallo: Das ist Bill Lange."
" Ich bin Dave Gallo."}
expected_trgs = {"char": "Thank you.",
"word": "David Gallo: This is Bill Lange. "
"I'm Dave Gallo."}
if level == "char":
if lowercase:
comparison_src = list(expected_srcs[level].lower())
comparison_trg = list(expected_trgs[level].lower())
else:
comparison_src = list(expected_srcs[level])
comparison_trg = list(expected_trgs[level])
else:
if lowercase:
comparison_src = expected_srcs[level].lower().\
split()
comparison_trg = expected_trgs[level].lower(). \
split()
else:
comparison_src = expected_srcs[level].split()
comparison_trg = expected_trgs[level].split()
self.assertEqual(train_data.examples[0].src, comparison_src)
self.assertEqual(train_data.examples[0].trg, comparison_trg)
def testRandomSubset(self):
# only a random subset should be selected for training
current_cfg = self.data_cfg.copy()
current_cfg["random_train_subset"] = -1
# load the data
train_data, dev_data, test_data, src_vocab, trg_vocab = \
load_data(current_cfg)
assert len(train_data) == 382
current_cfg["random_train_subset"] = 10
train_data, dev_data, test_data, src_vocab, trg_vocab = \
load_data(current_cfg)
assert len(train_data) == 10
| 7,422 | 45.685535 | 80 | py |
KSTER | KSTER-main/test/unit/test_decoder.py | from torch.nn import GRU, LSTM
import torch
from joeynmt.decoders import RecurrentDecoder
from joeynmt.encoders import RecurrentEncoder
from .test_helpers import TensorTestCase
class TestRecurrentDecoder(TensorTestCase):
def setUp(self):
self.emb_size = 10
self.num_layers = 3
self.hidden_size = 6
self.encoder_hidden_size = 3
self.vocab_size = 5
seed = 42
torch.manual_seed(seed)
bidi_encoder = RecurrentEncoder(emb_size=self.emb_size,
num_layers=self.num_layers,
hidden_size=self.encoder_hidden_size,
bidirectional=True)
uni_encoder = RecurrentEncoder(emb_size=self.emb_size,
num_layers=self.num_layers,
hidden_size=self.encoder_hidden_size*2,
bidirectional=False)
self.encoders = [uni_encoder, bidi_encoder]
def test_recurrent_decoder_size(self):
# test all combinations of bridge, input_feeding, encoder directions
for encoder in self.encoders:
for init_hidden in ["bridge", "zero", "last"]:
for input_feeding in [True, False]:
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=encoder,
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden=init_hidden,
input_feeding=input_feeding)
self.assertEqual(decoder.rnn.hidden_size, self.hidden_size)
self.assertEqual(decoder.att_vector_layer.out_features,
self.hidden_size)
self.assertEqual(decoder.output_layer.out_features,
self.vocab_size)
self.assertEqual(decoder.output_size, self.vocab_size)
self.assertEqual(decoder.rnn.bidirectional, False)
self.assertEqual(decoder.init_hidden_option, init_hidden)
if init_hidden == "bridge":
self.assertTrue(hasattr(decoder, "bridge_layer"))
self.assertEqual(decoder.bridge_layer.out_features,
self.hidden_size)
self.assertEqual(decoder.bridge_layer.in_features,
encoder.output_size)
else:
self.assertFalse(hasattr(decoder, "bridge_layer"))
if input_feeding:
self.assertEqual(decoder.rnn_input_size,
self.emb_size + self.hidden_size)
else:
self.assertEqual(decoder.rnn_input_size, self.emb_size)
def test_recurrent_decoder_type(self):
valid_rnn_types = {"gru": GRU, "lstm": LSTM}
for name, obj in valid_rnn_types.items():
decoder = RecurrentDecoder(rnn_type=name,
hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False)
self.assertEqual(type(decoder.rnn), obj)
def test_recurrent_input_dropout(self):
drop_prob = 0.5
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False,
dropout=drop_prob,
emb_dropout=drop_prob)
input_tensor = torch.Tensor([2, 3, 1, -1])
decoder.train()
dropped = decoder.emb_dropout(input=input_tensor)
# eval switches off dropout
decoder.eval()
no_drop = decoder.emb_dropout(input=input_tensor)
# when dropout is applied, remaining values are divided by drop_prob
self.assertGreaterEqual((no_drop - (drop_prob*dropped)).abs().sum(), 0)
drop_prob = 1.0
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False,
dropout=drop_prob,
emb_dropout=drop_prob)
all_dropped = decoder.emb_dropout(input=input_tensor)
self.assertEqual(all_dropped.sum(), 0)
decoder.eval()
none_dropped = decoder.emb_dropout(input=input_tensor)
self.assertTensorEqual(no_drop, none_dropped)
self.assertTensorEqual((no_drop - all_dropped), no_drop)
def test_recurrent_hidden_dropout(self):
hidden_drop_prob = 0.5
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False,
hidden_dropout=hidden_drop_prob)
input_tensor = torch.Tensor([2, 3, 1, -1])
decoder.train()
dropped = decoder.hidden_dropout(input=input_tensor)
# eval switches off dropout
decoder.eval()
no_drop = decoder.hidden_dropout(input=input_tensor)
# when dropout is applied, remaining values are divided by drop_prob
self.assertGreaterEqual((no_drop -
(hidden_drop_prob * dropped)).abs().sum(), 0)
hidden_drop_prob = 1.0
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False,
hidden_dropout=hidden_drop_prob)
all_dropped = decoder.hidden_dropout(input=input_tensor)
self.assertEqual(all_dropped.sum(), 0)
decoder.eval()
none_dropped = decoder.hidden_dropout(input=input_tensor)
self.assertTensorEqual(no_drop, none_dropped)
self.assertTensorEqual((no_drop - all_dropped), no_drop)
def test_recurrent_freeze(self):
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False,
freeze=True)
for n, p in decoder.named_parameters():
self.assertFalse(p.requires_grad)
def test_recurrent_forward(self):
time_dim = 4
batch_size = 2
# make sure the outputs match the targets
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False)
encoder_states = torch.rand(size=(batch_size, time_dim,
self.encoders[0].output_size))
trg_inputs = torch.ones(size=(batch_size, time_dim, self.emb_size))
# no padding, no mask
#x_length = torch.Tensor([time_dim]*batch_size).int()
mask = torch.ones(size=(batch_size, 1, time_dim)).byte()
output, hidden, att_probs, att_vectors = decoder(
trg_inputs, encoder_hidden=encoder_states[:, -1, :],
encoder_output=encoder_states, src_mask=mask, unroll_steps=time_dim,
hidden=None, prev_att_vector=None)
self.assertEqual(output.shape, torch.Size(
[batch_size, time_dim, self.vocab_size]))
self.assertEqual(hidden.shape, torch.Size(
[batch_size, self.num_layers, self.hidden_size]))
self.assertEqual(att_probs.shape, torch.Size(
[batch_size, time_dim, time_dim]))
self.assertEqual(att_vectors.shape, torch.Size(
[batch_size, time_dim, self.hidden_size]))
hidden_target = torch.Tensor(
[[[ 0.1814, 0.5468, -0.4717, -0.7580, 0.5834, -0.4018],
[ 0.4649, 0.5484, -0.2702, 0.4545, 0.1983, 0.2771],
[-0.1752, -0.4215, 0.1941, -0.3975, -0.2317, -0.5566]],
[[ 0.1814, 0.5468, -0.4717, -0.7580, 0.5834, -0.4018],
[ 0.4649, 0.5484, -0.2702, 0.4545, 0.1983, 0.2771],
[-0.1752, -0.4215, 0.1941, -0.3975, -0.2317, -0.5566]]])
output_target = torch.Tensor(
[[[ 0.2702, -0.1988, -0.1985, -0.2998, -0.2564],
[ 0.2719, -0.2075, -0.2017, -0.2988, -0.2595],
[ 0.2720, -0.2143, -0.2084, -0.3024, -0.2537],
[ 0.2714, -0.2183, -0.2135, -0.3061, -0.2468]],
[[ 0.2757, -0.1744, -0.1888, -0.3038, -0.2466],
[ 0.2782, -0.1837, -0.1928, -0.3028, -0.2505],
[ 0.2785, -0.1904, -0.1994, -0.3066, -0.2448],
[ 0.2777, -0.1943, -0.2042, -0.3105, -0.2379]]])
att_vectors_target = torch.Tensor(
[[[-0.6196, -0.0505, 0.4900, 0.6286, -0.5007, -0.3721],
[-0.6389, -0.0337, 0.4998, 0.6458, -0.5052, -0.3579],
[-0.6396, -0.0158, 0.5058, 0.6609, -0.5035, -0.3660],
[-0.6348, -0.0017, 0.5090, 0.6719, -0.5013, -0.3771]],
[[-0.5697, -0.0887, 0.4515, 0.6128, -0.4713, -0.4068],
[-0.5910, -0.0721, 0.4617, 0.6305, -0.4760, -0.3930],
[-0.5918, -0.0544, 0.4680, 0.6461, -0.4741, -0.4008],
[-0.5866, -0.0405, 0.4712, 0.6574, -0.4718, -0.4116]]])
self.assertTensorAlmostEqual(hidden_target, hidden)
self.assertTensorAlmostEqual(output_target, output)
self.assertTensorAlmostEqual(att_vectors, att_vectors_target)
# att_probs should be a distribution over the output vocabulary
self.assertTensorAlmostEqual(att_probs.sum(2),
torch.ones(batch_size, time_dim))
| 12,263 | 51.187234 | 80 | py |
KSTER | KSTER-main/test/unit/test_loss.py | import torch
from joeynmt.loss import XentLoss
from .test_helpers import TensorTestCase
class TestTransformerUtils(TensorTestCase):
def setUp(self):
seed = 42
torch.manual_seed(seed)
def test_label_smoothing(self):
pad_index = 0
smoothing = 0.4
criterion = XentLoss(pad_index=pad_index, smoothing=smoothing)
# batch x seq_len x vocab_size: 3 x 2 x 5
predict = torch.FloatTensor(
[[[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
[[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
[[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]]]
)
# batch x seq_len: 3 x 2
targets = torch.LongTensor([[2, 1],
[2, 0],
[1, 0]])
# test the smoothing function
smoothed_targets = criterion._smooth_targets(targets=targets.view(-1),
vocab_size=predict.size(-1))
self.assertTensorAlmostEqual(
smoothed_targets,
torch.Tensor(
[[0.0000, 0.1333, 0.6000, 0.1333, 0.1333],
[0.0000, 0.6000, 0.1333, 0.1333, 0.1333],
[0.0000, 0.1333, 0.6000, 0.1333, 0.1333],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.6000, 0.1333, 0.1333, 0.1333],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000]])
)
assert torch.max(smoothed_targets) == 1-smoothing
# test the loss computation
v = criterion(predict.log(), targets)
self.assertTensorAlmostEqual(v, 2.1326)
def test_no_label_smoothing(self):
pad_index = 0
smoothing = 0.0
criterion = XentLoss(pad_index=pad_index, smoothing=smoothing)
# batch x seq_len x vocab_size: 3 x 2 x 5
predict = torch.FloatTensor(
[[[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
[[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
[[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]]]
)
# batch x seq_len: 3 x 2
targets = torch.LongTensor([[2, 1],
[2, 0],
[1, 0]])
# test the smoothing function: should still be one-hot
smoothed_targets = criterion._smooth_targets(targets=targets.view(-1),
vocab_size=predict.size(-1))
assert torch.max(smoothed_targets) == 1
assert torch.min(smoothed_targets) == 0
self.assertTensorAlmostEqual(
smoothed_targets,
torch.Tensor(
[[0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
)
v = criterion(predict.log(), targets)
self.assertTensorAlmostEqual(v, 5.6268)
| 3,037 | 34.325581 | 78 | py |
KSTER | KSTER-main/test/unit/test_weight_tying.py | from torch.nn import GRU, LSTM
import torch
import numpy as np
from joeynmt.encoders import RecurrentEncoder
from .test_helpers import TensorTestCase
from joeynmt.model import build_model
from joeynmt.vocabulary import Vocabulary
import copy
class TestWeightTying(TensorTestCase):
def setUp(self):
self.seed = 42
vocab_size = 30
tokens = ["tok{:02d}".format(i) for i in range(vocab_size)]
self.vocab = Vocabulary(tokens=tokens)
self.cfg = {
"model": {
"tied_embeddings": False,
"tied_softmax": False,
"encoder": {
"type": "recurrent",
"hidden_size": 64,
"embeddings": {"embedding_dim": 32},
"num_layers": 1,
},
"decoder": {
"type": "recurrent",
"hidden_size": 64,
"embeddings": {"embedding_dim": 32},
"num_layers": 1,
},
}
}
def test_tied_embeddings(self):
torch.manual_seed(self.seed)
cfg = copy.deepcopy(self.cfg)
cfg["model"]["tied_embeddings"] = True
cfg["model"]["tied_softmax"] = False
src_vocab = trg_vocab = self.vocab
model = build_model(cfg["model"],
src_vocab=src_vocab, trg_vocab=trg_vocab)
self.assertEqual(src_vocab.itos, trg_vocab.itos)
self.assertEqual(model.src_embed, model.trg_embed)
self.assertTensorEqual(model.src_embed.lut.weight,
model.trg_embed.lut.weight)
self.assertEqual(model.src_embed.lut.weight.shape,
model.trg_embed.lut.weight.shape)
def test_tied_softmax(self):
torch.manual_seed(self.seed)
cfg = copy.deepcopy(self.cfg)
cfg["model"]["decoder"]["type"] = "transformer"
cfg["model"]["tied_embeddings"] = False
cfg["model"]["tied_softmax"] = True
cfg["model"]["decoder"]["embeddings"]["embedding_dim"] = 64
src_vocab = trg_vocab = self.vocab
model = build_model(cfg["model"],
src_vocab=src_vocab, trg_vocab=trg_vocab)
self.assertEqual(model.trg_embed.lut.weight.shape,
model.decoder.output_layer.weight.shape)
self.assertTensorEqual(model.trg_embed.lut.weight,
model.decoder.output_layer.weight)
def test_tied_src_trg_softmax(self):
# test source embedding, target embedding, and softmax tying
torch.manual_seed(self.seed)
cfg = copy.deepcopy(self.cfg)
cfg["model"]["decoder"]["type"] = "transformer"
cfg["model"]["tied_embeddings"] = True
cfg["model"]["tied_softmax"] = True
cfg["model"]["decoder"]["embeddings"]["embedding_dim"] = 64
cfg["model"]["encoder"]["embeddings"]["embedding_dim"] = 64
src_vocab = trg_vocab = self.vocab
model = build_model(cfg["model"],
src_vocab=src_vocab, trg_vocab=trg_vocab)
src_weight = model.src_embed.lut.weight
trg_weight = model.trg_embed.lut.weight
output_weight = model.decoder.output_layer.weight
self.assertTensorEqual(src_weight, trg_weight)
self.assertTensorEqual(src_weight, output_weight)
self.assertEqual(src_weight.shape, trg_weight.shape)
self.assertEqual(trg_weight.shape, output_weight.shape)
output_weight.data.fill_(3.)
self.assertEqual(output_weight.sum().item(), 6528)
self.assertEqual(output_weight.sum().item(), src_weight.sum().item())
self.assertEqual(output_weight.sum().item(), trg_weight.sum().item())
self.assertEqual(src_weight.sum().item(), trg_weight.sum().item())
| 3,861 | 34.759259 | 77 | py |
KSTER | KSTER-main/test/unit/test_transformer_utils.py | import torch
from joeynmt.transformer_layers import PositionalEncoding
from .test_helpers import TensorTestCase
class TestTransformerUtils(TensorTestCase):
def setUp(self):
seed = 42
torch.manual_seed(seed)
def test_position_encoding(self):
batch_size = 2
max_time = 3
emb_size = hidden_size = 12
x = torch.zeros([batch_size, max_time, emb_size])
pe = PositionalEncoding(emb_size)
output = pe(x)
self.assertEqual(pe.pe.size(2), hidden_size)
self.assertTensorAlmostEqual(output, pe.pe[:, :x.size(1)])
| 595 | 23.833333 | 66 | py |
KSTER | KSTER-main/test/unit/test_batch.py | import torch
import random
from torchtext.data.batch import Batch as TorchTBatch
from joeynmt.batch import Batch
from joeynmt.data import load_data, make_data_iter
from joeynmt.constants import PAD_TOKEN
from .test_helpers import TensorTestCase
class TestData(TensorTestCase):
def setUp(self):
self.train_path = "test/data/toy/train"
self.dev_path = "test/data/toy/dev"
self.test_path = "test/data/toy/test"
self.levels = ["char", "word"] # bpe is equivalently processed to word
self.max_sent_length = 20
# minimal data config
self.data_cfg = {"src": "de", "trg": "en", "train": self.train_path,
"dev": self.dev_path, "level": "char",
"lowercase": True,
"max_sent_length": self.max_sent_length}
# load the data
self.train_data, self.dev_data, self.test_data, src_vocab, trg_vocab = \
load_data(self.data_cfg)
self.pad_index = trg_vocab.stoi[PAD_TOKEN]
# random seeds
seed = 42
torch.manual_seed(seed)
random.seed(42)
def testBatchTrainIterator(self):
batch_size = 4
self.assertEqual(len(self.train_data), 27)
# make data iterator
train_iter = make_data_iter(self.train_data, train=True, shuffle=True,
batch_size=batch_size)
self.assertEqual(train_iter.batch_size, batch_size)
self.assertTrue(train_iter.shuffle)
self.assertTrue(train_iter.train)
self.assertEqual(train_iter.epoch, 0)
self.assertEqual(train_iter.iterations, 0)
expected_src0 = torch.Tensor(
[[21, 10, 4, 16, 4, 5, 21, 4, 12, 33, 6, 14, 4, 12, 23, 6, 18, 4,
6, 9, 3],
[20, 28, 4, 10, 28, 4, 6, 5, 14, 8, 6, 15, 4, 5, 7, 17, 11, 27,
6, 9, 3],
[24, 8, 7, 5, 24, 10, 12, 14, 5, 18, 4, 7, 17, 11, 4, 11, 4, 6,
25, 3, 1]]).long()
expected_src0_len = torch.Tensor([21, 21, 20]).long()
expected_trg0 = torch.Tensor(
[[6, 4, 27, 5, 8, 4, 5, 31, 4, 26, 7, 6, 10, 20, 11,
9, 3],
[8, 7, 6, 10, 17, 4, 13, 5, 15, 9, 3, 1, 1, 1, 1,
1, 1],
[12, 5, 4, 25, 7, 6, 8, 4, 7, 6, 18, 18, 11, 10, 12,
23, 3]]).long()
expected_trg0_len = torch.Tensor([18, 12, 18]).long()
total_samples = 0
for b in iter(train_iter):
b = Batch(torch_batch=b, pad_index=self.pad_index)
if total_samples == 0:
self.assertTensorEqual(b.src, expected_src0)
self.assertTensorEqual(b.src_length, expected_src0_len)
self.assertTensorEqual(b.trg, expected_trg0)
self.assertTensorEqual(b.trg_length, expected_trg0_len)
total_samples += b.nseqs
self.assertLessEqual(b.nseqs, batch_size)
self.assertEqual(total_samples, len(self.train_data))
def testBatchDevIterator(self):
batch_size = 3
self.assertEqual(len(self.dev_data), 20)
# make data iterator
dev_iter = make_data_iter(self.dev_data, train=False, shuffle=False,
batch_size=batch_size)
self.assertEqual(dev_iter.batch_size, batch_size)
self.assertFalse(dev_iter.shuffle)
self.assertFalse(dev_iter.train)
self.assertEqual(dev_iter.epoch, 0)
self.assertEqual(dev_iter.iterations, 0)
expected_src0 = torch.Tensor(
[[29, 8, 5, 22, 5, 8, 16, 7, 19, 5, 22, 5, 24, 8, 7, 5, 7, 19,
16, 16, 5, 31, 10, 19, 11, 8, 17, 15, 10, 6, 18, 5, 7, 4, 10, 6,
5, 25, 3],
[10, 17, 11, 5, 28, 12, 4, 23, 4, 5, 0, 10, 17, 11, 5, 22, 5, 14,
8, 7, 7, 5, 10, 17, 11, 5, 14, 8, 5, 31, 10, 6, 5, 9, 3, 1,
1, 1, 1],
[29, 8, 5, 22, 5, 18, 23, 13, 4, 6, 5, 13, 8, 18, 5, 9, 3, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1]]).long()
expected_src0_len = torch.Tensor([39, 35, 17]).long()
expected_trg0 = torch.Tensor(
[[13, 11, 12, 4, 22, 4, 12, 5, 4, 22, 4, 25, 7, 6, 8, 4, 14, 12,
4, 24, 14, 5, 7, 6, 26, 17, 14, 10, 20, 4, 23, 3],
[14, 0, 28, 4, 7, 6, 18, 18, 13, 4, 8, 5, 4, 24, 11, 4, 7, 11,
16, 11, 4, 9, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[13, 11, 12, 4, 22, 4, 7, 11, 27, 27, 5, 4, 9, 3, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]).long()
expected_trg0_len = torch.Tensor([33, 24, 15]).long()
total_samples = 0
for b in iter(dev_iter):
self.assertEqual(type(b), TorchTBatch)
b = Batch(b, pad_index=self.pad_index)
# test the sorting by src length
self.assertEqual(type(b), Batch)
before_sort = b.src_length
b.sort_by_src_length()
after_sort = b.src_length
self.assertTensorEqual(torch.sort(before_sort, descending=True)[0],
after_sort)
self.assertEqual(type(b), Batch)
if total_samples == 0:
self.assertTensorEqual(b.src, expected_src0)
self.assertTensorEqual(b.src_length, expected_src0_len)
self.assertTensorEqual(b.trg, expected_trg0)
self.assertTensorEqual(b.trg_length, expected_trg0_len)
total_samples += b.nseqs
self.assertLessEqual(b.nseqs, batch_size)
self.assertEqual(total_samples, len(self.dev_data))
| 5,692 | 40.554745 | 80 | py |
KSTER | KSTER-main/test/unit/test_model_init.py | from torch.nn import GRU, LSTM
import torch
from torch import nn
import numpy as np
from joeynmt.encoders import RecurrentEncoder
from .test_helpers import TensorTestCase
from joeynmt.model import build_model
from joeynmt.vocabulary import Vocabulary
import copy
class TestModelInit(TensorTestCase):
def setUp(self):
self.seed = 42
vocab_size = 30
tokens = ["tok{:02d}".format(i) for i in range(vocab_size)]
self.vocab = Vocabulary(tokens=tokens)
self.hidden_size = 64
self.cfg = {
"model": {
"tied_embeddings": False,
"tied_softmax": False,
"encoder": {
"type": "transformer",
"hidden_size": self.hidden_size,
"embeddings": {"embedding_dim": self.hidden_size},
"num_layers": 1,
},
"decoder": {
"type": "transformer",
"hidden_size": self.hidden_size,
"embeddings": {"embedding_dim": self.hidden_size},
"num_layers": 1,
},
}
}
def test_transformer_layer_norm_init(self):
torch.manual_seed(self.seed)
cfg = copy.deepcopy(self.cfg)
src_vocab = trg_vocab = self.vocab
model = build_model(cfg["model"],
src_vocab=src_vocab, trg_vocab=trg_vocab)
def check_layer_norm(m: nn.Module):
for name, child in m.named_children():
if isinstance(child, nn.LayerNorm):
self.assertTensorEqual(child.weight,
torch.ones([self.hidden_size]))
self.assertTensorEqual(child.bias,
torch.zeros([self.hidden_size]))
else:
check_layer_norm(child)
check_layer_norm(model)
| 1,962 | 31.180328 | 75 | py |
KSTER | KSTER-main/test/unit/test_encoder.py | from torch.nn import GRU, LSTM
import torch
from joeynmt.encoders import RecurrentEncoder
from .test_helpers import TensorTestCase
class TestRecurrentEncoder(TensorTestCase):
def setUp(self):
self.emb_size = 10
self.num_layers = 3
self.hidden_size = 7
seed = 42
torch.manual_seed(seed)
def test_recurrent_encoder_size(self):
for bidirectional in [True, False]:
directional_factor = 2 if bidirectional else 1
encoder = RecurrentEncoder(hidden_size=self.hidden_size,
emb_size=self.emb_size,
num_layers=self.num_layers,
bidirectional=bidirectional)
self.assertEqual(encoder.rnn.hidden_size, self.hidden_size)
# output size is affected by bidirectionality
self.assertEqual(encoder.output_size,
self.hidden_size*directional_factor)
self.assertEqual(encoder.rnn.bidirectional, bidirectional)
def test_recurrent_encoder_type(self):
valid_rnn_types = {"gru": GRU, "lstm": LSTM}
for name, obj in valid_rnn_types.items():
encoder = RecurrentEncoder(rnn_type=name)
self.assertEqual(type(encoder.rnn), obj)
def test_recurrent_input_dropout(self):
drop_prob = 0.5
encoder = RecurrentEncoder(dropout=drop_prob, emb_dropout=drop_prob)
input_tensor = torch.Tensor([2, 3, 1, -1])
encoder.train()
dropped = encoder.emb_dropout(input=input_tensor)
# eval switches off dropout
encoder.eval()
no_drop = encoder.emb_dropout(input=input_tensor)
# when dropout is applied, remaining values are divided by drop_prob
self.assertGreaterEqual((no_drop - (drop_prob*dropped)).abs().sum(), 0)
drop_prob = 1.0
encoder = RecurrentEncoder(dropout=drop_prob, emb_dropout=drop_prob)
all_dropped = encoder.emb_dropout(input=input_tensor)
self.assertEqual(all_dropped.sum(), 0)
encoder.eval()
none_dropped = encoder.emb_dropout(input=input_tensor)
self.assertTensorEqual(no_drop, none_dropped)
self.assertTensorEqual((no_drop - all_dropped), no_drop)
def test_recurrent_freeze(self):
encoder = RecurrentEncoder(freeze=True)
for n, p in encoder.named_parameters():
self.assertFalse(p.requires_grad)
def test_recurrent_forward(self):
time_dim = 4
batch_size = 2
bidirectional = True
directions = 2 if bidirectional else 1
encoder = RecurrentEncoder(emb_size=self.emb_size,
num_layers=self.num_layers,
hidden_size=self.hidden_size,
bidirectional=bidirectional)
x = torch.rand(size=(batch_size, time_dim, self.emb_size))
# no padding, no mask
x_length = torch.Tensor([time_dim]*batch_size).int()
mask = torch.ones_like(x)
output, hidden = encoder(embed_src=x, src_length=x_length, mask=mask)
self.assertEqual(output.shape, torch.Size(
[batch_size, time_dim, directions*self.hidden_size]))
self.assertEqual(hidden.shape, torch.Size(
[batch_size, directions*self.hidden_size]))
hidden_target = torch.Tensor(
[[0.1323, 0.0125, 0.2900, -0.0725, -0.0102, -0.4405,
0.1226, -0.3333, -0.3186, -0.2411, 0.1790, 0.1281,
0.0739, -0.0536],
[0.1431, 0.0085, 0.2828, -0.0933, -0.0139, -0.4525,
0.0946, -0.3279, -0.3001, -0.2223, 0.2023, 0.0708,
0.0131, -0.0124]])
output_target = torch.Tensor(
[[[[ 0.0041, 0.0324, 0.0846, -0.0056, 0.0353, -0.2528, 0.0289,
-0.3333, -0.3186, -0.2411, 0.1790, 0.1281, 0.0739, -0.0536],
[ 0.0159, 0.0248, 0.1496, -0.0176, 0.0457, -0.3839, 0.0780,
-0.3137, -0.2731, -0.2310, 0.1866, 0.0758, 0.0366, -0.0069],
[ 0.0656, 0.0168, 0.2182, -0.0391, 0.0214, -0.4389, 0.1100,
-0.2625, -0.1970, -0.2249, 0.1374, 0.0337, 0.0139, 0.0284],
[ 0.1323, 0.0125, 0.2900, -0.0725, -0.0102, -0.4405, 0.1226,
-0.1649, -0.1023, -0.1823, 0.0712, 0.0039, -0.0228, 0.0444]],
[[ 0.0296, 0.0254, 0.1007, -0.0225, 0.0207, -0.2612, 0.0061,
-0.3279, -0.3001, -0.2223, 0.2023, 0.0708, 0.0131, -0.0124],
[ 0.0306, 0.0096, 0.1566, -0.0386, 0.0387, -0.3958, 0.0556,
-0.3034, -0.2701, -0.2165, 0.2061, 0.0364, -0.0012, 0.0184],
[ 0.0842, 0.0075, 0.2181, -0.0696, 0.0121, -0.4389, 0.0874,
-0.2432, -0.1979, -0.2168, 0.1519, 0.0066, -0.0080, 0.0485],
[ 0.1431, 0.0085, 0.2828, -0.0933, -0.0139, -0.4525, 0.0946,
-0.1608, -0.1140, -0.1646, 0.0796, -0.0202, -0.0207, 0.0379]]]])
self.assertTensorAlmostEqual(hidden_target, hidden)
self.assertTensorAlmostEqual(output_target, output)
| 5,086 | 45.669725 | 79 | py |
KSTER | KSTER-main/test/unit/test_search.py | import torch
import numpy as np
from joeynmt.search import greedy, recurrent_greedy, transformer_greedy
from joeynmt.search import beam_search
from joeynmt.decoders import RecurrentDecoder, TransformerDecoder
from joeynmt.encoders import RecurrentEncoder
from joeynmt.embeddings import Embeddings
from joeynmt.model import Model
from joeynmt.vocabulary import Vocabulary
from .test_helpers import TensorTestCase
# TODO for transformer and rnn, make sure both return the same result for
# beam_size<2 and greedy decoding
class TestSearch(TensorTestCase):
def setUp(self):
self.emb_size = 12
self.num_layers = 3
self.hidden_size = 12
self.ff_size = 24
self.num_heads = 4
self.dropout = 0.
self.encoder_hidden_size = 3
self.vocab = Vocabulary(tokens=['word'])
self.vocab_size = len(self.vocab) # = 5
seed = 42
torch.manual_seed(seed)
#self.bos_index = 2
self.pad_index = 1
#self.eos_index = 3
class TestSearchTransformer(TestSearch):
def _build(self, batch_size):
src_time_dim = 4
vocab_size = 7
emb = Embeddings(embedding_dim=self.emb_size, vocab_size=vocab_size,
padding_idx=self.pad_index)
decoder = TransformerDecoder(
num_layers=self.num_layers, num_heads=self.num_heads,
hidden_size=self.hidden_size, ff_size=self.ff_size,
dropout=self.dropout, emb_dropout=self.dropout,
vocab_size=vocab_size)
encoder_output = torch.rand(
size=(batch_size, src_time_dim, self.hidden_size))
for p in decoder.parameters():
torch.nn.init.uniform_(p, -0.5, 0.5)
src_mask = torch.ones(size=(batch_size, 1, src_time_dim)) == 1
encoder_hidden = None # unused
model = Model(encoder=None, decoder=decoder,
src_embed=emb, trg_embed=emb,
src_vocab=self.vocab, trg_vocab=self.vocab)
return src_mask, model, encoder_output, encoder_hidden
def test_transformer_greedy(self):
batch_size = 2
max_output_length = 3
src_mask, model, encoder_output, encoder_hidden = self._build(
batch_size=batch_size)
output, attention_scores = transformer_greedy(
src_mask=src_mask, max_output_length=max_output_length, model=model,
encoder_output=encoder_output, encoder_hidden=encoder_hidden)
# Transformer greedy doesn't return attention scores
self.assertIsNone(attention_scores)
# batch x time
self.assertEqual(output.shape, (batch_size, max_output_length))
np.testing.assert_equal(output, [[5, 5, 5], [5, 5, 5]])
def test_transformer_beam1(self):
batch_size = 2
beam_size = 1
alpha = 1.
max_output_length = 3
src_mask, model, encoder_output, encoder_hidden = self._build(
batch_size=batch_size)
output, attention_scores = beam_search(
size=beam_size, src_mask=src_mask, max_output_length=max_output_length,
model=model, alpha=alpha,
encoder_output=encoder_output, encoder_hidden=encoder_hidden)
# Transformer beam doesn't return attention scores
self.assertIsNone(attention_scores)
# batch x time
self.assertEqual(output.shape, (batch_size, max_output_length))
np.testing.assert_equal(output, [[5, 5, 5], [5, 5, 5]])
# now compare to greedy, they should be the same for beam=1
greedy_output, _ = transformer_greedy(
src_mask=src_mask, max_output_length=max_output_length, model=model,
encoder_output=encoder_output, encoder_hidden=encoder_hidden)
np.testing.assert_equal(output, greedy_output)
def test_transformer_beam7(self):
batch_size = 2
beam_size = 7
alpha = 1.
max_output_length = 3
src_mask, model, encoder_output, encoder_hidden = self._build(
batch_size=batch_size)
output, attention_scores = beam_search(
size=beam_size, src_mask=src_mask, n_best=1,
max_output_length=max_output_length, model=model, alpha=alpha,
encoder_output=encoder_output, encoder_hidden=encoder_hidden)
# Transformer beam doesn't return attention scores
self.assertIsNone(attention_scores)
# batch x time
# now it produces EOS, so everything after gets cut off
self.assertEqual(output.shape, (batch_size, 1))
np.testing.assert_equal(output, [[3], [3]])
class TestSearchRecurrent(TestSearch):
def _build(self, batch_size):
src_time_dim = 4
vocab_size = 7
emb = Embeddings(embedding_dim=self.emb_size, vocab_size=vocab_size,
padding_idx=self.pad_index)
encoder = RecurrentEncoder(emb_size=self.emb_size,
num_layers=self.num_layers,
hidden_size=self.encoder_hidden_size,
bidirectional=True)
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=encoder, attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="bridge",
input_feeding=True)
encoder_output = torch.rand(
size=(batch_size, src_time_dim, encoder.output_size))
for p in decoder.parameters():
torch.nn.init.uniform_(p, -0.5, 0.5)
src_mask = torch.ones(size=(batch_size, 1, src_time_dim)) == 1
encoder_hidden = torch.rand(size=(batch_size, encoder.output_size))
model = Model(encoder=encoder, decoder=decoder,
src_embed=emb, trg_embed=emb,
src_vocab=self.vocab, trg_vocab=self.vocab)
return src_mask, model, encoder_output, encoder_hidden
def test_recurrent_greedy(self):
batch_size = 2
max_output_length = 3
src_mask, model, encoder_output, encoder_hidden = self._build(
batch_size=batch_size)
output, attention_scores = recurrent_greedy(
src_mask=src_mask, max_output_length=max_output_length, model=model,
encoder_output=encoder_output, encoder_hidden=encoder_hidden)
self.assertEqual(output.shape, (batch_size, max_output_length))
np.testing.assert_equal(output, [[4, 0, 4], [4, 4, 4]])
expected_attention_scores = np.array(
[[[0.22914883, 0.24638498, 0.21247596, 0.3119903],
[0.22970565, 0.24540883, 0.21261126, 0.31227428],
[0.22903332, 0.2459198, 0.2110187, 0.3140282]],
[[0.252522, 0.29074305, 0.257121, 0.19961396],
[0.2519883, 0.2895494, 0.25718424, 0.201278],
[0.2523954, 0.28959078, 0.25769445, 0.2003194]]])
np.testing.assert_array_almost_equal(attention_scores,
expected_attention_scores)
self.assertEqual(attention_scores.shape,
(batch_size, max_output_length, 4))
def test_recurrent_beam1(self):
# beam=1 and greedy should return the same result
batch_size = 2
max_output_length = 3
src_mask, model, encoder_output, encoder_hidden = self._build(
batch_size=batch_size)
greedy_output, _ = recurrent_greedy(
src_mask=src_mask, max_output_length=max_output_length, model=model,
encoder_output=encoder_output, encoder_hidden=encoder_hidden)
beam_size = 1
alpha = 1.0
output, _ = beam_search(
size=beam_size, src_mask=src_mask, n_best=1,
max_output_length=max_output_length, model=model, alpha=alpha,
encoder_output=encoder_output, encoder_hidden=encoder_hidden)
np.testing.assert_array_equal(greedy_output, output)
def test_recurrent_beam7(self):
batch_size = 2
max_output_length = 3
src_mask, model, encoder_output, encoder_hidden = self._build(
batch_size=batch_size)
beam_size = 7
alpha = 1.0
output, _ = beam_search(
size=beam_size, src_mask=src_mask, n_best=1,
max_output_length=max_output_length, model=model, alpha=alpha,
encoder_output=encoder_output, encoder_hidden=encoder_hidden)
self.assertEqual(output.shape, (2, 1))
np.testing.assert_array_equal(output, [[3], [3]])
| 8,723 | 39.018349 | 83 | py |
KSTER | KSTER-main/test/unit/test_knn.py | import sys
sys.path.append("../..")
from joeynmt.knn import KNNElasticSearch, KNNFaissSearch
import time
import numpy as np
embeddings_path = "embeddings_4.npy"
embeddings = np.load(embeddings_path)
m_embeddings = np.load(embeddings_path, mmap_mode="r")
batch_size = 32
d = 512
n_run = 100
# es_knn = KNNElasticSearch(index="wmt14_en_de_base_v2",
# psm="byte.es.mt_corpus.service.hl")
# query = np.random.random((batch_size, d)).astype(np.float32)
# start = time.time()
# for i in range(n_run):
# query = embeddings[i * batch_size: (i + 1) * batch_size]
# res = es_knn.search(query)
# print("es_search delay (batch = %d): %.4f ms" % (batch_size, (time.time() - start) / n_run * 1000))
faiss_knn = KNNFaissSearch(index_path='trained.index',
token_path='token_map')
start = time.time()
for i in range(n_run):
query = embeddings[i * batch_size: (i + 1) * batch_size]
res = faiss_knn.search(query)
indices = np.random.choice(embeddings.shape[0], batch_size, replace=True)
s = m_embeddings[indices]
print("faiss_search delay (batch = %d): %.4f ms" % (batch_size, (time.time() - start) / n_run * 1000)) | 1,210 | 33.6 | 102 | py |
KSTER | KSTER-main/test/unit/__init__.py | 0 | 0 | 0 | py | |
KSTER | KSTER-main/test/unit/test_transformer_decoder.py | import torch
from joeynmt.decoders import TransformerDecoder, TransformerDecoderLayer
from .test_helpers import TensorTestCase
class TestTransformerDecoder(TensorTestCase):
def setUp(self):
self.emb_size = 12
self.num_layers = 3
self.hidden_size = 12
self.ff_size = 24
self.num_heads = 4
self.dropout = 0.
seed = 42
torch.manual_seed(seed)
def test_transformer_decoder_freeze(self):
decoder = TransformerDecoder(freeze=True)
for n, p in decoder.named_parameters():
self.assertFalse(p.requires_grad)
def test_transformer_decoder_output_size(self):
vocab_size = 11
decoder = TransformerDecoder(
num_layers=self.num_layers, num_heads=self.num_heads,
hidden_size=self.hidden_size, ff_size=self.ff_size,
dropout=self.dropout, vocab_size=vocab_size)
if not hasattr(decoder, "output_size"):
self.fail("Missing output_size property.")
self.assertEqual(decoder.output_size, vocab_size)
def test_transformer_decoder_forward(self):
batch_size = 2
src_time_dim = 4
trg_time_dim = 5
vocab_size = 7
trg_embed = torch.rand(size=(batch_size, trg_time_dim, self.emb_size))
decoder = TransformerDecoder(
num_layers=self.num_layers, num_heads=self.num_heads,
hidden_size=self.hidden_size, ff_size=self.ff_size,
dropout=self.dropout, emb_dropout=self.dropout,
vocab_size=vocab_size)
encoder_output = torch.rand(
size=(batch_size, src_time_dim, self.hidden_size))
for p in decoder.parameters():
torch.nn.init.uniform_(p, -0.5, 0.5)
src_mask = torch.ones(size=(batch_size, 1, src_time_dim)) == 1
trg_mask = torch.ones(size=(batch_size, trg_time_dim, 1)) == 1
encoder_hidden = None # unused
decoder_hidden = None # unused
unrol_steps = None # unused
output, states, _, _ = decoder(
trg_embed, encoder_output, encoder_hidden, src_mask, unrol_steps,
decoder_hidden, trg_mask)
output_target = torch.Tensor(
[[[0.1718, 0.5595, -0.1996, -0.6924, 0.4351, -0.0850, 0.2805],
[0.0666, 0.4923, -0.1724, -0.6804, 0.3983, -0.1111, 0.2194],
[-0.0315, 0.3673, -0.2320, -0.6100, 0.3019, 0.0422, 0.2514],
[-0.0026, 0.3807, -0.2195, -0.6010, 0.3081, -0.0101, 0.2099],
[-0.0172, 0.3384, -0.2853, -0.5799, 0.2470, 0.0312, 0.2518]],
[[0.0284, 0.3918, -0.2010, -0.6472, 0.3646, -0.0296, 0.1791],
[0.1017, 0.4387, -0.2031, -0.7084, 0.3051, -0.1354, 0.2511],
[0.0155, 0.4274, -0.2061, -0.6702, 0.3085, -0.0617, 0.2830],
[0.0227, 0.4067, -0.1697, -0.6463, 0.3277, -0.0423, 0.2333],
[0.0133, 0.4409, -0.1186, -0.5694, 0.4450, 0.0290, 0.1643]]]
)
self.assertEqual(output_target.shape, output.shape)
self.assertTensorAlmostEqual(output_target, output)
greedy_predictions = output.argmax(-1)
expect_predictions = output_target.argmax(-1)
self.assertTensorEqual(expect_predictions, greedy_predictions)
states_target = torch.Tensor(
[[[3.7535e-02, 5.3508e-01, 4.9478e-02, -9.1961e-01, -5.3966e-01,
-1.0065e-01, 4.3053e-01, -3.0671e-01, -1.2724e-02, -4.1879e-01,
5.9625e-01, 1.1887e-01],
[1.3837e-01, 4.6963e-01, -3.7059e-02, -6.8479e-01, -4.6042e-01,
-1.0072e-01, 3.9374e-01, -3.0429e-01, -5.4203e-02, -4.3680e-01,
6.4257e-01, 1.1424e-01],
[1.0263e-01, 3.8331e-01, -2.5586e-02, -6.4478e-01, -4.5860e-01,
-1.0590e-01, 5.8806e-01, -2.8856e-01, 1.1084e-02, -4.7479e-01,
5.9094e-01, 1.6089e-01],
[7.3408e-02, 3.7701e-01, -5.8783e-02, -6.2368e-01, -4.4201e-01,
-1.0237e-01, 5.2556e-01, -3.0821e-01, -5.3345e-02, -4.5606e-01,
5.8259e-01, 1.2531e-01],
[4.1206e-02, 3.6129e-01, -1.2955e-02, -5.8638e-01, -4.6023e-01,
-9.4267e-02, 5.5464e-01, -3.0029e-01, -3.3974e-02, -4.8347e-01,
5.4088e-01, 1.2015e-01]],
[[1.1017e-01, 4.7179e-01, 2.6402e-02, -7.2170e-01, -3.9778e-01,
-1.0226e-01, 5.3498e-01, -2.8369e-01, -1.1081e-01, -4.6096e-01,
5.9517e-01, 1.3531e-01],
[2.1947e-01, 4.6407e-01, 8.4276e-02, -6.3263e-01, -4.4953e-01,
-9.7334e-02, 4.0321e-01, -2.9893e-01, -1.0368e-01, -4.5760e-01,
6.1378e-01, 1.3509e-01],
[2.1437e-01, 4.1372e-01, 1.9859e-02, -5.7415e-01, -4.5025e-01,
-9.8621e-02, 4.1182e-01, -2.8410e-01, -1.2729e-03, -4.8586e-01,
6.2318e-01, 1.4731e-01],
[1.9153e-01, 3.8401e-01, 2.6096e-02, -6.2339e-01, -4.0685e-01,
-9.7387e-02, 4.1836e-01, -2.8648e-01, -1.7857e-02, -4.7678e-01,
6.2907e-01, 1.7617e-01],
[3.1713e-02, 3.7548e-01, -6.3005e-02, -7.9804e-01, -3.6541e-01,
-1.0398e-01, 4.2991e-01, -2.9607e-01, 2.1376e-04, -4.5897e-01,
6.1062e-01, 1.6142e-01]]]
)
self.assertEqual(states_target.shape, states.shape)
self.assertTensorAlmostEqual(states_target, states)
def test_transformer_decoder_layers(self):
vocab_size = 7
decoder = TransformerDecoder(
num_layers=self.num_layers, num_heads=self.num_heads,
hidden_size=self.hidden_size, ff_size=self.ff_size,
dropout=self.dropout, vocab_size=vocab_size)
self.assertEqual(len(decoder.layers), self.num_layers)
for layer in decoder.layers:
self.assertTrue(isinstance(layer, TransformerDecoderLayer))
self.assertTrue(hasattr(layer, "src_trg_att"))
self.assertTrue(hasattr(layer, "trg_trg_att"))
self.assertTrue(hasattr(layer, "feed_forward"))
self.assertEqual(layer.size, self.hidden_size)
self.assertEqual(
layer.feed_forward.pwff_layer[0].in_features, self.hidden_size)
self.assertEqual(
layer.feed_forward.pwff_layer[0].out_features, self.ff_size)
| 6,322 | 42.909722 | 79 | py |
KSTER | KSTER-main/test/unit/test_transformer_encoder.py | import torch
from joeynmt.encoders import TransformerEncoder
from .test_helpers import TensorTestCase
class TestTransformerEncoder(TensorTestCase):
def setUp(self):
self.emb_size = 12
self.num_layers = 3
self.hidden_size = 12
self.ff_size = 24
self.num_heads = 4
self.dropout = 0.
self.seed = 42
torch.manual_seed(self.seed)
def test_transformer_encoder_freeze(self):
encoder = TransformerEncoder(freeze=True)
for n, p in encoder.named_parameters():
self.assertFalse(p.requires_grad)
def test_transformer_encoder_forward(self):
batch_size = 2
time_dim = 4
torch.manual_seed(self.seed)
encoder = TransformerEncoder(
hidden_size=self.hidden_size, ff_size=self.ff_size,
num_layers=self.num_layers, num_heads=self.num_heads,
dropout=self.dropout, emb_dropout=self.dropout)
for p in encoder.parameters():
torch.nn.init.uniform_(p, -0.5, 0.5)
x = torch.rand(size=(batch_size, time_dim, self.emb_size))
# no padding, no mask
x_length = torch.Tensor([time_dim] * batch_size).int()
mask = torch.ones([batch_size, 1, time_dim]) == 1
output, hidden = encoder(x, x_length, mask)
self.assertEqual(output.shape, torch.Size(
[batch_size, time_dim, self.hidden_size]))
self.assertEqual(hidden, None)
output_target = torch.Tensor(
[[[1.9728e-01, -1.2042e-01, 8.0998e-02, 1.3411e-03, -3.5960e-01,
-5.2988e-01, -5.6056e-01, -3.5297e-01, 2.6680e-01, 2.8343e-01,
-3.7342e-01, -5.9112e-03],
[8.9687e-02, -1.2491e-01, 7.7809e-02, -1.3500e-03, -2.7002e-01,
-4.7312e-01, -5.7981e-01, -4.1998e-01, 1.0457e-01, 2.9726e-01,
-3.9461e-01, 8.1598e-02],
[3.4988e-02, -1.3020e-01, 6.0043e-02, 2.7782e-02, -3.1483e-01,
-3.8940e-01, -5.5557e-01, -5.9540e-01, -2.9808e-02, 3.1468e-01,
-4.5809e-01, 4.3313e-03],
[1.2234e-01, -1.3285e-01, 6.3068e-02, -2.3343e-02, -2.3519e-01,
-4.0794e-01, -5.6063e-01, -5.5484e-01, -1.1272e-01, 3.0103e-01,
-4.0983e-01, 3.3038e-02]],
[[9.8597e-02, -1.2121e-01, 1.0718e-01, -2.2644e-02, -4.0282e-01,
-4.2646e-01, -5.9981e-01, -3.7200e-01, 1.9538e-01, 2.7036e-01,
-3.4072e-01, -1.7966e-03],
[8.8470e-02, -1.2618e-01, 5.3351e-02, -1.8531e-02, -3.3834e-01,
-4.9047e-01, -5.7063e-01, -4.9790e-01, 2.2070e-01, 3.3964e-01,
-4.1604e-01, 2.3519e-02],
[5.8373e-02, -1.2706e-01, 1.0598e-01, 9.3277e-05, -3.0493e-01,
-4.4406e-01, -5.4723e-01, -5.2214e-01, 8.0374e-02, 2.6307e-01,
- 4.4571e-01, 8.7052e-02],
[7.9567e-02, -1.2977e-01, 1.1731e-01, 2.6198e-02, -2.4024e-01,
-4.2161e-01, -5.7604e-01, -7.3298e-01, 1.6698e-01, 3.1454e-01,
-4.9189e-01, 2.4027e-02]]]
)
self.assertTensorAlmostEqual(output_target, output)
| 3,144 | 40.381579 | 78 | py |
KSTER | KSTER-main/test/unit/test_metric.py | import unittest
from test.unit.test_helpers import TensorTestCase
from joeynmt.metrics import chrf, bleu, token_accuracy
class TestMetrics(TensorTestCase):
def test_chrf_without_whitespace(self):
hyp1 = ["t est"]
ref1 = ["tez t"]
score1 = chrf(hyp1, ref1, remove_whitespace=True)
hyp2 = ["test"]
ref2 = ["tezt"]
score2 = chrf(hyp2, ref2, remove_whitespace=True)
self.assertAlmostEqual(score1, score2)
self.assertAlmostEqual(score1, 0.271, places=3)
def test_chrf_with_whitespace(self):
hyp = ["これはテストです。"]
ref = ["これは テストです。"]
score = chrf(hyp, ref, remove_whitespace=False)
self.assertAlmostEqual(score, 0.558, places=3)
def test_bleu_13a(self):
hyp = ["this is a test."]
ref = ["this is a tezt."]
score = bleu(hyp, ref, tokenize="13a")
self.assertAlmostEqual(score, 42.729, places=3)
def test_bleu_ja_mecab(self):
try:
hyp = ["これはテストです。"]
ref = ["あれがテストです。"]
score = bleu(hyp, ref, tokenize="ja-mecab")
self.assertAlmostEqual(score, 39.764, places=3)
except Exception as e:
raise unittest.SkipTest(f"{e} Skip.")
def test_token_acc_level_char(self):
# if len(hyp) > len(ref)
hyp = [list("tests")]
ref = [list("tezt")]
#level = "char"
score = token_accuracy(hyp, ref)
self.assertEqual(score, 60.0)
# if len(hyp) < len(ref)
hyp = [list("test")]
ref = [list("tezts")]
#level = "char"
score = token_accuracy(hyp, ref)
self.assertEqual(score, 75.0)
| 1,674 | 30.018519 | 59 | py |
KSTER | KSTER-main/test/unit/test_embeddings.py | import torch
from joeynmt.embeddings import Embeddings
from .test_helpers import TensorTestCase
class TestEmbeddings(TensorTestCase):
def setUp(self):
self.emb_size = 10
self.vocab_size = 11
self.pad_idx = 1
seed = 42
torch.manual_seed(seed)
def test_size(self):
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx)
self.assertEqual(emb.lut.weight.shape,
torch.Size([self.vocab_size, self.emb_size]))
def test_pad_zeros(self):
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx)
# pad embedding should be zeros
self.assertTensorEqual(emb.lut.weight[self.pad_idx],
torch.zeros([self.emb_size]))
def test_freeze(self):
encoder = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx,
freeze=True)
for n, p in encoder.named_parameters():
self.assertFalse(p.requires_grad)
def test_forward(self):
# fix the embedding weights
weights = self._get_random_embedding_weights()
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx)
self._fill_embeddings(emb, weights)
indices = torch.Tensor([0, 1, self.pad_idx, 9]).long()
embedded = emb.forward(x=indices)
# embedding operation is just slicing from weights matrix
self.assertTensorEqual(embedded, torch.index_select(input=weights,
index=indices, dim=0))
# after embedding, representations for PAD should still be zero
self.assertTensorEqual(embedded[2], torch.zeros([self.emb_size]))
def test_scale(self):
# fix the embedding weights
weights = self._get_random_embedding_weights()
emb = Embeddings(embedding_dim=self.emb_size,
vocab_size=self.vocab_size,
padding_idx=self.pad_idx,
scale=True)
emb.lut.weight.data = weights
indices = torch.Tensor([0, 1, self.pad_idx, 9]).long()
embedded = emb.forward(x=indices)
# now scaled
self.assertTensorNotEqual(
torch.index_select(input=weights, index=indices, dim=0), embedded)
self.assertTensorEqual(
torch.index_select(input=weights, index=indices, dim=0)*
(self.emb_size**0.5), embedded)
def _fill_embeddings(self, embeddings, weights):
embeddings.lut.weight.data = weights
def _get_random_embedding_weights(self):
weights = torch.rand([self.vocab_size, self.emb_size])
weights[self.pad_idx] = torch.zeros([self.emb_size])
return weights
| 3,091 | 38.139241 | 78 | py |
KSTER | KSTER-main/test/unit/test_attention.py | import torch
from joeynmt.attention import BahdanauAttention, LuongAttention
from .test_helpers import TensorTestCase
class TestBahdanauAttention(TensorTestCase):
def setUp(self):
self.key_size = 3
self.query_size = 5
self.hidden_size = 7
seed = 42
torch.manual_seed(seed)
self.bahdanau_att = BahdanauAttention(hidden_size=self.hidden_size,
key_size=self.key_size,
query_size=self.query_size)
def test_bahdanau_attention_size(self):
self.assertIsNone(self.bahdanau_att.key_layer.bias) # no bias
self.assertIsNone(self.bahdanau_att.query_layer.bias) # no bias
self.assertEqual(self.bahdanau_att.key_layer.weight.shape,
torch.Size([self.hidden_size, self.key_size]))
self.assertEqual(self.bahdanau_att.query_layer.weight.shape,
torch.Size([self.hidden_size, self.query_size]))
self.assertEqual(self.bahdanau_att.energy_layer.weight.shape,
torch.Size([1, self.hidden_size]))
self.assertIsNone(self.bahdanau_att.energy_layer.bias)
def test_bahdanau_forward(self):
src_length = 5
trg_length = 4
batch_size = 6
queries = torch.rand(size=(batch_size, trg_length, self.query_size))
keys = torch.rand(size=(batch_size, src_length, self.key_size))
mask = torch.ones(size=(batch_size, 1, src_length)).byte()
# introduce artificial padding areas
mask[0, 0, -3:] = 0
mask[1, 0, -2:] = 0
mask[4, 0, -1:] = 0
for t in range(trg_length):
c, att = None, None
try:
# should raise an AssertionException (missing pre-computation)
query = queries[:, t, :].unsqueeze(1)
c, att = self.bahdanau_att(query=query, mask=mask, values=keys)
except AssertionError:
pass
self.assertIsNone(c)
self.assertIsNone(att)
# now with pre-computation
self.bahdanau_att.compute_proj_keys(keys=keys)
self.assertIsNotNone(self.bahdanau_att.proj_keys)
self.assertEqual(self.bahdanau_att.proj_keys.shape,
torch.Size(
[batch_size, src_length, self.hidden_size]))
contexts = []
attention_probs = []
for t in range(trg_length):
c, att = None, None
try:
# should not raise an AssertionException
query = queries[:, t, :].unsqueeze(1)
c, att = self.bahdanau_att(query=query, mask=mask, values=keys)
except AssertionError:
self.fail()
self.assertIsNotNone(c)
self.assertIsNotNone(att)
self.assertEqual(self.bahdanau_att.proj_query.shape,
torch.Size([batch_size, 1, self.hidden_size]))
contexts.append(c)
attention_probs.append(att)
self.assertEqual(len(attention_probs), trg_length)
self.assertEqual(len(contexts), trg_length)
contexts = torch.cat(contexts, dim=1)
attention_probs = torch.cat(attention_probs, dim=1)
self.assertEqual(contexts.shape,
torch.Size(
[batch_size, trg_length, self.key_size]))
self.assertEqual(attention_probs.shape,
torch.Size([batch_size, trg_length, src_length]))
contexts_target = torch.Tensor(
[[[0.5080, 0.5832, 0.5614],
[0.5096, 0.5816, 0.5596],
[0.5092, 0.5820, 0.5601],
[0.5079, 0.5833, 0.5615]],
[[0.4709, 0.5817, 0.3091],
[0.4720, 0.5793, 0.3063],
[0.4704, 0.5825, 0.3102],
[0.4709, 0.5814, 0.3090]],
[[0.4394, 0.4482, 0.6526],
[0.4390, 0.4475, 0.6522],
[0.4391, 0.4479, 0.6538],
[0.4391, 0.4479, 0.6533]],
[[0.5283, 0.3441, 0.3938],
[0.5297, 0.3457, 0.3956],
[0.5306, 0.3466, 0.3966],
[0.5274, 0.3431, 0.3926]],
[[0.4079, 0.4145, 0.2439],
[0.4064, 0.4156, 0.2445],
[0.4077, 0.4147, 0.2439],
[0.4067, 0.4153, 0.2444]],
[[0.5649, 0.5749, 0.4960],
[0.5660, 0.5763, 0.4988],
[0.5658, 0.5754, 0.4984],
[0.5662, 0.5766, 0.4991]]]
)
self.assertTensorAlmostEqual(contexts_target, contexts)
attention_probs_targets = torch.Tensor(
[[[0.4904, 0.5096, 0.0000, 0.0000, 0.0000],
[0.4859, 0.5141, 0.0000, 0.0000, 0.0000],
[0.4871, 0.5129, 0.0000, 0.0000, 0.0000],
[0.4906, 0.5094, 0.0000, 0.0000, 0.0000]],
[[0.3314, 0.3278, 0.3408, 0.0000, 0.0000],
[0.3337, 0.3230, 0.3433, 0.0000, 0.0000],
[0.3301, 0.3297, 0.3402, 0.0000, 0.0000],
[0.3312, 0.3275, 0.3413, 0.0000, 0.0000]],
[[0.1977, 0.2047, 0.2040, 0.1936, 0.1999],
[0.1973, 0.2052, 0.2045, 0.1941, 0.1988],
[0.1987, 0.2046, 0.2046, 0.1924, 0.1996],
[0.1984, 0.2047, 0.2044, 0.1930, 0.1995]],
[[0.1963, 0.2041, 0.2006, 0.1942, 0.2047],
[0.1954, 0.2065, 0.2011, 0.1934, 0.2036],
[0.1947, 0.2074, 0.2014, 0.1928, 0.2038],
[0.1968, 0.2028, 0.2006, 0.1949, 0.2049]],
[[0.2455, 0.2414, 0.2588, 0.2543, 0.0000],
[0.2450, 0.2447, 0.2566, 0.2538, 0.0000],
[0.2458, 0.2417, 0.2586, 0.2540, 0.0000],
[0.2452, 0.2438, 0.2568, 0.2542, 0.0000]],
[[0.1999, 0.1888, 0.1951, 0.2009, 0.2153],
[0.2035, 0.1885, 0.1956, 0.1972, 0.2152],
[0.2025, 0.1885, 0.1950, 0.1980, 0.2159],
[0.2044, 0.1884, 0.1955, 0.1970, 0.2148]]]
)
self.assertTensorAlmostEqual(attention_probs_targets, attention_probs)
def test_bahdanau_precompute_None(self):
self.assertIsNone(self.bahdanau_att.proj_keys)
self.assertIsNone(self.bahdanau_att.proj_query)
def test_bahdanau_precompute(self):
src_length = 5
batch_size = 6
keys = torch.rand(size=(batch_size, src_length, self.key_size))
self.bahdanau_att.compute_proj_keys(keys=keys)
proj_keys_targets = torch.Tensor(
[[[0.4042, 0.1373, 0.3308, 0.2317, 0.3011, 0.2978, -0.0975],
[0.4740, 0.4829, -0.0853, -0.2634, 0.4623, 0.0333, -0.2702],
[0.4540, 0.0645, 0.6046, 0.4632, 0.3459, 0.4631, -0.0919],
[0.4744, 0.5098, -0.2441, -0.3713, 0.4265, -0.0407, -0.2527],
[0.0314, 0.1189, 0.3825, 0.1119, 0.2548, 0.1239, -0.1921]],
[[0.7057, 0.2725, 0.2426, 0.1979, 0.4285, 0.3727, -0.1126],
[0.3967, 0.0223, 0.3664, 0.3488, 0.2107, 0.3531, -0.0095],
[0.4311, 0.4695, 0.3035, -0.0640, 0.5914, 0.1713, -0.3695],
[0.0797, 0.1038, 0.3847, 0.1476, 0.2486, 0.1568, -0.1672],
[0.3379, 0.3671, 0.3622, 0.0166, 0.5097, 0.1845, -0.3207]],
[[0.4051, 0.4552, -0.0709, -0.2616, 0.4339, 0.0126, -0.2682],
[0.5379, 0.5037, 0.0074, -0.2046, 0.5243, 0.0969, -0.2953],
[0.0250, 0.0544, 0.3859, 0.1679, 0.1976, 0.1471, -0.1392],
[0.1880, 0.2725, 0.1849, -0.0598, 0.3383, 0.0693, -0.2329],
[0.0759, 0.1006, 0.0955, -0.0048, 0.1361, 0.0400, -0.0913]],
[[-0.0207, 0.1266, 0.5529, 0.1728, 0.3192, 0.1611, -0.2560],
[0.5713, 0.2364, 0.0718, 0.0801, 0.3141, 0.2455, -0.0729],
[0.1574, 0.1162, 0.3591, 0.1572, 0.2602, 0.1838, -0.1510],
[0.1357, 0.0192, 0.1817, 0.1391, 0.1037, 0.1389, -0.0277],
[0.3088, 0.2804, 0.2024, -0.0045, 0.3680, 0.1386, -0.2127]],
[[0.1181, 0.0899, 0.1139, 0.0329, 0.1390, 0.0744, -0.0758],
[0.0713, 0.2682, 0.4111, 0.0129, 0.4044, 0.0985, -0.3177],
[0.5340, 0.1713, 0.5365, 0.3679, 0.4262, 0.4373, -0.1456],
[0.3902, -0.0242, 0.4498, 0.4313, 0.1997, 0.4012, 0.0075],
[0.1764, 0.1531, -0.0564, -0.0876, 0.1390, 0.0129, -0.0714]],
[[0.3772, 0.3725, 0.3053, -0.0012, 0.4982, 0.1808, -0.3006],
[0.4391, -0.0472, 0.3379, 0.4136, 0.1434, 0.3918, 0.0687],
[0.3697, 0.2313, 0.4745, 0.2100, 0.4348, 0.3000, -0.2242],
[0.8427, 0.3705, 0.1227, 0.1079, 0.4890, 0.3604, -0.1305],
[0.3526, 0.3477, 0.1473, -0.0740, 0.4132, 0.1138, -0.2452]]]
)
self.assertTensorAlmostEqual(proj_keys_targets,
self.bahdanau_att.proj_keys)
class TestLuongAttention(TensorTestCase):
def setUp(self):
self.addTypeEqualityFunc(torch.Tensor,
lambda x, y, msg: self.failureException(
msg) if not torch.equal(x, y) else True)
self.key_size = 3
self.query_size = 5
self.hidden_size = self.query_size
seed = 42
torch.manual_seed(seed)
self.luong_att = LuongAttention(hidden_size=self.hidden_size,
key_size=self.key_size)
def test_luong_attention_size(self):
self.assertIsNone(self.luong_att.key_layer.bias) # no bias
self.assertEqual(self.luong_att.key_layer.weight.shape,
torch.Size([self.hidden_size, self.key_size]))
def test_luong_attention_forward(self):
src_length = 5
trg_length = 4
batch_size = 6
queries = torch.rand(size=(batch_size, trg_length, self.query_size))
keys = torch.rand(size=(batch_size, src_length, self.key_size))
mask = torch.ones(size=(batch_size, 1, src_length)).byte()
# introduce artificial padding areas
mask[0, 0, -3:] = 0
mask[1, 0, -2:] = 0
mask[4, 0, -1:] = 0
for t in range(trg_length):
c, att = None, None
try:
# should raise an AssertionException (missing pre-computation)
query = queries[:, t, :].unsqueeze(1)
c, att = self.luong_att(query=query, mask=mask, values=keys)
except AssertionError:
pass
self.assertIsNone(c)
self.assertIsNone(att)
# now with pre-computation
self.luong_att.compute_proj_keys(keys=keys)
self.assertIsNotNone(self.luong_att.proj_keys)
self.assertEqual(self.luong_att.proj_keys.shape,
torch.Size([batch_size, src_length, self.hidden_size]))
contexts = []
attention_probs = []
for t in range(trg_length):
c, att = None, None
try:
# should not raise an AssertionException
query = queries[:, t, :].unsqueeze(1)
c, att = self.luong_att(query=query, mask=mask, values=keys)
except AssertionError:
self.fail()
self.assertIsNotNone(c)
self.assertIsNotNone(att)
contexts.append(c)
attention_probs.append(att)
self.assertEqual(len(attention_probs), trg_length)
self.assertEqual(len(contexts), trg_length)
contexts = torch.cat(contexts, dim=1)
attention_probs = torch.cat(attention_probs, dim=1)
self.assertEqual(contexts.shape,
torch.Size([batch_size, trg_length, self.key_size]))
self.assertEqual(attention_probs.shape,
torch.Size([batch_size, trg_length, src_length]))
context_targets = torch.Tensor([[[0.5347, 0.2918, 0.4707],
[0.5062, 0.2657, 0.4117],
[0.4969, 0.2572, 0.3926],
[0.5320, 0.2893, 0.4651]],
[[0.5210, 0.6707, 0.4343],
[0.5111, 0.6809, 0.4274],
[0.5156, 0.6622, 0.4274],
[0.5046, 0.6634, 0.4175]],
[[0.4998, 0.5570, 0.3388],
[0.4949, 0.5357, 0.3609],
[0.4982, 0.5208, 0.3468],
[0.5013, 0.5474, 0.3503]],
[[0.5911, 0.6944, 0.5319],
[0.5964, 0.6899, 0.5257],
[0.6161, 0.6771, 0.5042],
[0.5937, 0.7011, 0.5330]],
[[0.4439, 0.5916, 0.3691],
[0.4409, 0.5970, 0.3762],
[0.4446, 0.5845, 0.3659],
[0.4417, 0.6157, 0.3796]],
[[0.4581, 0.4343, 0.5151],
[0.4493, 0.4297, 0.5348],
[0.4399, 0.4265, 0.5419],
[0.4833, 0.4570, 0.4855]]])
self.assertTensorAlmostEqual(context_targets, contexts)
attention_probs_targets = torch.Tensor(
[[[0.3238, 0.6762, 0.0000, 0.0000, 0.0000],
[0.4090, 0.5910, 0.0000, 0.0000, 0.0000],
[0.4367, 0.5633, 0.0000, 0.0000, 0.0000],
[0.3319, 0.6681, 0.0000, 0.0000, 0.0000]],
[[0.2483, 0.3291, 0.4226, 0.0000, 0.0000],
[0.2353, 0.3474, 0.4174, 0.0000, 0.0000],
[0.2725, 0.3322, 0.3953, 0.0000, 0.0000],
[0.2803, 0.3476, 0.3721, 0.0000, 0.0000]],
[[0.1955, 0.1516, 0.2518, 0.1466, 0.2546],
[0.2220, 0.1613, 0.2402, 0.1462, 0.2303],
[0.2074, 0.1953, 0.2142, 0.1536, 0.2296],
[0.2100, 0.1615, 0.2434, 0.1376, 0.2475]],
[[0.2227, 0.2483, 0.1512, 0.1486, 0.2291],
[0.2210, 0.2331, 0.1599, 0.1542, 0.2318],
[0.2123, 0.1808, 0.1885, 0.1702, 0.2482],
[0.2233, 0.2479, 0.1435, 0.1433, 0.2421]],
[[0.2475, 0.2482, 0.2865, 0.2178, 0.0000],
[0.2494, 0.2410, 0.2976, 0.2120, 0.0000],
[0.2498, 0.2449, 0.2778, 0.2275, 0.0000],
[0.2359, 0.2603, 0.3174, 0.1864, 0.0000]],
[[0.2362, 0.1929, 0.2128, 0.1859, 0.1723],
[0.2230, 0.2118, 0.2116, 0.1890, 0.1646],
[0.2118, 0.2251, 0.2039, 0.1891, 0.1700],
[0.2859, 0.1874, 0.2083, 0.1583, 0.1601]]])
self.assertTensorAlmostEqual(attention_probs_targets, attention_probs)
def test_luong_precompute_None(self):
self.assertIsNone(self.luong_att.proj_keys)
def test_luong_precompute(self):
src_length = 5
batch_size = 6
keys = torch.rand(size=(batch_size, src_length, self.key_size))
self.luong_att.compute_proj_keys(keys=keys)
proj_keys_targets = torch.Tensor(
[[[0.5362, 0.1826, 0.4716, 0.3245, 0.4122],
[0.3819, 0.0934, 0.2750, 0.2311, 0.2378],
[0.2246, 0.2934, 0.3999, 0.0519, 0.4430],
[0.1271, 0.0636, 0.2444, 0.1294, 0.1659],
[0.3494, 0.0372, 0.1326, 0.1908, 0.1295]],
[[0.3363, 0.5984, 0.2090, -0.2695, 0.6584],
[0.3098, 0.3608, 0.3623, 0.0098, 0.5004],
[0.6133, 0.2568, 0.4264, 0.2688, 0.4716],
[0.4058, 0.1438, 0.3043, 0.2127, 0.2971],
[0.6604, 0.3490, 0.5228, 0.2593, 0.5967]],
[[0.4224, 0.1182, 0.4883, 0.3403, 0.3458],
[0.4257, 0.3757, -0.1431, -0.2208, 0.3383],
[0.0681, 0.2540, 0.4165, 0.0269, 0.3934],
[0.5341, 0.3288, 0.3937, 0.1532, 0.5132],
[0.6244, 0.1647, 0.2378, 0.2548, 0.3196]],
[[0.2222, 0.3380, 0.2374, -0.0748, 0.4212],
[0.4042, 0.1373, 0.3308, 0.2317, 0.3011],
[0.4740, 0.4829, -0.0853, -0.2634, 0.4623],
[0.4540, 0.0645, 0.6046, 0.4632, 0.3459],
[0.4744, 0.5098, -0.2441, -0.3713, 0.4265]],
[[0.0314, 0.1189, 0.3825, 0.1119, 0.2548],
[0.7057, 0.2725, 0.2426, 0.1979, 0.4285],
[0.3967, 0.0223, 0.3664, 0.3488, 0.2107],
[0.4311, 0.4695, 0.3035, -0.0640, 0.5914],
[0.0797, 0.1038, 0.3847, 0.1476, 0.2486]],
[[0.3379, 0.3671, 0.3622, 0.0166, 0.5097],
[0.4051, 0.4552, -0.0709, -0.2616, 0.4339],
[0.5379, 0.5037, 0.0074, -0.2046, 0.5243],
[0.0250, 0.0544, 0.3859, 0.1679, 0.1976],
[0.1880, 0.2725, 0.1849, -0.0598, 0.3383]]]
)
self.assertTensorAlmostEqual(proj_keys_targets, self.luong_att.proj_keys)
| 16,419 | 42.786667 | 81 | py |
KSTER | KSTER-main/test/unit/test_helpers.py | import unittest
import torch
class TensorTestCase(unittest.TestCase):
def assertTensorNotEqual(self, expected, actual):
equal = torch.equal(expected, actual)
if equal:
self.fail("Tensors did match but weren't supposed to: expected {},"
" actual {}.".format(expected, actual))
def assertTensorEqual(self, expected, actual):
equal = torch.equal(expected, actual)
if not equal:
self.fail("Tensors didn't match but were supposed to {} vs"
" {}".format(expected, actual))
def assertTensorAlmostEqual(self, expected, actual):
diff = torch.all(
torch.lt(torch.abs(torch.add(expected, -actual)), 1e-4))
if not diff:
self.fail("Tensors didn't match but were supposed to {} vs"
" {}".format(expected, actual))
| 879 | 34.2 | 79 | py |
KSTER | KSTER-main/scripts/average_checkpoints_launcher.py | import os
import glob
import subprocess
subfolder = os.listdir("models")[0]
folder = os.path.join("models", subfolder)
inputs_str = " ".join(glob.glob("%s/[0-9]*.ckpt" % folder))
output_str = "%s/averaged.ckpt" % folder
subprocess.call("python3 scripts/average_checkpoints.py --inputs %s --output %s" % (inputs_str, output_str), shell=True) | 342 | 33.3 | 120 | py |
KSTER | KSTER-main/scripts/preprocess_jparacrawl.py | # coding: utf-8
"""
Preprocess JParaCrawl
"""
import os
import argparse
import pandas as pd
import numpy as np
import unicodedata
from collections import OrderedDict
def prepare(data_dir, size, seed=None):
dtype = OrderedDict({'source': str, 'probability': float, 'en': str, 'ja': str})
df = pd.read_csv(os.path.join(data_dir, 'en-ja', 'en-ja.bicleaner05.txt'), header=None, names=dtype.keys(),
sep='\t', encoding='utf8', quoting=3, keep_default_na=False, na_values='', dtype=dtype)
df = df.drop_duplicates(subset=['en', 'ja'])
df = df[~df['en'].str.contains('�') & ~df['ja'].str.contains('�')]
df = df[['en', 'ja']].applymap(lambda x: unicodedata.normalize('NFKC', x))
df = df.dropna(how='any')
if seed is not None:
np.random.seed(seed)
test_index = np.random.choice(df.index, size=size, replace=False)
train_index = np.setdiff1d(df.index, test_index)
for lang in ['en', 'ja']:
for data_set, drop_index in zip(['train', 'dev'], [test_index, train_index]):
df[lang].drop(index=drop_index, inplace=False).to_csv(os.path.join(data_dir, data_set+'.'+lang),
header=False, index=False, sep='\t', encoding='utf8', quoting=3)
def main():
PATH = os.path.dirname(os.path.abspath('__file__'))
ap = argparse.ArgumentParser("Preprocess JParaCrawl")
ap.add_argument("--data_dir", type=str, default=os.path.join(PATH, "../test/data/jparacrawl"),
help="path to data dir. default: ../test/data/jparacrawl")
ap.add_argument("--dev_size", type=int, default=5000, help="development set size")
ap.add_argument("--seed", type=int, default=12345, help="random seed for train-dev-split")
args = ap.parse_args()
prepare(args.data_dir, args.dev_size, args.seed)
if __name__ == "__main__":
main()
| 1,849 | 36.755102 | 111 | py |
KSTER | KSTER-main/scripts/post_process_hypothesis.py | from joeynmt.vocabulary import Vocabulary
import os
import subprocess
import yaml
import glob
from sacremoses import MosesTokenizer, MosesDetokenizer
import spacy
from collections import Counter
config_path = glob.glob("*.yaml")[0]
config = yaml.safe_load(open(config_path, "r", encoding="utf-8"))
src_lang = config["data"]["src"]
trg_lang = config["data"]["trg"]
print(f"src_lang: {src_lang}\ttrg_lang: {trg_lang}")
base_path = "analysis"
detokenized_base_path = os.path.join(base_path, "detokenized")
tokenized_base_path = os.path.join(base_path, "tokenized")
bpe_base_path = os.path.join(base_path, "bpe")
detokenized_dev_path = os.path.join(detokenized_base_path, f"dev.{trg_lang}")
detokenized_test_path = os.path.join(detokenized_base_path, f"test.{trg_lang}")
tokenized_dev_path = os.path.join(tokenized_base_path, f"dev.tok.{trg_lang}")
tokenized_test_path = os.path.join(tokenized_base_path, f"test.tok.{trg_lang}")
bpe_dev_path = os.path.join(bpe_base_path, f"dev.bpe.32k.{trg_lang}")
bpe_test_path = os.path.join(bpe_base_path, f"test.bpe.32k.{trg_lang}")
print("rename hypothesis files")
subprocess.call(f"mv {base_path}/beam4_alpha0.6.dev {base_path}/dev.{trg_lang}", shell=True)
subprocess.call(f"mv {base_path}/beam4_alpha0.6.test {base_path}/test.{trg_lang}", shell=True)
if not os.path.exists(detokenized_base_path):
os.makedirs(detokenized_base_path)
print(f"copy hypothesis files into {detokenized_base_path}")
subprocess.call(f"cp {base_path}/dev.{trg_lang} {detokenized_base_path}", shell=True)
subprocess.call(f"cp {base_path}/test.{trg_lang} {detokenized_base_path}", shell=True)
def file_tokenize(src_path: str, trg_path: str, lang: str) -> None:
src_file = open(src_path, "r", encoding="utf-8")
trg_file = open(trg_path, "w", encoding="utf-8")
tokenizer = MosesTokenizer(lang=lang)
for line in src_file.readlines():
line = " ".join(tokenizer.tokenize(line.strip(), aggressive_dash_splits=True, escape=False))
trg_file.write(line + "\n")
src_file.close()
trg_file.close()
if not os.path.exists(tokenized_base_path):
os.makedirs(tokenized_base_path)
print("tokenize hypothesis")
file_tokenize(detokenized_dev_path, tokenized_dev_path, trg_lang)
file_tokenize(detokenized_test_path, tokenized_test_path, trg_lang)
if not os.path.exists(bpe_base_path):
os.makedirs(bpe_base_path)
dataset_base_path = os.path.dirname(config["data"]["train"])
codes_path = os.path.join(dataset_base_path, "codes.txt")
vocabulary_path = os.path.join(dataset_base_path, f"vocabulary.{trg_lang}")
print("segment word into subwords with bpe")
subprocess.call(f"subword-nmt apply-bpe -c {codes_path} --vocabulary {vocabulary_path} --vocabulary-threshold 50 < {tokenized_dev_path} > {bpe_dev_path}", shell=True)
subprocess.call(f"subword-nmt apply-bpe -c {codes_path} --vocabulary {vocabulary_path} --vocabulary-threshold 50 < {tokenized_test_path} > {bpe_test_path}", shell=True)
print(f"copy src files to {bpe_base_path}")
subprocess.call("cat %s >> %s" % (config["data"]["dev"] + "." + src_lang, f"{bpe_base_path}/dev.bpe.32k.{src_lang}"), shell=True)
subprocess.call("cat %s >> %s" % (config["data"]["test"] + "." + src_lang, f"{bpe_base_path}/test.bpe.32k.{src_lang}"), shell=True)
subprocess.call("cp %s %s" % (os.path.join(dataset_base_path, "vocab.txt"), bpe_base_path), shell=True)
model_dict = {
"en": "en_core_web_sm",
"de": "de_core_news_sm"
}
#subprocess.call("python3 -m spacy download %s" % model_dict[trg_lang], shell=True)
nlp = spacy.load(model_dict[trg_lang])
def compute_pos_tag_for_tokenized_file(src_path: str, trg_path: str) -> None:
src_file = open(src_path, "r", encoding="utf-8")
trg_file = open(trg_path, "w", encoding="utf-8")
for line in src_file.readlines():
line = line.strip()
words = line.split()
spaces = [True for _ in range(len(words) - 1)] + [False]
doc = spacy.tokens.doc.Doc(nlp.vocab, words=words, spaces=spaces)
for name, proc in nlp.pipeline:
doc = proc(doc)
pos_tags = []
for token in doc:
pos_tags.append(str(token.pos_))
pos_tags_line = " ".join(pos_tags)
trg_file.write(pos_tags_line + "\n")
src_file.close()
trg_file.close()
print("compute pos tag for tokenized file")
compute_pos_tag_for_tokenized_file(tokenized_dev_path, os.path.join(tokenized_base_path, f"dev.{trg_lang}.pos"))
compute_pos_tag_for_tokenized_file(tokenized_test_path, os.path.join(tokenized_base_path, f"test.{trg_lang}.pos"))
def assign_pos_tag_for_bpe(src_path: str, bpe_path: str, trg_path: str) -> None:
src_file = open(src_path, "r", encoding="utf-8")
bpe_file = open(bpe_path, "r", encoding="utf-8")
trg_file = open(trg_path, "w", encoding="utf-8")
for src_pos_line, bpe_line in zip(src_file.readlines(), bpe_file.readlines()):
src_pos_line = src_pos_line.strip().split()
bpe_line = bpe_line.strip().split()
trg_pos_tags = []
p = 0
for subword in bpe_line:
trg_pos_tags.append(src_pos_line[p])
if not subword.endswith("@@"):
p += 1
assert p == len(src_pos_line)
assert len(bpe_line) == len(trg_pos_tags)
trg_pos_tags_line = " ".join(trg_pos_tags)
trg_file.write(trg_pos_tags_line + "\n")
src_file.close()
bpe_file.close()
trg_file.close()
print("assign pos tag for bpe")
assign_pos_tag_for_bpe(os.path.join(tokenized_base_path, f"dev.{trg_lang}.pos"), os.path.join(bpe_base_path, f"dev.bpe.32k.{trg_lang}"),
os.path.join(bpe_base_path, f"dev.bpe.32k.{trg_lang}.pos"))
assign_pos_tag_for_bpe(os.path.join(tokenized_base_path, f"test.{trg_lang}.pos"), os.path.join(bpe_base_path, f"test.bpe.32k.{trg_lang}"),
os.path.join(bpe_base_path, f"test.bpe.32k.{trg_lang}.pos"))
token_file = open("token_map", "r", encoding="utf-8")
token_map = [int(x.strip()) for x in token_file.readlines()]
token_file.close()
counter = Counter(token_map)
vocab_file = open(config["data"]["trg_vocab"], "r", encoding="utf-8")
vocab_size = len(vocab_file.readlines()) + 4
vocab_file.close()
frequency = [counter[i] if i in counter else 0 for i in range(vocab_size)]
frequency_file = open(os.path.join(base_path, "token_frequency.txt"), "w", encoding="utf-8")
for c in frequency:
frequency_file.write(str(c) + "\n")
frequency_file.close()
config["data"]["dev"] = os.path.join(bpe_base_path, "dev.bpe.32k")
config["data"]["test"] = os.path.join(bpe_base_path, "test.bpe.32k")
with open("analysis_" + config_path, "w", encoding="utf-8") as f:
yaml.safe_dump(config, f) | 6,629 | 41.774194 | 168 | py |
KSTER | KSTER-main/scripts/average_checkpoints.py | #!/usr/bin/env python3
# coding: utf-8
"""
Checkpoint averaging
Mainly follows:
https://github.com/pytorch/fairseq/blob/master/scripts/average_checkpoints.py
"""
import argparse
import collections
import torch
from typing import List
def average_checkpoints(inputs: List[str]) -> dict:
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for f in inputs:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(
s, 'cpu')
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
# Averaging: only handle the network params.
model_params = state['model_state']
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
'For checkpoint {}, expected list of params: {}, '
'but found: {}'.format(f, params_keys, model_params_keys)
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
# NOTE: clone() is needed in case of p is a shared parameter
else:
params_dict[k] += p
averaged_params = collections.OrderedDict()
# v should be a list of torch Tensor.
for k, v in params_dict.items():
averaged_params[k] = v
if averaged_params[k].is_floating_point():
averaged_params[k].div_(num_models)
else:
averaged_params[k] //= num_models
new_state['model_state'] = averaged_params
return new_state
def main():
parser = argparse.ArgumentParser(
description='Tool to average the params of input checkpoints to '
'produce a new checkpoint',
)
parser.add_argument('--inputs', required=True, nargs='+',
help='Input checkpoint file paths.')
parser.add_argument('--output', required=True, metavar='FILE',
help='Write the new checkpoint to this path.')
args = parser.parse_args()
print(args)
new_state = average_checkpoints(args.inputs)
torch.save(new_state, args.output)
print('Finished writing averaged checkpoint to {}.'.format(args.output))
if __name__ == '__main__':
main()
| 3,018 | 30.123711 | 79 | py |
KSTER | KSTER-main/scripts/plot_validations.py | # coding: utf-8
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import argparse
import numpy as np
def read_vfiles(vfiles):
"""
Parse validation report files
:param vfiles: list of files
:return:
"""
models = {}
for vfile in vfiles:
model_name = vfile.split("/")[-2] if "//" not in vfile \
else vfile.split("/")[-3]
with open(vfile, "r") as validf:
steps = {}
for line in validf:
entries = line.strip().split()
key = int(entries[1])
steps[key] = {}
for i in range(2, len(entries)-1, 2):
name = entries[i].strip(":")
value = float(entries[i+1])
steps[key][name] = value
models[model_name] = steps
return models
def plot_models(models, plot_values, output_path):
"""
Plot the learning curves for several models
:param models:
:param plot_values:
:param output_path:
:return:
"""
# models is a dict: name -> ckpt values
f, axes = plt.subplots(len(plot_values), len(models),
sharex='col', sharey='row',
figsize=(3*len(models), 3*len(plot_values)))
axes = np.array(axes).reshape((len(plot_values), len(models)))
for col, model_name in enumerate(models):
values = {}
# get arrays for plotting
for step in sorted(models[model_name]):
logged_values = models[model_name][step]
for plot_value in plot_values:
if plot_value not in logged_values:
continue
elif plot_value not in values:
values[plot_value] = [[], []]
values[plot_value][1].append(logged_values[plot_value])
values[plot_value][0].append(step)
for row, plot_value in enumerate(plot_values):
axes[row][col].plot(values[plot_value][0], values[plot_value][1])
axes[row][0].set_ylabel(plot_value)
axes[0][col].set_title(model_name)
plt.tight_layout()
if output_path.endswith(".pdf"):
pp = PdfPages(output_path)
pp.savefig(f)
pp.close()
else:
if not output_path.endswith(".png"):
output_path += ".png"
plt.savefig(output_path)
plt.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser("JoeyNMT Validation plotting.")
parser.add_argument("model_dirs", type=str, nargs="+",
help="Model directories.")
parser.add_argument("--plot_values", type=str, nargs="+", default=["bleu"],
help="Value(s) to plot. Default: bleu")
parser.add_argument("--output_path", type=str, default="plot.pdf",
help="Plot will be stored in this location.")
args = parser.parse_args()
vfiles = [m+"/validations.txt" for m in args.model_dirs]
models = read_vfiles(vfiles)
plot_models(models, args.plot_values, args.output_path)
| 3,130 | 32.308511 | 79 | py |
KSTER | KSTER-main/scripts/combiner_average_checkpoints_launcher.py | import os
import glob
import subprocess
import yaml
subfolder = os.listdir("models")[0]
folder = os.path.join("models", subfolder)
files = glob.glob("%s/[0-9]*.ckpt" % folder)
ids = sorted([int(f[len(folder)+1:-5]) for f in files])
config_path = glob.glob("*.yaml")[0]
config = yaml.safe_load(open(config_path, "r", encoding="utf-8"))
keep_last_ckpts = config["combiner_training"]["keep_last_ckpts"]
ids = ids[0:keep_last_ckpts]
inputs_str = " ".join(["%s/%d.ckpt" % (folder, id) for id in ids])
output_str = "%s/averaged.ckpt" % folder
subprocess.call("python3 scripts/average_checkpoints.py --inputs %s --output %s" % (inputs_str, output_str), shell=True)
subprocess.call("cp models/%s/best.ckpt ." % subfolder, shell=True)
subprocess.call("cp models/%s/averaged.ckpt ." % subfolder, shell=True) | 802 | 35.5 | 120 | py |
KSTER | KSTER-main/scripts/build_vocab.py | #!/usr/bin/env python3
import argparse
from collections import OrderedDict
import numpy as np
def build_vocab(train_paths, output_path):
"""
Builds the vocabulary.
Compatible with Nematus build_dict function, but does not
output frequencies and special symbols.
:param train_paths:
:param output_path:
:return:
"""
counter = OrderedDict()
# iterate over input paths
for path in train_paths:
with open(path, encoding="utf-8", mode="r") as f:
for line in f:
for token in line.strip('\r\n ').split(' '):
if token:
if token not in counter:
counter[token] = 0
counter[token] += 1
words = list(counter.keys())
freqs = list(counter.values())
sorted_idx = np.argsort(freqs)
sorted_words = [words[ii] for ii in sorted_idx[::-1]]
with open(output_path, mode='w', encoding='utf-8') as f:
for word in sorted_words:
f.write(word + "\n")
if __name__ == "__main__":
ap = argparse.ArgumentParser(
description="Builds a vocabulary from training file(s)."
""
"Can be used to build a joint vocabulary for weight tying."
"To do so, first apply BPE to both source and target "
"training files, and then build a vocabulary using"
"this script from their concatenation."
""
"If you provide multiple files then this program "
"will merge them before building a joint vocabulary."
"")
ap.add_argument("train_paths", type=str,
help="One or more input (training) file(s)", nargs="+")
ap.add_argument("--output_path", type=str,
help="Output path for the built vocabulary",
default="vocab.txt")
args = ap.parse_args()
build_vocab(args.train_paths, args.output_path)
| 2,034 | 31.301587 | 79 | py |
KSTER | KSTER-main/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
print(sys.path)
# -- Project information -----------------------------------------------------
project = 'Joey NMT'
copyright = '2018, Jasmijn Bastings and Julia Kreutzer'
author = 'Jasmijn Bastings and Julia Kreutzer'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.2'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'JoeyNMTdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'JoeyNMT.tex', 'Joey NMT Documentation',
'Joost Bastings and Julia Kreutzer', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'joeynmt', 'Joey NMT Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'JoeyNMT', 'Joey NMT Documentation',
author, 'JoeyNMT', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 5,375 | 28.377049 | 79 | py |
KSTER | KSTER-main/joeynmt/vocabulary.py | # coding: utf-8
"""
Vocabulary module
"""
from collections import defaultdict, Counter
from typing import List
import numpy as np
from torchtext.data import Dataset
from joeynmt.constants import UNK_TOKEN, DEFAULT_UNK_ID, \
EOS_TOKEN, BOS_TOKEN, PAD_TOKEN
class Vocabulary:
""" Vocabulary represents mapping between tokens and indices. """
def __init__(self, tokens: List[str] = None, file: str = None) -> None:
"""
Create vocabulary from list of tokens or file.
Special tokens are added if not already in file or list.
File format: token with index i is in line i.
:param tokens: list of tokens
:param file: file to load vocabulary from
"""
# don't rename stoi and itos since needed for torchtext
# warning: stoi grows with unknown tokens, don't use for saving or size
# special symbols
self.specials = [UNK_TOKEN, PAD_TOKEN, BOS_TOKEN, EOS_TOKEN]
self.stoi = defaultdict(DEFAULT_UNK_ID)
self.itos = []
if tokens is not None:
self._from_list(tokens)
elif file is not None:
self._from_file(file)
def _from_list(self, tokens: List[str] = None) -> None:
"""
Make vocabulary from list of tokens.
Tokens are assumed to be unique and pre-selected.
Special symbols are added if not in list.
:param tokens: list of tokens
"""
self.add_tokens(tokens=self.specials+tokens)
assert len(self.stoi) == len(self.itos)
def _from_file(self, file: str) -> None:
"""
Make vocabulary from contents of file.
File format: token with index i is in line i.
:param file: path to file where the vocabulary is loaded from
"""
tokens = []
with open(file, "r") as open_file:
for line in open_file:
tokens.append(line.strip("\n"))
self._from_list(tokens)
def __str__(self) -> str:
return self.stoi.__str__()
def to_file(self, file: str) -> None:
"""
Save the vocabulary to a file, by writing token with index i in line i.
:param file: path to file where the vocabulary is written
"""
with open(file, "w") as open_file:
for t in self.itos:
open_file.write("{}\n".format(t))
def add_tokens(self, tokens: List[str]) -> None:
"""
Add list of tokens to vocabulary
:param tokens: list of tokens to add to the vocabulary
"""
for t in tokens:
new_index = len(self.itos)
# add to vocab if not already there
if t not in self.itos:
self.itos.append(t)
self.stoi[t] = new_index
def is_unk(self, token: str) -> bool:
"""
Check whether a token is covered by the vocabulary
:param token:
:return: True if covered, False otherwise
"""
return self.stoi[token] == DEFAULT_UNK_ID()
def __len__(self) -> int:
return len(self.itos)
def array_to_sentence(self, array: np.array, cut_at_eos=True,
skip_pad=True) -> List[str]:
"""
Converts an array of IDs to a sentence, optionally cutting the result
off at the end-of-sequence token.
:param array: 1D array containing indices
:param cut_at_eos: cut the decoded sentences at the first <eos>
:param skip_pad: skip generated <pad> tokens
:return: list of strings (tokens)
"""
sentence = []
for i in array:
s = self.itos[i]
if cut_at_eos and s == EOS_TOKEN:
break
if skip_pad and s == PAD_TOKEN:
continue
sentence.append(s)
return sentence
def arrays_to_sentences(self, arrays: np.array, cut_at_eos=True,
skip_pad=True) -> List[List[str]]:
"""
Convert multiple arrays containing sequences of token IDs to their
sentences, optionally cutting them off at the end-of-sequence token.
:param arrays: 2D array containing indices
:param cut_at_eos: cut the decoded sentences at the first <eos>
:param skip_pad: skip generated <pad> tokens
:return: list of list of strings (tokens)
"""
sentences = []
for array in arrays:
sentences.append(
self.array_to_sentence(array=array, cut_at_eos=cut_at_eos,
skip_pad=skip_pad))
return sentences
def build_vocab(field: str, max_size: int, min_freq: int, dataset: Dataset,
vocab_file: str = None) -> Vocabulary:
"""
Builds vocabulary for a torchtext `field` from given`dataset` or
`vocab_file`.
:param field: attribute e.g. "src"
:param max_size: maximum size of vocabulary
:param min_freq: minimum frequency for an item to be included
:param dataset: dataset to load data for field from
:param vocab_file: file to store the vocabulary,
if not None, load vocabulary from here
:return: Vocabulary created from either `dataset` or `vocab_file`
"""
if vocab_file is not None:
# load it from file
vocab = Vocabulary(file=vocab_file)
else:
# create newly
def filter_min(counter: Counter, min_freq: int):
""" Filter counter by min frequency """
filtered_counter = Counter({t: c for t, c in counter.items()
if c >= min_freq})
return filtered_counter
def sort_and_cut(counter: Counter, limit: int):
""" Cut counter to most frequent,
sorted numerically and alphabetically"""
# sort by frequency, then alphabetically
tokens_and_frequencies = sorted(counter.items(),
key=lambda tup: tup[0])
tokens_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
vocab_tokens = [i[0] for i in tokens_and_frequencies[:limit]]
return vocab_tokens
tokens = []
for i in dataset.examples:
if field == "src":
tokens.extend(i.src)
elif field == "trg":
tokens.extend(i.trg)
counter = Counter(tokens)
if min_freq > -1:
counter = filter_min(counter, min_freq)
vocab_tokens = sort_and_cut(counter, max_size)
assert len(vocab_tokens) <= max_size
vocab = Vocabulary(tokens=vocab_tokens)
assert len(vocab) <= max_size + len(vocab.specials)
assert vocab.itos[DEFAULT_UNK_ID()] == UNK_TOKEN
# check for all except for UNK token whether they are OOVs
for s in vocab.specials[1:]:
assert not vocab.is_unk(s)
return vocab
| 6,887 | 33.09901 | 79 | py |
KSTER | KSTER-main/joeynmt/__main__.py | import argparse
from joeynmt.training import train
from joeynmt.combiner_training import combiner_train
from joeynmt.prediction import test
from joeynmt.prediction import translate
from joeynmt.prediction import analyze
def main():
ap = argparse.ArgumentParser("KSTER")
ap.add_argument("mode", choices=["train", "combiner_train", "test", "build_database", "analyze", "translate", "score_translations"],
help="train a model or test or translate")
ap.add_argument("config_path", type=str,
help="path to YAML config file")
ap.add_argument("--ckpt", type=str,
help="checkpoint for prediction")
ap.add_argument("--combiner", type=str, default="no_combiner",
choices=["no_combiner", "static_combiner", "dynamic_combiner"],
help="specify combiner type")
ap.add_argument("--combiner_path", type=str, default=None,
help="used when combiner is dynamic_combiner")
ap.add_argument("--top_k", type=int, default=None,
help="knn search size")
ap.add_argument("--mixing_weight", type=float, default=None,
help="the weight of example-based distribution")
ap.add_argument("--kernel", type=str, default=None, choices=["gaussian", "laplacian"],
help="used to compute similarity of query and retrieved examples based on distances")
ap.add_argument("--bandwidth", type=float, default=None,
help="bandwidth in gaussian kernel or laplacian kernel")
ap.add_argument("--index_path", type=str, default=None,
help="path of database index file")
ap.add_argument("--token_map_path", type=str, default=None,
help="path of database token_map file")
ap.add_argument("--embedding_path", type=str, default=None,
help="path of database embeddings file, used when mode == build_database or combiner == dynamic_combiner")
ap.add_argument("--in_memory", type=str, default="True", choices=["True", "False"],
help="whether load embeddings file to memory, used when combiner == dynamic_combiner")
ap.add_argument("--output_path", type=str,
help="path for saving translation output")
ap.add_argument("--save_attention", action="store_true",
help="save attention visualizations")
ap.add_argument("--division", type=str, default="train", choices=["train", "dev", "test"],
help="part of dataset, used when mode == build_database")
args = ap.parse_args()
combiner_cfg = {
"type": args.combiner,
"top_k": args.top_k,
"mixing_weight": args.mixing_weight,
"kernel": args.kernel,
"bandwidth": args.bandwidth,
"combiner_path": args.combiner_path,
"index_path": args.index_path,
"token_map_path": args.token_map_path,
"embedding_path": args.embedding_path,
"in_memory": args.in_memory == "True"
}
if args.mode == "train":
train(cfg_file=args.config_path)
elif args.mode == "combiner_train":
combiner_train(cfg_file=args.config_path, ckpt=args.ckpt, combiner_cfg=combiner_cfg)
elif args.mode == "test":
test(cfg_file=args.config_path, ckpt=args.ckpt, combiner_cfg=combiner_cfg,
output_path=args.output_path, save_attention=args.save_attention)
elif args.mode == "build_database":
from joeynmt.build_database import build_database
build_database(cfg_file=args.config_path, ckpt=args.ckpt, division=args.division,
index_path=args.index_path, embedding_path=args.embedding_path, token_map_path=args.token_map_path)
elif args.mode == "analyze":
analyze(cfg_file=args.config_path, ckpt=args.ckpt, combiner_cfg=combiner_cfg,
output_path=args.output_path)
elif args.mode == "score_translations":
from joeynmt.prediction import score_translations
score_translations(cfg_file=args.config_path, ckpt=args.ckpt, combiner_cfg=combiner_cfg,
output_path=args.output_path)
elif args.mode == "translate":
translate(cfg_file=args.config_path, ckpt=args.ckpt,
output_path=args.output_path)
else:
raise ValueError("Unknown mode")
if __name__ == "__main__":
main()
| 4,404 | 41.76699 | 136 | py |
KSTER | KSTER-main/joeynmt/build_database.py | import torch
import numpy as np
import logging
from hashlib import md5
from joeynmt.prediction import parse_test_args
from joeynmt.helpers import load_config, load_checkpoint, get_latest_checkpoint
from joeynmt.data import load_data, Dataset, make_data_iter
from joeynmt.model import build_model, _DataParallel, Model
from joeynmt.batch import Batch
from joeynmt.constants import PAD_TOKEN, BOS_TOKEN, EOS_TOKEN
from joeynmt.faiss_index import FaissIndex
from npy_append_array import NpyAppendArray
stream_handler = logging.StreamHandler()
formatter = logging.Formatter(
"%(asctime)s %(filename)s:%(lineno)d:\n[%(levelname)s]:%(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
stream_handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
logger.propagate = False
def generate_id(sequence: str) -> str:
return md5(sequence.encode()).hexdigest()
# pylint: disable=too-many-arguments,too-many-locals,no-member
def store_examples(model: Model, embedding_path: str, token_map_path: str, data: Dataset, batch_size: int,
use_cuda: bool, level: str, n_gpu: int, batch_class: Batch, batch_type: str) \
-> None:
"""
Extract hidden states generated by trained model and sent them to kafka.
:param model: model module
:param data: dataset for validation
:param batch_size: validation batch size
:param batch_class: class type of batch
:param use_cuda: if True, use CUDA
:param level: segmentation level, one of "char", "bpe", "word"
:param eval_metric: evaluation metric, e.g. "bleu"
:param n_gpu: number of GPUs
"""
assert batch_size >= n_gpu, "batch_size must be bigger than n_gpu."
if batch_size > 1000 and batch_type == "sentence":
logger.warning(
"WARNING: Are you sure you meant to work on huge batches like "
"this? 'batch_size' is > 1000 for sentence-batching. "
"Consider decreasing it or switching to"
" 'eval_batch_type: token'.")
valid_iter = make_data_iter(
dataset=data, batch_size=batch_size, batch_type=batch_type,
shuffle=False, train=False)
pad_index = model.src_vocab.stoi[PAD_TOKEN]
# disable dropout
model.eval()
# don't track gradients during validation
total_doc = 0
sentence_count = 0
disp_step = 100
npaa = NpyAppendArray(embedding_path)
token_map_file = open(token_map_path, "w", encoding="utf-8")
id_st = set()
with torch.no_grad():
for step, valid_batch in enumerate(iter(valid_iter)):
# run as during training to get validation loss (e.g. xent)
batch = batch_class(valid_batch, pad_index, use_cuda=use_cuda)
trg_length = batch.trg_length.cpu().numpy().tolist()
trg = batch.trg.cpu().numpy()
batch_src_texts = model.src_vocab.arrays_to_sentences(arrays=batch.src, cut_at_eos=True)
batch_trg_texts = model.trg_vocab.arrays_to_sentences(arrays=batch.trg, cut_at_eos=False)
sentence_count += batch.nseqs
# sort batch now by src length and keep track of order
sort_reverse_index = batch.sort_by_src_length()
_, batch_hidden_states, _, _ = model._encode_decode(**vars(batch))
batch_hidden_states = batch_hidden_states[sort_reverse_index].cpu().numpy().astype(np.float16)
for i in range(len(batch_trg_texts)):
src_text = " ".join(batch_src_texts[i])
trg_tokens = batch_trg_texts[i][0: trg_length[i] - 1]
trg_token_ids = trg[i][0: trg_length[i] - 1].tolist()
hidden_states = batch_hidden_states[i][0: trg_length[i] - 1]
sequence = src_text + BOS_TOKEN
for token, token_id, embedding in zip(trg_tokens, trg_token_ids, hidden_states):
_id = generate_id(sequence)
if _id in id_st:
continue
else:
id_st.add(_id)
npaa.append(embedding[np.newaxis, :])
token_map_file.write(f"{token_id}\n")
sequence += token
total_doc += 1
if step % disp_step == 0 and step > 0:
logger.info(f"save {sentence_count} sentences with {total_doc} tokens")
del npaa
token_map_file.close()
logger.info(f"save {sentence_count} sentences with {total_doc} tokens")
def build_database(cfg_file: str, ckpt: str, division: str, index_path: str, embedding_path: str, token_map_path: str,
batch_class: Batch = Batch, datasets: dict = None) -> None:
"""
The function to store hidden states generated from trained transformer model.
Handles loading a model from checkpoint, generating hidden states by force decoding and storing them.
:param cfg_file: path to configuration file
:param ckpt: path to checkpoint to load
:param batch_class: class type of batch
:param output_path: path to output
:param datasets: datasets to predict
"""
logger.info("load config")
cfg = load_config(cfg_file)
model_dir = cfg["training"]["model_dir"]
assert division in ["train", "dev", "test"]
logger.info(division)
# when checkpoint is not specified, take latest (best) from model dir
if ckpt is None:
ckpt = get_latest_checkpoint(model_dir)
try:
step = ckpt.split(model_dir+"/")[1].split(".ckpt")[0]
except IndexError:
step = "best"
# load the data
logger.info("load data")
if datasets is None:
train_data, dev_data, test_data, src_vocab, trg_vocab = load_data(
data_cfg=cfg["data"], datasets=["train", "dev", "test"])
data_to_predict = {"train": train_data, "dev": dev_data, "test": test_data}
else: # avoid to load data again
data_to_predict = {"train": datasets["train"], "dev": datasets["dev"], "test": datasets["test"]}
src_vocab = datasets["src_vocab"]
trg_vocab = datasets["trg_vocab"]
# parse test args
batch_size, batch_type, use_cuda, device, n_gpu, level, eval_metric, \
max_output_length, beam_size, beam_alpha, postprocess, \
bpe_type, sacrebleu, decoding_description, tokenizer_info \
= parse_test_args(cfg, mode="test")
# load model state from disk
logger.info("load checkpoints")
model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
# build model and load parameters into it
model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
model.load_state_dict(model_checkpoint["model_state"])
if use_cuda:
model.to(device)
# multi-gpu eval
if n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = _DataParallel(model)
for data_set_name, data_set in data_to_predict.items():
if data_set is None:
continue
if not data_set_name == division:
continue
dataset_file = cfg["data"][data_set_name] + "." + cfg["data"]["trg"]
logger.info("Force decoding on %s set (%s)..." % (data_set_name, dataset_file))
logger.info("store examples")
store_examples(model, embedding_path=embedding_path, token_map_path=token_map_path, data=data_set,
batch_size=batch_size, batch_class=batch_class, batch_type=batch_type, level=level,
use_cuda=use_cuda, n_gpu=n_gpu)
logger.info("train index")
index = FaissIndex()
index.train(embedding_path)
index.add(embedding_path)
index.export(index_path)
del index | 7,739 | 39.52356 | 119 | py |
KSTER | KSTER-main/joeynmt/prediction.py | # coding: utf-8
"""
This modules holds methods for generating predictions from a model.
"""
import os
import sys
from typing import List, Optional
import logging
import numpy as np
import json
import torch
from torchtext.data import Dataset, Field
from joeynmt.helpers import bpe_postprocess, check_combiner_cfg, load_config, make_logger,\
get_latest_checkpoint, load_checkpoint, store_attention_plots,\
get_sacrebleu_description
from joeynmt.metrics import bleu, chrf, token_accuracy, sequence_accuracy
from joeynmt.model import build_model, Model, _DataParallel
from joeynmt.search import run_batch
from joeynmt.batch import Batch
from joeynmt.data import load_data, make_data_iter, MonoDataset
from joeynmt.constants import UNK_TOKEN, PAD_TOKEN, EOS_TOKEN
from joeynmt.vocabulary import Vocabulary
from joeynmt.combiners import build_combiner, NoCombiner
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments,too-many-locals,no-member
def validate_on_data(model: Model, data: Dataset,
batch_size: int,
use_cuda: bool, max_output_length: int,
level: str, eval_metric: Optional[str],
n_gpu: int,
batch_class: Batch = Batch,
compute_loss: bool = False,
beam_size: int = 1, beam_alpha: int = -1,
batch_type: str = "sentence",
postprocess: bool = True,
bpe_type: str = "subword-nmt",
sacrebleu: dict = None) \
-> (float, float, float, List[str], List[List[str]], List[str],
List[str], List[List[str]], List[np.array]):
"""
Generate translations for the given data.
If `compute_loss` is True and references are given,
also compute the loss.
:param model: model module
:param data: dataset for validation
:param batch_size: validation batch size
:param batch_class: class type of batch
:param use_cuda: if True, use CUDA
:param max_output_length: maximum length for generated hypotheses
:param level: segmentation level, one of "char", "bpe", "word"
:param eval_metric: evaluation metric, e.g. "bleu"
:param n_gpu: number of GPUs
:param compute_loss: whether to computes a scalar loss
for given inputs and targets
:param beam_size: beam size for validation.
If <2 then greedy decoding (default).
:param beam_alpha: beam search alpha for length penalty,
disabled if set to -1 (default).
:param batch_type: validation batch type (sentence or token)
:param postprocess: if True, remove BPE segmentation from translations
:param bpe_type: bpe type, one of {"subword-nmt", "sentencepiece"}
:param sacrebleu: sacrebleu options
:return:
- current_valid_score: current validation score [eval_metric],
- valid_loss: validation loss,
- valid_ppl:, validation perplexity,
- valid_sources: validation sources,
- valid_sources_raw: raw validation sources (before post-processing),
- valid_references: validation references,
- valid_hypotheses: validation_hypotheses,
- decoded_valid: raw validation hypotheses (before post-processing),
- valid_attention_scores: attention scores for validation hypotheses
"""
assert batch_size >= n_gpu, "batch_size must be bigger than n_gpu."
if sacrebleu is None: # assign default value
sacrebleu = {"remove_whitespace": True, "tokenize": "13a", "use_detokenization": False}
if batch_size > 1000 and batch_type == "sentence":
logger.warning(
"WARNING: Are you sure you meant to work on huge batches like "
"this? 'batch_size' is > 1000 for sentence-batching. "
"Consider decreasing it or switching to"
" 'eval_batch_type: token'.")
valid_iter = make_data_iter(
dataset=data, batch_size=batch_size, batch_type=batch_type,
shuffle=False, train=False)
valid_sources_raw = data.src
pad_index = model.src_vocab.stoi[PAD_TOKEN]
# disable dropout
model.eval()
# check combiner
if not hasattr(model, "combiner"):
model.combiner = NoCombiner()
# don't track gradients during validation
with torch.no_grad():
all_outputs = []
valid_attention_scores = []
total_loss = 0
total_ntokens = 0
total_nseqs = 0
for valid_batch in iter(valid_iter):
# run as during training to get validation loss (e.g. xent)
batch = batch_class(valid_batch, pad_index, use_cuda=use_cuda)
# sort batch now by src length and keep track of order
sort_reverse_index = batch.sort_by_src_length()
# run as during training with teacher forcing
if compute_loss and batch.trg is not None:
batch_loss, _, _, _ = model(return_type="combiner_loss", **vars(batch))
if n_gpu > 1:
batch_loss = batch_loss.mean() # average on multi-gpu
total_loss += batch_loss
total_ntokens += batch.ntokens
total_nseqs += batch.nseqs
# run as during inference to produce translations
output, attention_scores = run_batch(
model=model, batch=batch, beam_size=beam_size,
beam_alpha=beam_alpha, max_output_length=max_output_length)
# sort outputs back to original order
all_outputs.extend(output[sort_reverse_index])
valid_attention_scores.extend(
attention_scores[sort_reverse_index]
if attention_scores is not None else [])
assert len(all_outputs) == len(data)
if compute_loss and total_ntokens > 0:
# total validation loss
valid_loss = total_loss
# exponent of token-level negative log prob
valid_ppl = torch.exp(total_loss / total_ntokens)
else:
valid_loss = -1
valid_ppl = -1
# decode back to symbols
decoded_valid = model.trg_vocab.arrays_to_sentences(arrays=all_outputs,
cut_at_eos=True)
# evaluate with metric on full dataset
join_char = " " if level in ["word", "bpe"] else ""
valid_sources = [join_char.join(s) for s in data.src]
valid_references = [join_char.join(t) for t in data.trg]
valid_hypotheses = [join_char.join(t) for t in decoded_valid]
# post-process
if level == "bpe" and postprocess:
valid_sources = [bpe_postprocess(s, bpe_type=bpe_type)
for s in valid_sources]
valid_references = [bpe_postprocess(v, bpe_type=bpe_type)
for v in valid_references]
valid_hypotheses = [bpe_postprocess(v, bpe_type=bpe_type)
for v in valid_hypotheses]
if sacrebleu["use_detokenization"]:
batch_src_detokenize = sacrebleu["batch_src_detokenize"]
batch_trg_detokenize = sacrebleu["batch_trg_detokenize"]
valid_sources = batch_src_detokenize(valid_sources)
valid_references = batch_trg_detokenize(valid_references)
valid_hypotheses = batch_trg_detokenize(valid_hypotheses)
# if references are given, evaluate against them
if valid_references:
assert len(valid_hypotheses) == len(valid_references)
current_valid_score = 0
if eval_metric.lower() == 'bleu':
# this version does not use any tokenization
current_valid_score = bleu(
valid_hypotheses, valid_references,
tokenize=sacrebleu["tokenize"])
elif eval_metric.lower() == 'chrf':
current_valid_score = chrf(valid_hypotheses, valid_references,
remove_whitespace=sacrebleu["remove_whitespace"])
elif eval_metric.lower() == 'token_accuracy':
current_valid_score = token_accuracy( # supply List[List[str]]
list(decoded_valid), list(data.trg))
elif eval_metric.lower() == 'sequence_accuracy':
current_valid_score = sequence_accuracy(
valid_hypotheses, valid_references)
else:
current_valid_score = -1
return current_valid_score, valid_loss, valid_ppl, valid_sources, \
valid_sources_raw, valid_references, valid_hypotheses, \
decoded_valid, valid_attention_scores
def parse_test_args(cfg, mode="test"):
"""
parse test args
:param cfg: config object
:param mode: 'test' or 'translate'
:return:
"""
if "test" not in cfg["data"].keys():
raise ValueError("Test data must be specified in config.")
batch_size = cfg["training"].get(
"eval_batch_size", cfg["training"].get("batch_size", 1))
batch_type = cfg["training"].get(
"eval_batch_type", cfg["training"].get("batch_type", "sentence"))
use_cuda = (cfg["training"].get("use_cuda", False)
and torch.cuda.is_available())
device = torch.device("cuda" if use_cuda else "cpu")
if mode == 'test':
n_gpu = torch.cuda.device_count() if use_cuda else 0
k = cfg["testing"].get("beam_size", 1)
batch_per_device = batch_size*k // n_gpu if n_gpu > 1 else batch_size*k
logger.info("Process device: %s, n_gpu: %d, "
"batch_size per device: %d (with beam_size)",
device, n_gpu, batch_per_device)
eval_metric = cfg["training"]["eval_metric"]
elif mode == 'translate':
# in multi-gpu, batch_size must be bigger than n_gpu!
n_gpu = 1 if use_cuda else 0
logger.debug("Process device: %s, n_gpu: %d", device, n_gpu)
eval_metric = ""
level = cfg["data"]["level"]
max_output_length = cfg["training"].get("max_output_length", None)
# whether to use beam search for decoding, 0: greedy decoding
if "testing" in cfg.keys():
beam_size = cfg["testing"].get("beam_size", 1)
beam_alpha = cfg["testing"].get("alpha", -1)
postprocess = cfg["testing"].get("postprocess", True)
bpe_type = cfg["testing"].get("bpe_type", "subword-nmt")
sacrebleu = get_sacrebleu_description(cfg)
else:
beam_size = 1
beam_alpha = -1
postprocess = True
bpe_type = "subword-nmt"
sacrebleu = {"remove_whitespace": True, "tokenize": "13a", "use_detokenization": False}
decoding_description = "Greedy decoding" if beam_size < 2 else \
"Beam search decoding with beam size = {} and alpha = {}". \
format(beam_size, beam_alpha)
tokenizer_info = f"[{sacrebleu['tokenize']}]" \
if eval_metric == "bleu" else ""
return batch_size, batch_type, use_cuda, device, n_gpu, level, \
eval_metric, max_output_length, beam_size, beam_alpha, \
postprocess, bpe_type, sacrebleu, decoding_description, \
tokenizer_info
# pylint: disable-msg=logging-too-many-args
def test(cfg_file,
ckpt: str,
combiner_cfg: dict,
batch_class: Batch = Batch,
output_path: str = None,
save_attention: bool = False,
datasets: dict = None) -> None:
"""
Main test function. Handles loading a model from checkpoint, generating
translations and storing them and attention plots.
:param cfg_file: path to configuration file
:param ckpt: path to checkpoint to load
:param batch_class: class type of batch
:param output_path: path to output
:param datasets: datasets to predict
:param save_attention: whether to save the computed attention weights
"""
cfg = load_config(cfg_file)
model_dir = cfg["training"]["model_dir"]
check_combiner_cfg(combiner_cfg)
cfg["combiner"] = combiner_cfg
if len(logger.handlers) == 0:
_ = make_logger(model_dir, mode="test") # version string returned
# when checkpoint is not specified, take latest (best) from model dir
if ckpt is None:
ckpt = get_latest_checkpoint(model_dir)
try:
step = ckpt.split(model_dir+"/")[1].split(".ckpt")[0]
except IndexError:
step = "best"
# load the data
if datasets is None:
_, dev_data, test_data, src_vocab, trg_vocab = load_data(
data_cfg=cfg["data"], datasets=["dev", "test"])
data_to_predict = {"dev": dev_data, "test": test_data}
else: # avoid to load data again
data_to_predict = {"dev": datasets["dev"], "test": datasets["test"]}
src_vocab = datasets["src_vocab"]
trg_vocab = datasets["trg_vocab"]
# parse test args
batch_size, batch_type, use_cuda, device, n_gpu, level, eval_metric, \
max_output_length, beam_size, beam_alpha, postprocess, \
bpe_type, sacrebleu, decoding_description, tokenizer_info \
= parse_test_args(cfg, mode="test")
# load model state from disk
model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
# build model and load parameters into it
model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
model.load_state_dict(model_checkpoint["model_state"])
combiner = build_combiner(cfg)
# load combiner from checkpoint for dynamic combiners
if combiner_cfg["type"]=="dynamic_combiner":
combiner_checkpoint = load_checkpoint(combiner_cfg["combiner_path"], use_cuda=use_cuda)
combiner.load_state_dict(combiner_checkpoint["model_state"])
model.combiner = combiner
if use_cuda:
model.to(device)
# multi-gpu eval
if n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = _DataParallel(model)
for data_set_name, data_set in data_to_predict.items():
if data_set is None:
continue
dataset_file = cfg["data"][data_set_name] + "." + cfg["data"]["trg"]
logger.info("Decoding on %s set (%s)...", data_set_name, dataset_file)
#pylint: disable=unused-variable
score, loss, ppl, sources, sources_raw, references, hypotheses, \
hypotheses_raw, attention_scores = validate_on_data(
model, data=data_set, batch_size=batch_size,
batch_class=batch_class, batch_type=batch_type, level=level,
max_output_length=max_output_length, eval_metric=eval_metric,
use_cuda=use_cuda, compute_loss=False, beam_size=beam_size,
beam_alpha=beam_alpha, postprocess=postprocess,
bpe_type=bpe_type, sacrebleu=sacrebleu, n_gpu=n_gpu)
#pylint: enable=unused-variable
if "trg" in data_set.fields:
logger.info("%4s %s%s: %6.2f [%s]",
data_set_name, eval_metric, tokenizer_info,
score, decoding_description)
else:
logger.info("No references given for %s -> no evaluation.",
data_set_name)
if save_attention:
if attention_scores:
attention_name = "{}.{}.att".format(data_set_name, step)
attention_path = os.path.join(model_dir, attention_name)
logger.info("Saving attention plots. This might take a while..")
store_attention_plots(attentions=attention_scores,
targets=hypotheses_raw,
sources=data_set.src,
indices=range(len(hypotheses)),
output_prefix=attention_path)
logger.info("Attention plots saved to: %s", attention_path)
else:
logger.warning("Attention scores could not be saved. "
"Note that attention scores are not available "
"when using beam search. "
"Set beam_size to 1 for greedy decoding.")
if output_path is not None:
output_path_set = "{}.{}".format(output_path, data_set_name)
with open(output_path_set, mode="w", encoding="utf-8") as out_file:
for hyp in hypotheses:
out_file.write(hyp + "\n")
logger.info("Translations saved to: %s", output_path_set)
# pylint: disable-msg=logging-too-many-args
def analyze(cfg_file,
ckpt: str,
combiner_cfg: dict,
output_path: str,
batch_class: Batch = Batch,
datasets: dict = None) -> None:
"""
Main test function. Handles loading a model from checkpoint, generating
translations and storing them and attention plots.
:param cfg_file: path to configuration file
:param ckpt: path to checkpoint to load
:param batch_class: class type of batch
:param output_path: path to output
:param datasets: datasets to predict
:param save_attention: whether to save the computed attention weights
"""
cfg = load_config(cfg_file)
model_dir = cfg["training"]["model_dir"]
check_combiner_cfg(combiner_cfg)
cfg["combiner"] = combiner_cfg
if len(logger.handlers) == 0:
_ = make_logger(model_dir, mode="test") # version string returned
# load the data
if datasets is None:
_, dev_data, test_data, src_vocab, trg_vocab = load_data(
data_cfg=cfg["data"], datasets=["dev", "test"])
data_to_predict = {"dev": dev_data, "test": test_data}
else: # avoid to load data again
data_to_predict = {"dev": datasets["dev"], "test": datasets["test"]}
src_vocab = datasets["src_vocab"]
trg_vocab = datasets["trg_vocab"]
# parse test args
batch_size, batch_type, use_cuda, device, n_gpu, level, eval_metric, \
max_output_length, beam_size, beam_alpha, postprocess, \
bpe_type, sacrebleu, decoding_description, tokenizer_info \
= parse_test_args(cfg, mode="test")
# load model state from disk
model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
# build model and load parameters into it
model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
model.load_state_dict(model_checkpoint["model_state"])
combiner = build_combiner(cfg)
# load combiner from checkpoint for dynamic combiners
if combiner_cfg["type"]=="dynamic_combiner":
combiner_checkpoint = load_checkpoint(combiner_cfg["combiner_path"], use_cuda=use_cuda)
combiner.load_state_dict(combiner_checkpoint["model_state"])
model.combiner = combiner
if use_cuda:
model.to(device)
# multi-gpu eval
if n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = _DataParallel(model)
for data_set_name, data_set in data_to_predict.items():
if data_set is None:
continue
dataset_file = cfg["data"][data_set_name] + "." + cfg["data"]["trg"]
logger.info("Force decoding on %s set (%s)...", data_set_name, dataset_file)
valid_iter = make_data_iter(
dataset=data_set, batch_size=batch_size, batch_type=batch_type,
shuffle=False, train=False)
valid_sources_raw = data_set.src
pad_index = model.src_vocab.stoi[PAD_TOKEN]
# disable dropout
model.eval()
# don't track gradients during validation
all_docs = []
with torch.no_grad():
for step, valid_batch in enumerate(iter(valid_iter)):
# run as during training to get validation loss (e.g. xent)
batch = batch_class(valid_batch, pad_index, use_cuda=use_cuda)
trg_length = batch.trg_length.cpu().numpy().tolist()
trg = batch.trg
batch_src_texts = model.src_vocab.arrays_to_sentences(arrays=batch.src, cut_at_eos=True)
batch_trg_texts = model.trg_vocab.arrays_to_sentences(arrays=batch.trg, cut_at_eos=False)
# sort batch now by src length and keep track of order
sort_reverse_index = batch.sort_by_src_length()
batch_logits, batch_hidden_states, _, _ = model._encode_decode(**vars(batch))
batch_logits = batch_logits[sort_reverse_index]
batch_hidden_states = batch_hidden_states[sort_reverse_index]
batch_mixed_distribution, batch_model_based_distribution, example_based_distribution, batch_mixing_weight, batch_bandwidth = model.combiner.detailed_forward(batch_hidden_states, batch_logits)
batch_preds, batch_model_preds, batch_example_preds = batch_mixed_distribution.argmax(dim=-1), batch_model_based_distribution.argmax(dim=-1), example_based_distribution.argmax(dim=-1)
batch_trg_probs = torch.gather(batch_mixed_distribution, -1, trg.unsqueeze(-1)).squeeze(-1)
batch_trg_model_based_distribution = torch.gather(batch_model_based_distribution, -1, trg.unsqueeze(-1)).squeeze(-1)
batch_trg_example_based_distribution = torch.gather(example_based_distribution, -1, trg.unsqueeze(-1)).squeeze(-1)
batch_preds, batch_model_preds, batch_example_preds = batch_preds.cpu().numpy(), batch_model_preds.cpu().numpy(), batch_example_preds.cpu().numpy()
batch_trg_probs, batch_trg_model_based_distribution, batch_trg_example_based_distribution = batch_trg_probs.cpu().numpy(), batch_trg_model_based_distribution.cpu().numpy(), batch_trg_example_based_distribution.cpu().numpy()
batch_mixing_weight, batch_bandwidth = batch_mixing_weight.cpu().numpy(), batch_bandwidth.cpu().numpy()
for i in range(len(batch_trg_texts)):
trg_len = trg_length[i] - 1
doc = {
"src": " ".join(batch_src_texts[i]),
"trg_tokens": batch_trg_texts[i][0: trg_len],
"preds": batch_preds[i][0: trg_len].tolist(),
"model_preds": batch_model_preds[i][0: trg_len].tolist(),
"example_preds": batch_example_preds[i][0: trg_len].tolist(),
"trg_probs": batch_trg_probs[i][0: trg_len].tolist(),
"trg_model_probs": batch_trg_model_based_distribution[i][0: trg_len].tolist(),
"trg_example_probs": batch_trg_example_based_distribution[i][0: trg_len].tolist(),
"mixing_weight": batch_mixing_weight[i][0: trg_len].tolist(),
"bandwidth": batch_bandwidth[i][0: trg_len].tolist()
}
all_docs.append(json.dumps(doc))
output_path_set = "{}.{}".format(output_path, data_set_name)
with open(output_path_set, mode="w", encoding="utf-8") as out_file:
for doc in all_docs:
out_file.write(doc + "\n")
logger.info("Analysis saved to: %s", output_path_set)
# pylint: disable-msg=logging-too-many-args
def score_translations(cfg_file,
ckpt: str,
combiner_cfg: dict,
output_path: str,
batch_class: Batch = Batch,
datasets: dict = None) -> None:
"""
Main test function. Handles loading a model from checkpoint, generating
translations and storing them and attention plots.
:param cfg_file: path to configuration file
:param ckpt: path to checkpoint to load
:param batch_class: class type of batch
:param output_path: path to output
:param datasets: datasets to predict
:param save_attention: whether to save the computed attention weights
"""
from joeynmt.loss import XentLoss
from joeynmt.constants import PAD_TOKEN
cfg = load_config(cfg_file)
model_dir = cfg["training"]["model_dir"]
check_combiner_cfg(combiner_cfg)
cfg["combiner"] = combiner_cfg
if len(logger.handlers) == 0:
_ = make_logger(model_dir, mode="test") # version string returned
# load the data
if datasets is None:
_, dev_data, test_data, src_vocab, trg_vocab = load_data(
data_cfg=cfg["data"], datasets=["dev", "test"])
data_to_predict = {"dev": dev_data, "test": test_data}
else: # avoid to load data again
data_to_predict = {"dev": datasets["dev"], "test": datasets["test"]}
src_vocab = datasets["src_vocab"]
trg_vocab = datasets["trg_vocab"]
# parse test args
batch_size, batch_type, use_cuda, device, n_gpu, level, eval_metric, \
max_output_length, beam_size, beam_alpha, postprocess, \
bpe_type, sacrebleu, decoding_description, tokenizer_info \
= parse_test_args(cfg, mode="test")
batch_size = 1
batch_type = "sentence"
# load model state from disk
model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
# build model and load parameters into it
model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
model.load_state_dict(model_checkpoint["model_state"])
combiner = build_combiner(cfg)
# load combiner from checkpoint for dynamic combiners
if combiner_cfg["type"]=="dynamic_combiner":
combiner_checkpoint = load_checkpoint(combiner_cfg["combiner_path"], use_cuda=use_cuda)
combiner.load_state_dict(combiner_checkpoint["model_state"])
model.combiner = combiner
loss_function = XentLoss(pad_index=model.pad_index, smoothing=0.1)
if use_cuda:
model.to(device)
# multi-gpu eval
if n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = _DataParallel(model)
for data_set_name, data_set in data_to_predict.items():
if data_set is None:
continue
if data_set_name == "dev":
continue
dataset_file = cfg["data"][data_set_name] + "." + cfg["data"]["trg"]
logger.info("Force decoding on %s set (%s)...", data_set_name, dataset_file)
valid_iter = make_data_iter(
dataset=data_set, batch_size=batch_size, batch_type=batch_type,
shuffle=False, train=False)
valid_sources_raw = data_set.src
pad_index = model.src_vocab.stoi[PAD_TOKEN]
# disable dropout
model.eval()
# don't track gradients during validation
losses = []
with torch.no_grad():
for step, valid_batch in enumerate(iter(valid_iter)):
# run as during training to get validation loss (e.g. xent)
batch = batch_class(valid_batch, pad_index, use_cuda=use_cuda)
trg_length = batch.trg_length.cpu().numpy().tolist()
trg = batch.trg
# sort batch now by src length and keep track of order
sort_reverse_index = batch.sort_by_src_length()
batch_logits, batch_hidden_states, _, _ = model._encode_decode(**vars(batch))
batch_logits = batch_logits[sort_reverse_index]
batch_hidden_states = batch_hidden_states[sort_reverse_index]
batch_log_probs = model.combiner(batch_hidden_states, batch_logits)
loss = loss_function(batch_log_probs, trg).item()
losses.append(loss)
if output_path != None:
with open(output_path, mode="w", encoding="utf-8") as out_file:
for loss in losses:
out_file.write(f"{loss}\n")
logger.info(f"Losses saved to: {output_path}")
def translate(cfg_file: str,
ckpt: str,
output_path: str = None,
batch_class: Batch = Batch) -> None:
"""
Interactive translation function.
Loads model from checkpoint and translates either the stdin input or
asks for input to translate interactively.
The input has to be pre-processed according to the data that the model
was trained on, i.e. tokenized or split into subwords.
Translations are printed to stdout.
:param cfg_file: path to configuration file
:param ckpt: path to checkpoint to load
:param output_path: path to output file
:param batch_class: class type of batch
"""
def _load_line_as_data(line):
""" Create a dataset from one line via a temporary file. """
# write src input to temporary file
tmp_name = "tmp"
tmp_suffix = ".src"
tmp_filename = tmp_name+tmp_suffix
with open(tmp_filename, "w") as tmp_file:
tmp_file.write("{}\n".format(line))
test_data = MonoDataset(path=tmp_name, ext=tmp_suffix,
field=src_field)
# remove temporary file
if os.path.exists(tmp_filename):
os.remove(tmp_filename)
return test_data
def _translate_data(test_data):
""" Translates given dataset, using parameters from outer scope. """
# pylint: disable=unused-variable
score, loss, ppl, sources, sources_raw, references, hypotheses, \
hypotheses_raw, attention_scores = validate_on_data(
model, data=test_data, batch_size=batch_size,
batch_class=batch_class, batch_type=batch_type, level=level,
max_output_length=max_output_length, eval_metric="",
use_cuda=use_cuda, compute_loss=False, beam_size=beam_size,
beam_alpha=beam_alpha, postprocess=postprocess,
bpe_type=bpe_type, sacrebleu=sacrebleu, n_gpu=n_gpu)
return hypotheses
cfg = load_config(cfg_file)
model_dir = cfg["training"]["model_dir"]
_ = make_logger(model_dir, mode="translate")
# version string returned
# when checkpoint is not specified, take oldest from model dir
if ckpt is None:
ckpt = get_latest_checkpoint(model_dir)
# read vocabs
src_vocab_file = cfg["data"].get("src_vocab", model_dir + "/src_vocab.txt")
trg_vocab_file = cfg["data"].get("trg_vocab", model_dir + "/trg_vocab.txt")
src_vocab = Vocabulary(file=src_vocab_file)
trg_vocab = Vocabulary(file=trg_vocab_file)
data_cfg = cfg["data"]
level = data_cfg["level"]
lowercase = data_cfg["lowercase"]
tok_fun = lambda s: list(s) if level == "char" else s.split()
src_field = Field(init_token=None, eos_token=EOS_TOKEN,
pad_token=PAD_TOKEN, tokenize=tok_fun,
batch_first=True, lower=lowercase,
unk_token=UNK_TOKEN,
include_lengths=True)
src_field.vocab = src_vocab
# parse test args
batch_size, batch_type, use_cuda, device, n_gpu, level, _, \
max_output_length, beam_size, beam_alpha, postprocess, \
bpe_type, sacrebleu, _, _ = parse_test_args(cfg, mode="translate")
# load model state from disk
model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
# build model and load parameters into it
model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
model.load_state_dict(model_checkpoint["model_state"])
if use_cuda:
model.to(device)
if not sys.stdin.isatty():
# input file given
test_data = MonoDataset(path=sys.stdin, ext="", field=src_field)
hypotheses = _translate_data(test_data)
if output_path is not None:
# write to outputfile if given
output_path_set = "{}".format(output_path)
with open(output_path_set, mode="w", encoding="utf-8") as out_file:
for hyp in hypotheses:
out_file.write(hyp + "\n")
logger.info("Translations saved to: %s.", output_path_set)
else:
# print to stdout
for hyp in hypotheses:
print(hyp)
else:
# enter interactive mode
batch_size = 1
batch_type = "sentence"
while True:
try:
src_input = input("\nPlease enter a source sentence "
"(pre-processed): \n")
if not src_input.strip():
break
# every line has to be made into dataset
test_data = _load_line_as_data(line=src_input)
hypotheses = _translate_data(test_data)
print("JoeyNMT: {}".format(hypotheses[0]))
except (KeyboardInterrupt, EOFError):
print("\nBye.")
break
| 32,482 | 41.406005 | 239 | py |
KSTER | KSTER-main/joeynmt/constants.py | # coding: utf-8
"""
Defining global constants
"""
UNK_TOKEN = '<unk>'
PAD_TOKEN = '<pad>'
BOS_TOKEN = '<s>'
EOS_TOKEN = '</s>'
DEFAULT_UNK_ID = lambda: 0
| 156 | 12.083333 | 26 | py |
KSTER | KSTER-main/joeynmt/plotting.py | #!/usr/bin/env python
from typing import List, Optional
import numpy as np
# pylint: disable=wrong-import-position
import matplotlib
matplotlib.use('Agg')
from matplotlib import rcParams
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def plot_heatmap(scores: np.array, column_labels: List[str],
row_labels: List[str], output_path: Optional[str] = None,
dpi: int = 300) -> Figure:
"""
Plotting function that can be used to visualize (self-)attention.
Plots are saved if `output_path` is specified, in format that this file
ends with ('pdf' or 'png').
:param scores: attention scores
:param column_labels: labels for columns (e.g. target tokens)
:param row_labels: labels for rows (e.g. source tokens)
:param output_path: path to save to
:param dpi: set resolution for matplotlib
:return: pyplot figure
"""
if output_path is not None:
assert output_path.endswith(".png") or output_path.endswith(".pdf"), \
"output path must have .png or .pdf extension"
x_sent_len = len(column_labels)
y_sent_len = len(row_labels)
scores = scores[:y_sent_len, :x_sent_len]
# check that cut off part didn't have any attention
assert np.sum(scores[y_sent_len:, :x_sent_len]) == 0
# automatic label size
labelsize = 25 * (10 / max(x_sent_len, y_sent_len))
# font config
rcParams['xtick.labelsize'] = labelsize
rcParams['ytick.labelsize'] = labelsize
#rcParams['font.family'] = "sans-serif"
#rcParams['font.sans-serif'] = ["Fira Sans"]
#rcParams['font.weight'] = "regular"
fig, ax = plt.subplots(figsize=(10, 10), dpi=dpi)
plt.imshow(scores, cmap='viridis', aspect='equal',
origin='upper', vmin=0., vmax=1.)
ax.set_xticklabels(column_labels, minor=False, rotation="vertical")
ax.set_yticklabels(row_labels, minor=False)
ax.xaxis.tick_top()
ax.set_xticks(np.arange(scores.shape[1]) + 0, minor=False)
ax.set_yticks(np.arange(scores.shape[0]) + 0, minor=False)
plt.tight_layout()
if output_path is not None:
if output_path.endswith(".pdf"):
pp = PdfPages(output_path)
pp.savefig(fig)
pp.close()
else:
if not output_path.endswith(".png"):
output_path += ".png"
plt.savefig(output_path)
plt.close()
return fig
| 2,476 | 30.75641 | 78 | py |
KSTER | KSTER-main/joeynmt/batch.py | # coding: utf-8
"""
Implementation of a mini-batch.
"""
import torch
class Batch:
"""Object for holding a batch of data with mask during training.
Input is a batch from a torch text iterator.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self, torch_batch, pad_index, use_cuda=False):
"""
Create a new joey batch from a torch batch.
This batch extends torch text's batch attributes with src and trg
length, masks, number of non-padded tokens in trg.
Furthermore, it can be sorted by src length.
:param torch_batch:
:param pad_index:
:param use_cuda:
"""
self.src, self.src_length = torch_batch.src
self.src_mask = (self.src != pad_index).unsqueeze(1)
self.nseqs = self.src.size(0)
self.trg_input = None
self.trg = None
self.trg_mask = None
self.trg_length = None
self.ntokens = None
self.use_cuda = use_cuda
self.device = torch.device("cuda" if self.use_cuda else "cpu")
if hasattr(torch_batch, "trg"):
trg, trg_length = torch_batch.trg
# trg_input is used for teacher forcing, last one is cut off
self.trg_input = trg[:, :-1]
self.trg_length = trg_length
# trg is used for loss computation, shifted by one since BOS
self.trg = trg[:, 1:]
# we exclude the padded areas from the loss computation
self.trg_mask = (self.trg_input != pad_index).unsqueeze(1)
self.ntokens = (self.trg != pad_index).data.sum().item()
if self.use_cuda:
self._make_cuda()
def _make_cuda(self):
"""
Move the batch to GPU
:return:
"""
self.src = self.src.to(self.device)
self.src_mask = self.src_mask.to(self.device)
self.src_length = self.src_length.to(self.device)
if self.trg_input is not None:
self.trg_input = self.trg_input.to(self.device)
self.trg = self.trg.to(self.device)
self.trg_mask = self.trg_mask.to(self.device)
def sort_by_src_length(self):
"""
Sort by src length (descending) and return index to revert sort
:return:
"""
_, perm_index = self.src_length.sort(0, descending=True)
rev_index = [0]*perm_index.size(0)
for new_pos, old_pos in enumerate(perm_index.cpu().numpy()):
rev_index[old_pos] = new_pos
sorted_src_length = self.src_length[perm_index]
sorted_src = self.src[perm_index]
sorted_src_mask = self.src_mask[perm_index]
if self.trg_input is not None:
sorted_trg_input = self.trg_input[perm_index]
sorted_trg_length = self.trg_length[perm_index]
sorted_trg_mask = self.trg_mask[perm_index]
sorted_trg = self.trg[perm_index]
self.src = sorted_src
self.src_length = sorted_src_length
self.src_mask = sorted_src_mask
if self.trg_input is not None:
self.trg_input = sorted_trg_input
self.trg_mask = sorted_trg_mask
self.trg_length = sorted_trg_length
self.trg = sorted_trg
if self.use_cuda:
self._make_cuda()
return rev_index
| 3,327 | 32.616162 | 73 | py |
KSTER | KSTER-main/joeynmt/loss.py | # coding: utf-8
"""
Module to implement training loss
"""
import torch
from torch import nn, Tensor
from torch.autograd import Variable
class XentLoss(nn.Module):
"""
Cross-Entropy Loss with optional label smoothing
"""
def __init__(self, pad_index: int, smoothing: float = 0.0):
super().__init__()
self.smoothing = smoothing
self.pad_index = pad_index
if self.smoothing <= 0.0:
# standard xent loss
self.criterion = nn.NLLLoss(ignore_index=self.pad_index,
reduction='sum')
else:
# custom label-smoothed loss, computed with KL divergence loss
self.criterion = nn.KLDivLoss(reduction='sum')
def _smooth_targets(self, targets: Tensor, vocab_size: int):
"""
Smooth target distribution. All non-reference words get uniform
probability mass according to "smoothing".
:param targets: target indices, batch*seq_len
:param vocab_size: size of the output vocabulary
:return: smoothed target distributions, batch*seq_len x vocab_size
"""
# batch*seq_len x vocab_size
smooth_dist = targets.new_zeros((targets.size(0), vocab_size)).float()
# fill distribution uniformly with smoothing
smooth_dist.fill_(self.smoothing / (vocab_size - 2))
# assign true label the probability of 1-smoothing ("confidence")
smooth_dist.scatter_(1, targets.unsqueeze(1).data, 1.0-self.smoothing)
# give padding probability of 0 everywhere
smooth_dist[:, self.pad_index] = 0
# masking out padding area (sum of probabilities for padding area = 0)
padding_positions = torch.nonzero(targets.data == self.pad_index,
as_tuple=False)
# pylint: disable=len-as-condition
if len(padding_positions) > 0:
smooth_dist.index_fill_(0, padding_positions.squeeze(), 0.0)
return Variable(smooth_dist, requires_grad=False)
# pylint: disable=arguments-differ
def forward(self, log_probs, targets):
"""
Compute the cross-entropy between logits and targets.
If label smoothing is used, target distributions are not one-hot, but
"1-smoothing" for the correct target token and the rest of the
probability mass is uniformly spread across the other tokens.
:param log_probs: log probabilities as predicted by model
:param targets: target indices
:return:
"""
if self.smoothing > 0:
targets = self._smooth_targets(
targets=targets.contiguous().view(-1),
vocab_size=log_probs.size(-1))
# targets: distributions with batch*seq_len x vocab_size
assert log_probs.contiguous().view(-1, log_probs.size(-1)).shape \
== targets.shape
else:
# targets: indices with batch*seq_len
targets = targets.contiguous().view(-1)
loss = self.criterion(
log_probs.contiguous().view(-1, log_probs.size(-1)), targets)
return loss
| 3,120 | 38.506329 | 78 | py |
KSTER | KSTER-main/joeynmt/embeddings.py | # coding: utf-8
"""
Embedding module
"""
import io
import math
import logging
import torch
from torch import nn, Tensor
from joeynmt.helpers import freeze_params
from joeynmt.vocabulary import Vocabulary
logger = logging.getLogger(__name__)
class Embeddings(nn.Module):
"""
Simple embeddings class
"""
# pylint: disable=unused-argument
def __init__(self,
embedding_dim: int = 64,
scale: bool = False,
vocab_size: int = 0,
padding_idx: int = 1,
freeze: bool = False,
**kwargs):
"""
Create new embeddings for the vocabulary.
Use scaling for the Transformer.
:param embedding_dim:
:param scale:
:param vocab_size:
:param padding_idx:
:param freeze: freeze the embeddings during training
"""
super().__init__()
self.embedding_dim = embedding_dim
self.scale = scale
self.vocab_size = vocab_size
self.lut = nn.Embedding(vocab_size, self.embedding_dim,
padding_idx=padding_idx)
if freeze:
freeze_params(self)
# pylint: disable=arguments-differ
def forward(self, x: Tensor) -> Tensor:
"""
Perform lookup for input `x` in the embedding table.
:param x: index in the vocabulary
:return: embedded representation for `x`
"""
if self.scale:
return self.lut(x) * math.sqrt(self.embedding_dim)
return self.lut(x)
def __repr__(self):
return "%s(embedding_dim=%d, vocab_size=%d)" % (
self.__class__.__name__, self.embedding_dim, self.vocab_size)
#from fairseq
def load_from_file(self, embed_path: str, vocab: Vocabulary):
"""Load pretrained embedding weights from text file.
- First line is expected to contain vocabulary size and dimension.
The dimension has to match the model's specified embedding size,
the vocabulary size is used in logging only.
- Each line should contain word and embedding weights
separated by spaces.
- The pretrained vocabulary items that are not part of the
joeynmt's vocabulary will be ignored (not loaded from the file).
- The initialization (specified in config["model"]["embed_initializer"])
of joeynmt's vocabulary items that are not part of the
pretrained vocabulary will be kept (not overwritten in this func).
- This function should be called after initialization!
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
:param embed_path: embedding weights text file
:param vocab: Vocabulary object
"""
embed_dict = {}
# parse file
with io.open(embed_path, 'r', encoding='utf-8',
errors='ignore') as f_embed:
vocab_size, d = map(int, f_embed.readline().split())
assert self.embedding_dim == d, \
"Embedding dimension doesn't match."
for line in f_embed.readlines():
tokens = line.rstrip().split(' ')
if tokens[0] in vocab.stoi.keys():
embed_dict[tokens[0]] = torch.FloatTensor(
[float(t) for t in tokens[1:]])
logger.warning("Loaded {} of {} ({:%}) tokens "
"in the pre-trained embeddings.".format(
len(embed_dict), vocab_size,
len(embed_dict)/vocab_size))
# assign
for idx in range(len(vocab)):
token = vocab.itos[idx]
if token in embed_dict:
assert self.embedding_dim == len(embed_dict[token])
self.lut.weight.data[idx] = embed_dict[token]
logger.warning("Loaded {} of {} ({:%}) tokens "
"of the JoeyNMT's vocabulary.".format(
len(embed_dict), len(vocab),
len(embed_dict)/len(vocab)))
| 4,157 | 33.363636 | 80 | py |
KSTER | KSTER-main/joeynmt/training.py | # coding: utf-8
"""
Training module
"""
import argparse
import time
import shutil
from typing import List
import logging
import os
import sys
import collections
import pathlib
import numpy as np
import torch
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from torchtext.data import Dataset
from joeynmt.model import build_model
from joeynmt.batch import Batch
from joeynmt.helpers import log_data_info, load_config, log_cfg, \
store_attention_plots, load_checkpoint, make_model_dir, \
make_logger, set_seed, symlink_update, latest_checkpoint_update, \
ConfigurationError, get_sacrebleu_description
from joeynmt.model import Model, _DataParallel
from joeynmt.prediction import validate_on_data
from joeynmt.loss import XentLoss
from joeynmt.data import load_data, make_data_iter
from joeynmt.builders import build_optimizer, build_scheduler, \
build_gradient_clipper
from joeynmt.prediction import test
# for fp16 training
try:
from apex import amp
amp.register_half_function(torch, "einsum")
except ImportError as no_apex:
# error handling in TrainManager object construction
pass
logger = logging.getLogger(__name__)
# pylint: disable=too-many-instance-attributes
class TrainManager:
""" Manages training loop, validations, learning rate scheduling
and early stopping."""
def __init__(self, model: Model, config: dict,
batch_class: Batch = Batch) -> None:
"""
Creates a new TrainManager for a model, specified as in configuration.
:param model: torch module defining the model
:param config: dictionary containing the training configurations
:param batch_class: batch class to encapsulate the torch class
"""
train_config = config["training"]
self.batch_class = batch_class
# files for logging and storing
self.model_dir = train_config["model_dir"]
assert os.path.exists(self.model_dir)
self.logging_freq = train_config.get("logging_freq", 100)
self.valid_report_file = "{}/validations.txt".format(self.model_dir)
self.tb_writer = SummaryWriter(log_dir=self.model_dir + "/tensorboard/")
self.save_latest_checkpoint = train_config.get("save_latest_ckpt", True)
# model
self.model = model
self._log_parameters_list()
# objective
self.label_smoothing = train_config.get("label_smoothing", 0.0)
self.model.loss_function = XentLoss(pad_index=self.model.pad_index,
smoothing=self.label_smoothing)
self.normalization = train_config.get("normalization", "batch")
if self.normalization not in ["batch", "tokens", "none"]:
raise ConfigurationError("Invalid normalization option."
"Valid options: "
"'batch', 'tokens', 'none'.")
# optimization
self.learning_rate_min = train_config.get("learning_rate_min", 1.0e-8)
self.clip_grad_fun = build_gradient_clipper(config=train_config)
self.optimizer = build_optimizer(config=train_config,
parameters=model.parameters())
# validation & early stopping
self.validation_freq = train_config.get("validation_freq", 1000)
self.log_valid_sents = train_config.get("print_valid_sents", [0, 1, 2])
self.ckpt_queue = collections.deque(
maxlen=train_config.get("keep_last_ckpts", 5))
self.eval_metric = train_config.get("eval_metric", "bleu")
if self.eval_metric not in [
'bleu', 'chrf', 'token_accuracy', 'sequence_accuracy'
]:
raise ConfigurationError("Invalid setting for 'eval_metric', "
"valid options: 'bleu', 'chrf', "
"'token_accuracy', 'sequence_accuracy'.")
self.early_stopping_metric = train_config.get("early_stopping_metric",
"eval_metric")
# early_stopping_metric decides on how to find the early stopping point:
# ckpts are written when there's a new high/low score for this metric.
# If we schedule after BLEU/chrf/accuracy, we want to maximize the
# score, else we want to minimize it.
if self.early_stopping_metric in ["ppl", "loss"]:
self.minimize_metric = True
elif self.early_stopping_metric == "eval_metric":
if self.eval_metric in [
"bleu", "chrf", "token_accuracy", "sequence_accuracy"
]:
self.minimize_metric = False
# eval metric that has to get minimized (not yet implemented)
else:
self.minimize_metric = True
else:
raise ConfigurationError(
"Invalid setting for 'early_stopping_metric', "
"valid options: 'loss', 'ppl', 'eval_metric'.")
# eval options
test_config = config["testing"]
self.bpe_type = test_config.get("bpe_type", "subword-nmt")
self.sacrebleu = get_sacrebleu_description(config)
# learning rate scheduling
self.scheduler, self.scheduler_step_at = build_scheduler(
config=train_config,
scheduler_mode="min" if self.minimize_metric else "max",
optimizer=self.optimizer,
hidden_size=config["model"]["encoder"]["hidden_size"])
# data & batch handling
self.level = config["data"]["level"]
if self.level not in ["word", "bpe", "char"]:
raise ConfigurationError("Invalid segmentation level. "
"Valid options: 'word', 'bpe', 'char'.")
self.shuffle = train_config.get("shuffle", True)
self.epochs = train_config["epochs"]
self.batch_size = train_config["batch_size"]
# Placeholder so that we can use the train_iter in other functions.
self.train_iter = None
self.train_iter_state = None
# per-device batch_size = self.batch_size // self.n_gpu
self.batch_type = train_config.get("batch_type", "sentence")
self.eval_batch_size = train_config.get("eval_batch_size",
self.batch_size)
# per-device eval_batch_size = self.eval_batch_size // self.n_gpu
self.eval_batch_type = train_config.get("eval_batch_type",
self.batch_type)
self.batch_multiplier = train_config.get("batch_multiplier", 1)
# generation
self.max_output_length = train_config.get("max_output_length", None)
# CPU / GPU
self.use_cuda = train_config["use_cuda"] and torch.cuda.is_available()
self.n_gpu = torch.cuda.device_count() if self.use_cuda else 0
self.device = torch.device("cuda" if self.use_cuda else "cpu")
if self.use_cuda:
self.model.to(self.device)
# fp16
self.fp16 = train_config.get("fp16", False)
if self.fp16:
if 'apex' not in sys.modules:
raise ImportError("Please install apex from "
"https://www.github.com/nvidia/apex "
"to use fp16 training.") from no_apex
self.model, self.optimizer = amp.initialize(self.model,
self.optimizer,
opt_level='O1')
# opt level: one of {"O0", "O1", "O2", "O3"}
# see https://nvidia.github.io/apex/amp.html#opt-levels
# initialize training statistics
self.stats = self.TrainStatistics(
steps=0,
stop=False,
total_tokens=0,
best_ckpt_iter=0,
best_ckpt_score=np.inf if self.minimize_metric else -np.inf,
minimize_metric=self.minimize_metric,
max_steps=train_config["max_steps"])
# model parameters
if "load_model" in train_config.keys():
self.init_from_checkpoint(
train_config["load_model"],
reset_best_ckpt=train_config.get("reset_best_ckpt", False),
reset_scheduler=train_config.get("reset_scheduler", False),
reset_optimizer=train_config.get("reset_optimizer", False),
reset_iter_state=train_config.get("reset_iter_state", False))
# multi-gpu training (should be after apex fp16 initialization)
if self.n_gpu > 1:
self.model = _DataParallel(self.model)
def _save_checkpoint(self, new_best: bool = True) -> None:
"""
Save the model's current parameters and the training state to a
checkpoint.
The training state contains the total number of training steps,
the total number of training tokens,
the best checkpoint score and iteration so far,
and optimizer and scheduler states.
:param new_best: This boolean signals which symlink we will use for the
new checkpoint. If it is true, we update best.ckpt, else latest.ckpt.
"""
model_path = os.path.join(self.model_dir,
"{}.ckpt".format(self.stats.steps))
model_state_dict = self.model.module.state_dict() \
if isinstance(self.model, torch.nn.DataParallel) \
else self.model.state_dict()
state = {
"steps":
self.stats.steps,
"total_tokens":
self.stats.total_tokens,
"best_ckpt_score":
self.stats.best_ckpt_score,
"best_ckpt_iteration":
self.stats.best_ckpt_iter,
"model_state":
model_state_dict,
"optimizer_state":
self.optimizer.state_dict(),
"scheduler_state":
self.scheduler.state_dict() if self.scheduler is not None else None,
'amp_state':
amp.state_dict() if self.fp16 else None,
"train_iter_state":
self.train_iter.state_dict()
}
torch.save(state, model_path)
symlink_target = "{}.ckpt".format(self.stats.steps)
if new_best:
if len(self.ckpt_queue) == self.ckpt_queue.maxlen:
to_delete = self.ckpt_queue.popleft() # delete oldest ckpt
try:
os.remove(to_delete)
except FileNotFoundError:
logger.warning(
"Wanted to delete old checkpoint %s but "
"file does not exist.", to_delete)
self.ckpt_queue.append(model_path)
best_path = "{}/best.ckpt".format(self.model_dir)
try:
# create/modify symbolic link for best checkpoint
symlink_update(symlink_target, best_path)
except OSError:
# overwrite best.ckpt
torch.save(state, best_path)
if self.save_latest_checkpoint:
last_path = "{}/latest.ckpt".format(self.model_dir)
previous_path = latest_checkpoint_update(symlink_target, last_path)
# If the last ckpt is in the ckpt_queue, we don't want to delete it.
can_delete = True
for ckpt_path in self.ckpt_queue:
if pathlib.Path(ckpt_path).resolve() == previous_path:
can_delete = False
break
if can_delete and previous_path is not None:
os.remove(previous_path)
def init_from_checkpoint(self,
path: str,
reset_best_ckpt: bool = False,
reset_scheduler: bool = False,
reset_optimizer: bool = False,
reset_iter_state: bool = False) -> None:
"""
Initialize the trainer from a given checkpoint file.
This checkpoint file contains not only model parameters, but also
scheduler and optimizer states, see `self._save_checkpoint`.
:param path: path to checkpoint
:param reset_best_ckpt: reset tracking of the best checkpoint,
use for domain adaptation with a new dev
set or when using a new metric for fine-tuning.
:param reset_scheduler: reset the learning rate scheduler, and do not
use the one stored in the checkpoint.
:param reset_optimizer: reset the optimizer, and do not use the one
stored in the checkpoint.
:param reset_iter_state: reset the sampler's internal state and do not
use the one stored in the checkpoint.
"""
logger.info("Loading model from %s", path)
model_checkpoint = load_checkpoint(path=path, use_cuda=self.use_cuda)
# restore model and optimizer parameters
self.model.load_state_dict(model_checkpoint["model_state"])
if not reset_optimizer:
self.optimizer.load_state_dict(model_checkpoint["optimizer_state"])
else:
logger.info("Reset optimizer.")
if not reset_scheduler:
if model_checkpoint["scheduler_state"] is not None and \
self.scheduler is not None:
self.scheduler.load_state_dict(
model_checkpoint["scheduler_state"])
else:
logger.info("Reset scheduler.")
# restore counts
self.stats.steps = model_checkpoint["steps"]
self.stats.total_tokens = model_checkpoint["total_tokens"]
if not reset_best_ckpt:
self.stats.best_ckpt_score = model_checkpoint["best_ckpt_score"]
self.stats.best_ckpt_iter = model_checkpoint["best_ckpt_iteration"]
else:
logger.info("Reset tracking of the best checkpoint.")
if (not reset_iter_state
and model_checkpoint.get('train_iter_state', None) is not None):
self.train_iter_state = model_checkpoint["train_iter_state"]
# move parameters to cuda
if self.use_cuda:
self.model.to(self.device)
# fp16
if self.fp16 and model_checkpoint.get("amp_state", None) is not None:
amp.load_state_dict(model_checkpoint['amp_state'])
# pylint: disable=unnecessary-comprehension
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def train_and_validate(self, train_data: Dataset, valid_data: Dataset) \
-> None:
"""
Train the model and validate it from time to time on the validation set.
:param train_data: training data
:param valid_data: validation data
"""
self.train_iter = make_data_iter(train_data,
batch_size=self.batch_size,
batch_type=self.batch_type,
train=True,
shuffle=self.shuffle)
if self.train_iter_state is not None:
self.train_iter.load_state_dict(self.train_iter_state)
#################################################################
# simplify accumulation logic:
#################################################################
# for epoch in range(epochs):
# self.model.zero_grad()
# epoch_loss = 0.0
# batch_loss = 0.0
# for i, batch in enumerate(iter(self.train_iter)):
#
# # gradient accumulation:
# # loss.backward() inside _train_step()
# batch_loss += self._train_step(inputs)
#
# if (i + 1) % self.batch_multiplier == 0:
# self.optimizer.step() # update!
# self.model.zero_grad() # reset gradients
# self.steps += 1 # increment counter
#
# epoch_loss += batch_loss # accumulate batch loss
# batch_loss = 0 # reset batch loss
#
# # leftovers are just ignored.
#################################################################
logger.info(
"Train stats:\n"
"\tdevice: %s\n"
"\tn_gpu: %d\n"
"\t16-bits training: %r\n"
"\tgradient accumulation: %d\n"
"\tbatch size per device: %d\n"
"\ttotal batch size (w. parallel & accumulation): %d", self.device,
self.n_gpu, self.fp16, self.batch_multiplier, self.batch_size //
self.n_gpu if self.n_gpu > 1 else self.batch_size,
self.batch_size * self.batch_multiplier)
for epoch_no in range(self.epochs):
logger.info("EPOCH %d", epoch_no + 1)
if self.scheduler is not None and self.scheduler_step_at == "epoch":
self.scheduler.step(epoch=epoch_no)
self.model.train()
# Reset statistics for each epoch.
start = time.time()
total_valid_duration = 0
start_tokens = self.stats.total_tokens
self.model.zero_grad()
epoch_loss = 0
batch_loss = 0
for i, batch in enumerate(iter(self.train_iter)):
# create a Batch object from torchtext batch
batch = self.batch_class(batch, self.model.pad_index,
use_cuda=self.use_cuda)
# get batch loss
batch_loss += self._train_step(batch)
# update!
if (i + 1) % self.batch_multiplier == 0:
# clip gradients (in-place)
if self.clip_grad_fun is not None:
if self.fp16:
self.clip_grad_fun(
params=amp.master_params(self.optimizer))
else:
self.clip_grad_fun(params=self.model.parameters())
# make gradient step
self.optimizer.step()
# decay lr
if self.scheduler is not None \
and self.scheduler_step_at == "step":
self.scheduler.step()
# reset gradients
self.model.zero_grad()
# increment step counter
self.stats.steps += 1
if self.stats.steps >= self.stats.max_steps:
self.stats.stop = True
# log learning progress
if self.stats.steps % self.logging_freq == 0:
self.tb_writer.add_scalar("train/train_batch_loss",
batch_loss, self.stats.steps)
elapsed = time.time() - start - total_valid_duration
elapsed_tokens = self.stats.total_tokens - start_tokens
logger.info(
"Epoch %3d, Step: %8d, Batch Loss: %12.6f, "
"Tokens per Sec: %8.0f, Lr: %.6f", epoch_no + 1,
self.stats.steps, batch_loss,
elapsed_tokens / elapsed,
self.optimizer.param_groups[0]["lr"])
start = time.time()
total_valid_duration = 0
start_tokens = self.stats.total_tokens
# Only add complete loss of full mini-batch to epoch_loss
epoch_loss += batch_loss # accumulate epoch_loss
batch_loss = 0 # rest batch_loss
# validate on the entire dev set
if self.stats.steps % self.validation_freq == 0:
valid_duration = self._validate(valid_data, epoch_no)
total_valid_duration += valid_duration
if self.stats.stop:
break
if self.stats.stop:
logger.info('Training ended since minimum lr %f was reached.',
self.learning_rate_min)
break
logger.info('Epoch %3d: total training loss %.2f', epoch_no + 1,
epoch_loss)
else:
logger.info('Training ended after %3d epochs.', epoch_no + 1)
logger.info('Best validation result (greedy) at step %8d: %6.2f %s.',
self.stats.best_ckpt_iter, self.stats.best_ckpt_score,
self.early_stopping_metric)
self.tb_writer.close() # close Tensorboard writer
def _train_step(self, batch: Batch) -> Tensor:
"""
Train the model on one batch: Compute the loss.
:param batch: training batch
:return: loss for batch (sum)
"""
# reactivate training
self.model.train()
# get loss
batch_loss, _, _, _ = self.model(return_type="loss", **vars(batch))
# sum multi-gpu losses
if self.n_gpu > 1:
batch_loss = batch_loss.sum()
# normalize batch loss
if self.normalization == "batch":
normalizer = batch.nseqs
elif self.normalization == "tokens":
normalizer = batch.ntokens
elif self.normalization == "none":
normalizer = 1
else:
raise NotImplementedError("Only normalize by 'batch' or 'tokens' "
"or summation of loss 'none' implemented")
norm_batch_loss = batch_loss / normalizer
if self.n_gpu > 1:
norm_batch_loss = norm_batch_loss / self.n_gpu
if self.batch_multiplier > 1:
norm_batch_loss = norm_batch_loss / self.batch_multiplier
# accumulate gradients
if self.fp16:
with amp.scale_loss(norm_batch_loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
norm_batch_loss.backward()
# increment token counter
self.stats.total_tokens += batch.ntokens
return norm_batch_loss.item()
def _validate(self, valid_data, epoch_no):
valid_start_time = time.time()
valid_score, valid_loss, valid_ppl, valid_sources, \
valid_sources_raw, valid_references, valid_hypotheses, \
valid_hypotheses_raw, valid_attention_scores = \
validate_on_data(
batch_size=self.eval_batch_size,
batch_class=self.batch_class,
data=valid_data,
eval_metric=self.eval_metric,
level=self.level, model=self.model,
use_cuda=self.use_cuda,
max_output_length=self.max_output_length,
compute_loss=True,
beam_size=1, # greedy validations
batch_type=self.eval_batch_type,
postprocess=True, # always remove BPE for validation
bpe_type=self.bpe_type, # "subword-nmt" or "sentencepiece"
sacrebleu=self.sacrebleu, # sacrebleu options
n_gpu=self.n_gpu
)
self.tb_writer.add_scalar("valid/valid_loss", valid_loss,
self.stats.steps)
self.tb_writer.add_scalar("valid/valid_score", valid_score,
self.stats.steps)
self.tb_writer.add_scalar("valid/valid_ppl", valid_ppl,
self.stats.steps)
if self.early_stopping_metric == "loss":
ckpt_score = valid_loss
elif self.early_stopping_metric in ["ppl", "perplexity"]:
ckpt_score = valid_ppl
else:
ckpt_score = valid_score
if self.scheduler is not None \
and self.scheduler_step_at == "validation":
self.scheduler.step(ckpt_score)
new_best = False
if self.stats.is_best(ckpt_score):
self.stats.best_ckpt_score = ckpt_score
self.stats.best_ckpt_iter = self.stats.steps
logger.info('Hooray! New best validation result [%s]!',
self.early_stopping_metric)
if self.ckpt_queue.maxlen > 0:
logger.info("Saving new checkpoint.")
new_best = True
self._save_checkpoint(new_best)
elif self.save_latest_checkpoint:
self._save_checkpoint(new_best)
# append to validation report
self._add_report(valid_score=valid_score,
valid_loss=valid_loss,
valid_ppl=valid_ppl,
eval_metric=self.eval_metric,
new_best=new_best)
self._log_examples(sources_raw=[v for v in valid_sources_raw],
sources=valid_sources,
hypotheses_raw=valid_hypotheses_raw,
hypotheses=valid_hypotheses,
references=valid_references)
valid_duration = time.time() - valid_start_time
logger.info(
'Validation result (greedy) at epoch %3d, '
'step %8d: %s: %6.2f, loss: %8.4f, ppl: %8.4f, '
'duration: %.4fs', epoch_no + 1, self.stats.steps, self.eval_metric,
valid_score, valid_loss, valid_ppl, valid_duration)
# store validation set outputs
self._store_outputs(valid_hypotheses)
# store attention plots for selected valid sentences
if valid_attention_scores:
store_attention_plots(attentions=valid_attention_scores,
targets=valid_hypotheses_raw,
sources=[s for s in valid_data.src],
indices=self.log_valid_sents,
output_prefix="{}/att.{}".format(
self.model_dir, self.stats.steps),
tb_writer=self.tb_writer,
steps=self.stats.steps)
return valid_duration
def _add_report(self,
valid_score: float,
valid_ppl: float,
valid_loss: float,
eval_metric: str,
new_best: bool = False) -> None:
"""
Append a one-line report to validation logging file.
:param valid_score: validation evaluation score [eval_metric]
:param valid_ppl: validation perplexity
:param valid_loss: validation loss (sum over whole validation set)
:param eval_metric: evaluation metric, e.g. "bleu"
:param new_best: whether this is a new best model
"""
current_lr = -1
# ignores other param groups for now
for param_group in self.optimizer.param_groups:
current_lr = param_group['lr']
if current_lr < self.learning_rate_min:
self.stats.stop = True
with open(self.valid_report_file, 'a') as opened_file:
opened_file.write(
"Steps: {}\tLoss: {:.5f}\tPPL: {:.5f}\t{}: {:.5f}\t"
"LR: {:.8f}\t{}\n".format(self.stats.steps, valid_loss,
valid_ppl, eval_metric, valid_score,
current_lr, "*" if new_best else ""))
def _log_parameters_list(self) -> None:
"""
Write all model parameters (name, shape) to the log.
"""
model_parameters = filter(lambda p: p.requires_grad,
self.model.parameters())
n_params = sum([np.prod(p.size()) for p in model_parameters])
logger.info("Total params: %d", n_params)
trainable_params = [
n for (n, p) in self.model.named_parameters() if p.requires_grad
]
logger.debug("Trainable parameters: %s", sorted(trainable_params))
assert trainable_params
def _log_examples(self,
sources: List[str],
hypotheses: List[str],
references: List[str],
sources_raw: List[List[str]] = None,
hypotheses_raw: List[List[str]] = None,
references_raw: List[List[str]] = None) -> None:
"""
Log a the first `self.log_valid_sents` sentences from given examples.
:param sources: decoded sources (list of strings)
:param hypotheses: decoded hypotheses (list of strings)
:param references: decoded references (list of strings)
:param sources_raw: raw sources (list of list of tokens)
:param hypotheses_raw: raw hypotheses (list of list of tokens)
:param references_raw: raw references (list of list of tokens)
"""
for p in self.log_valid_sents:
if p >= len(sources):
continue
logger.info("Example #%d", p)
if sources_raw is not None:
logger.debug("\tRaw source: %s", sources_raw[p])
if references_raw is not None:
logger.debug("\tRaw reference: %s", references_raw[p])
if hypotheses_raw is not None:
logger.debug("\tRaw hypothesis: %s", hypotheses_raw[p])
logger.info("\tSource: %s", sources[p])
logger.info("\tReference: %s", references[p])
logger.info("\tHypothesis: %s", hypotheses[p])
def _store_outputs(self, hypotheses: List[str]) -> None:
"""
Write current validation outputs to file in `self.model_dir.`
:param hypotheses: list of strings
"""
current_valid_output_file = "{}/{}.hyps".format(self.model_dir,
self.stats.steps)
with open(current_valid_output_file, 'w') as opened_file:
for hyp in hypotheses:
opened_file.write("{}\n".format(hyp))
class TrainStatistics:
def __init__(self,
steps: int = 0,
stop: bool = False,
total_tokens: int = 0,
best_ckpt_iter: int = 0,
best_ckpt_score: float = np.inf,
minimize_metric: bool = True,
max_steps: int = 200000) -> None:
# global update step counter
self.steps = steps
# stop training if this flag is True
# by reaching learning rate minimum
self.stop = stop
# number of total tokens seen so far
self.total_tokens = total_tokens
# store iteration point of best ckpt
self.best_ckpt_iter = best_ckpt_iter
# initial values for best scores
self.best_ckpt_score = best_ckpt_score
# minimize or maximize score
self.minimize_metric = minimize_metric
self.max_steps = max_steps
def is_best(self, score):
if self.minimize_metric:
is_best = score < self.best_ckpt_score
else:
is_best = score > self.best_ckpt_score
return is_best
def train(cfg_file: str) -> None:
"""
Main training function. After training, also test on test data if given.
:param cfg_file: path to configuration yaml file
"""
cfg = load_config(cfg_file)
# make logger
model_dir = make_model_dir(cfg["training"]["model_dir"],
overwrite=cfg["training"].get(
"overwrite", False))
_ = make_logger(model_dir, mode="train") # version string returned
# TODO: save version number in model checkpoints
# set the random seed
set_seed(seed=cfg["training"].get("random_seed", 42))
# load the data
train_data, dev_data, test_data, src_vocab, trg_vocab = load_data(
data_cfg=cfg["data"])
# build an encoder-decoder model
model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
# for training management, e.g. early stopping and model selection
trainer = TrainManager(model=model, config=cfg)
# store copy of original training config in model dir
shutil.copy2(cfg_file, model_dir + "/config.yaml")
# log all entries of config
log_cfg(cfg)
log_data_info(train_data=train_data,
valid_data=dev_data,
test_data=test_data,
src_vocab=src_vocab,
trg_vocab=trg_vocab)
logger.info(str(model))
# store the vocabs
src_vocab_file = "{}/src_vocab.txt".format(cfg["training"]["model_dir"])
src_vocab.to_file(src_vocab_file)
trg_vocab_file = "{}/trg_vocab.txt".format(cfg["training"]["model_dir"])
trg_vocab.to_file(trg_vocab_file)
# train the model
trainer.train_and_validate(train_data=train_data, valid_data=dev_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser('Joey-NMT')
parser.add_argument("config",
default="configs/default.yaml",
type=str,
help="Training configuration file (yaml).")
args = parser.parse_args()
train(cfg_file=args.config) | 33,649 | 40.389914 | 80 | py |
KSTER | KSTER-main/joeynmt/model.py | # coding: utf-8
"""
Module to represents whole models
"""
from typing import Callable
import logging
import torch.nn as nn
from torch import Tensor
import torch.nn.functional as F
from joeynmt.initialization import initialize_model
from joeynmt.embeddings import Embeddings
from joeynmt.encoders import Encoder, RecurrentEncoder, TransformerEncoder
from joeynmt.decoders import Decoder, RecurrentDecoder, TransformerDecoder
from joeynmt.constants import PAD_TOKEN, EOS_TOKEN, BOS_TOKEN
from joeynmt.vocabulary import Vocabulary
from joeynmt.helpers import ConfigurationError
logger = logging.getLogger(__name__)
class Model(nn.Module):
"""
Base Model class
"""
def __init__(self,
encoder: Encoder,
decoder: Decoder,
src_embed: Embeddings,
trg_embed: Embeddings,
src_vocab: Vocabulary,
trg_vocab: Vocabulary) -> None:
"""
Create a new encoder-decoder model
:param encoder: encoder
:param decoder: decoder
:param src_embed: source embedding
:param trg_embed: target embedding
:param src_vocab: source vocabulary
:param trg_vocab: target vocabulary
"""
super().__init__()
self.src_embed = src_embed
self.trg_embed = trg_embed
self.encoder = encoder
self.decoder = decoder
self.src_vocab = src_vocab
self.trg_vocab = trg_vocab
self.bos_index = self.trg_vocab.stoi[BOS_TOKEN]
self.pad_index = self.trg_vocab.stoi[PAD_TOKEN]
self.eos_index = self.trg_vocab.stoi[EOS_TOKEN]
self._loss_function = None # set by the TrainManager
@property
def loss_function(self):
return self._x
@loss_function.setter
def loss_function(self, loss_function: Callable):
self._loss_function = loss_function
def forward(self, return_type: str = None, **kwargs) \
-> (Tensor, Tensor, Tensor, Tensor):
""" Interface for multi-gpu
For DataParallel, We need to encapsulate all model call: model.encode(),
model.decode(), and model.encode_decode() by model.__call__().
model.__call__() triggers model.forward() together with pre hooks
and post hooks, which take care of multi-gpu distribution.
:param return_type: one of {"loss", "encode", "decode"}
"""
if return_type is None:
raise ValueError("Please specify return_type: "
"{`loss`, `encode`, `decode`}.")
return_tuple = (None, None, None, None)
if return_type == "loss":
assert self.loss_function is not None
out, _, _, _ = self._encode_decode(**kwargs)
# compute log probs
log_probs = F.log_softmax(out, dim=-1)
# compute batch loss
batch_loss = self.loss_function(log_probs, kwargs["trg"])
# return batch loss
# = sum over all elements in batch that are not pad
return_tuple = (batch_loss, None, None, None)
elif return_type == "combiner_loss":
assert self.loss_function is not None
if not hasattr(self, "combiner"):
return self.forward(return_type="loss", **kwargs)
logits, hidden, _, _ = self._encode_decode(**kwargs)
# compute log probs
log_probs = self.combiner(hidden, logits)
# compute batch loss
batch_loss = self.loss_function(log_probs, kwargs["trg"])
# return batch loss
# = sum over all elements in batch that are not pad
return_tuple = (batch_loss, None, None, None)
elif return_type == "encode":
encoder_output, encoder_hidden = self._encode(**kwargs)
# return encoder outputs
return_tuple = (encoder_output, encoder_hidden, None, None)
elif return_type == "decode":
outputs, hidden, att_probs, att_vectors = self._decode(**kwargs)
# return decoder outputs
return_tuple = (outputs, hidden, att_probs, att_vectors)
return return_tuple
# pylint: disable=arguments-differ
def _encode_decode(self, src: Tensor, trg_input: Tensor, src_mask: Tensor,
src_length: Tensor, trg_mask: Tensor = None, **kwargs) \
-> (Tensor, Tensor, Tensor, Tensor):
"""
First encodes the source sentence.
Then produces the target one word at a time.
:param src: source input
:param trg_input: target input
:param src_mask: source mask
:param src_length: length of source inputs
:param trg_mask: target mask
:return: decoder outputs
"""
encoder_output, encoder_hidden = self._encode(src=src,
src_length=src_length,
src_mask=src_mask,
**kwargs)
unroll_steps = trg_input.size(1)
assert "decoder_hidden" not in kwargs.keys()
return self._decode(encoder_output=encoder_output,
encoder_hidden=encoder_hidden,
src_mask=src_mask, trg_input=trg_input,
unroll_steps=unroll_steps,
trg_mask=trg_mask, **kwargs)
def _encode(self, src: Tensor, src_length: Tensor, src_mask: Tensor,
**_kwargs) -> (Tensor, Tensor):
"""
Encodes the source sentence.
:param src:
:param src_length:
:param src_mask:
:return: encoder outputs (output, hidden_concat)
"""
return self.encoder(self.src_embed(src), src_length, src_mask,
**_kwargs)
def _decode(self, encoder_output: Tensor, encoder_hidden: Tensor,
src_mask: Tensor, trg_input: Tensor,
unroll_steps: int, decoder_hidden: Tensor = None,
att_vector: Tensor = None, trg_mask: Tensor = None, **_kwargs) \
-> (Tensor, Tensor, Tensor, Tensor):
"""
Decode, given an encoded source sentence.
:param encoder_output: encoder states for attention computation
:param encoder_hidden: last encoder state for decoder initialization
:param src_mask: source mask, 1 at valid tokens
:param trg_input: target inputs
:param unroll_steps: number of steps to unrol the decoder for
:param decoder_hidden: decoder hidden state (optional)
:param att_vector: previous attention vector (optional)
:param trg_mask: mask for target steps
:return: decoder outputs (outputs, hidden, att_probs, att_vectors)
"""
return self.decoder(trg_embed=self.trg_embed(trg_input),
encoder_output=encoder_output,
encoder_hidden=encoder_hidden,
src_mask=src_mask,
unroll_steps=unroll_steps,
hidden=decoder_hidden,
prev_att_vector=att_vector,
trg_mask=trg_mask,
**_kwargs)
def __repr__(self) -> str:
"""
String representation: a description of encoder, decoder and embeddings
:return: string representation
"""
return "%s(\n" \
"\tencoder=%s,\n" \
"\tdecoder=%s,\n" \
"\tsrc_embed=%s,\n" \
"\ttrg_embed=%s)" % (self.__class__.__name__, self.encoder,
self.decoder, self.src_embed,
self.trg_embed)
class _DataParallel(nn.DataParallel):
""" DataParallel wrapper to pass through the model attributes """
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
def build_model(cfg: dict = None,
src_vocab: Vocabulary = None,
trg_vocab: Vocabulary = None) -> Model:
"""
Build and initialize the model according to the configuration.
:param cfg: dictionary configuration containing model specifications
:param src_vocab: source vocabulary
:param trg_vocab: target vocabulary
:return: built and initialized model
"""
logger.info("Building an encoder-decoder model...")
src_padding_idx = src_vocab.stoi[PAD_TOKEN]
trg_padding_idx = trg_vocab.stoi[PAD_TOKEN]
src_embed = Embeddings(
**cfg["encoder"]["embeddings"], vocab_size=len(src_vocab),
padding_idx=src_padding_idx)
# this ties source and target embeddings
# for softmax layer tying, see further below
if cfg.get("tied_embeddings", False):
if src_vocab.itos == trg_vocab.itos:
# share embeddings for src and trg
trg_embed = src_embed
else:
raise ConfigurationError(
"Embedding cannot be tied since vocabularies differ.")
else:
trg_embed = Embeddings(
**cfg["decoder"]["embeddings"], vocab_size=len(trg_vocab),
padding_idx=trg_padding_idx)
# build encoder
enc_dropout = cfg["encoder"].get("dropout", 0.)
enc_emb_dropout = cfg["encoder"]["embeddings"].get("dropout", enc_dropout)
if cfg["encoder"].get("type", "recurrent") == "transformer":
assert cfg["encoder"]["embeddings"]["embedding_dim"] == \
cfg["encoder"]["hidden_size"], \
"for transformer, emb_size must be hidden_size"
encoder = TransformerEncoder(**cfg["encoder"],
emb_size=src_embed.embedding_dim,
emb_dropout=enc_emb_dropout)
else:
encoder = RecurrentEncoder(**cfg["encoder"],
emb_size=src_embed.embedding_dim,
emb_dropout=enc_emb_dropout)
# build decoder
dec_dropout = cfg["decoder"].get("dropout", 0.)
dec_emb_dropout = cfg["decoder"]["embeddings"].get("dropout", dec_dropout)
if cfg["decoder"].get("type", "recurrent") == "transformer":
decoder = TransformerDecoder(
**cfg["decoder"], encoder=encoder, vocab_size=len(trg_vocab),
emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout)
else:
decoder = RecurrentDecoder(
**cfg["decoder"], encoder=encoder, vocab_size=len(trg_vocab),
emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout)
model = Model(encoder=encoder, decoder=decoder,
src_embed=src_embed, trg_embed=trg_embed,
src_vocab=src_vocab, trg_vocab=trg_vocab)
# tie softmax layer with trg embeddings
if cfg.get("tied_softmax", False):
if trg_embed.lut.weight.shape == \
model.decoder.output_layer.weight.shape:
# (also) share trg embeddings and softmax layer:
model.decoder.output_layer.weight = trg_embed.lut.weight
else:
raise ConfigurationError(
"For tied_softmax, the decoder embedding_dim and decoder "
"hidden_size must be the same."
"The decoder must be a Transformer.")
# custom initialization of model parameters
initialize_model(model, cfg, src_padding_idx, trg_padding_idx)
# initialize embeddings from file
pretrained_enc_embed_path = cfg["encoder"]["embeddings"].get(
"load_pretrained", None)
pretrained_dec_embed_path = cfg["decoder"]["embeddings"].get(
"load_pretrained", None)
if pretrained_enc_embed_path:
logger.info("Loading pretraind src embeddings...")
model.src_embed.load_from_file(pretrained_enc_embed_path, src_vocab)
if pretrained_dec_embed_path and not cfg.get("tied_embeddings", False):
logger.info("Loading pretraind trg embeddings...")
model.trg_embed.load_from_file(pretrained_dec_embed_path, trg_vocab)
logger.info("Enc-dec model built.")
return model
| 12,260 | 37.800633 | 80 | py |
KSTER | KSTER-main/joeynmt/data.py | # coding: utf-8
"""
Data module
"""
import sys
import random
import os
import os.path
from typing import Optional
import logging
from torchtext.datasets import TranslationDataset
from torchtext import data
from torchtext.data import Dataset, Iterator, Field
from joeynmt.constants import UNK_TOKEN, EOS_TOKEN, BOS_TOKEN, PAD_TOKEN
from joeynmt.vocabulary import build_vocab, Vocabulary
logger = logging.getLogger(__name__)
def load_data(data_cfg: dict, datasets: list = None)\
-> (Dataset, Dataset, Optional[Dataset], Vocabulary, Vocabulary):
"""
Load train, dev and optionally test data as specified in configuration.
Vocabularies are created from the training set with a limit of `voc_limit`
tokens and a minimum token frequency of `voc_min_freq`
(specified in the configuration dictionary).
The training data is filtered to include sentences up to `max_sent_length`
on source and target side.
If you set ``random_train_subset``, a random selection of this size is used
from the training set instead of the full training set.
:param data_cfg: configuration dictionary for data
("data" part of configuation file)
:param datasets: list of dataset names to load
:return:
- train_data: training dataset
- dev_data: development dataset
- test_data: testdata set if given, otherwise None
- src_vocab: source vocabulary extracted from training data
- trg_vocab: target vocabulary extracted from training data
"""
if datasets is None:
datasets = ["train", "dev", "test"]
# load data from files
src_lang = data_cfg["src"]
trg_lang = data_cfg["trg"]
train_path = data_cfg.get("train", None)
dev_path = data_cfg.get("dev", None)
test_path = data_cfg.get("test", None)
if train_path is None and dev_path is None and test_path is None:
raise ValueError('Please specify at least one data source path.')
level = data_cfg["level"]
lowercase = data_cfg["lowercase"]
max_sent_length = data_cfg["max_sent_length"]
tok_fun = lambda s: list(s) if level == "char" else s.split()
src_field = data.Field(init_token=None, eos_token=EOS_TOKEN,
pad_token=PAD_TOKEN, tokenize=tok_fun,
batch_first=True, lower=lowercase,
unk_token=UNK_TOKEN,
include_lengths=True)
trg_field = data.Field(init_token=BOS_TOKEN, eos_token=EOS_TOKEN,
pad_token=PAD_TOKEN, tokenize=tok_fun,
unk_token=UNK_TOKEN,
batch_first=True, lower=lowercase,
include_lengths=True)
train_data = None
if "train" in datasets and train_path is not None:
logger.info("Loading training data...")
train_data = TranslationDataset(path=train_path,
exts=("." + src_lang, "." + trg_lang),
fields=(src_field, trg_field),
filter_pred=
lambda x: len(vars(x)['src'])
<= max_sent_length
and len(vars(x)['trg'])
<= max_sent_length)
random_train_subset = data_cfg.get("random_train_subset", -1)
if random_train_subset > -1:
# select this many training examples randomly and discard the rest
keep_ratio = random_train_subset / len(train_data)
keep, _ = train_data.split(
split_ratio=[keep_ratio, 1 - keep_ratio],
random_state=random.getstate())
train_data = keep
src_max_size = data_cfg.get("src_voc_limit", sys.maxsize)
src_min_freq = data_cfg.get("src_voc_min_freq", 1)
trg_max_size = data_cfg.get("trg_voc_limit", sys.maxsize)
trg_min_freq = data_cfg.get("trg_voc_min_freq", 1)
src_vocab_file = data_cfg.get("src_vocab", None)
trg_vocab_file = data_cfg.get("trg_vocab", None)
assert (train_data is not None) or (src_vocab_file is not None)
assert (train_data is not None) or (trg_vocab_file is not None)
logger.info("Building vocabulary...")
src_vocab = build_vocab(field="src", min_freq=src_min_freq,
max_size=src_max_size,
dataset=train_data, vocab_file=src_vocab_file)
trg_vocab = build_vocab(field="trg", min_freq=trg_min_freq,
max_size=trg_max_size,
dataset=train_data, vocab_file=trg_vocab_file)
dev_data = None
if "dev" in datasets and dev_path is not None:
logger.info("Loading dev data...")
dev_data = TranslationDataset(path=dev_path,
exts=("." + src_lang, "." + trg_lang),
fields=(src_field, trg_field))
test_data = None
if "test" in datasets and test_path is not None:
logger.info("Loading test data...")
# check if target exists
if os.path.isfile(test_path + "." + trg_lang):
test_data = TranslationDataset(
path=test_path, exts=("." + src_lang, "." + trg_lang),
fields=(src_field, trg_field))
else:
# no target is given -> create dataset from src only
test_data = MonoDataset(path=test_path, ext="." + src_lang,
field=src_field)
src_field.vocab = src_vocab
trg_field.vocab = trg_vocab
logger.info("Data loaded.")
return train_data, dev_data, test_data, src_vocab, trg_vocab
# pylint: disable=global-at-module-level
global max_src_in_batch, max_tgt_in_batch
# pylint: disable=unused-argument,global-variable-undefined
def token_batch_size_fn(new, count, sofar):
"""Compute batch size based on number of tokens (+padding)."""
global max_src_in_batch, max_tgt_in_batch
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(new.src))
src_elements = count * max_src_in_batch
if hasattr(new, 'trg'): # for monolingual data sets ("translate" mode)
max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)
tgt_elements = count * max_tgt_in_batch
else:
tgt_elements = 0
return max(src_elements, tgt_elements)
def make_data_iter(dataset: Dataset,
batch_size: int,
batch_type: str = "sentence",
train: bool = False,
shuffle: bool = False) -> Iterator:
"""
Returns a torchtext iterator for a torchtext dataset.
:param dataset: torchtext dataset containing src and optionally trg
:param batch_size: size of the batches the iterator prepares
:param batch_type: measure batch size by sentence count or by token count
:param train: whether it's training time, when turned off,
bucketing, sorting within batches and shuffling is disabled
:param shuffle: whether to shuffle the data before each epoch
(no effect if set to True for testing)
:return: torchtext iterator
"""
batch_size_fn = token_batch_size_fn if batch_type == "token" else None
if train:
# optionally shuffle and sort during training
data_iter = data.BucketIterator(
repeat=False, sort=False, dataset=dataset,
batch_size=batch_size, batch_size_fn=batch_size_fn,
train=True, sort_within_batch=True,
sort_key=lambda x: len(x.src), shuffle=shuffle)
else:
# don't sort/shuffle for validation/inference
data_iter = data.BucketIterator(
repeat=False, dataset=dataset,
batch_size=batch_size, batch_size_fn=batch_size_fn,
train=False, sort=False)
return data_iter
class MonoDataset(Dataset):
"""Defines a dataset for machine translation without targets."""
@staticmethod
def sort_key(ex):
return len(ex.src)
def __init__(self, path: str, ext: str, field: Field, **kwargs) -> None:
"""
Create a monolingual dataset (=only sources) given path and field.
:param path: Prefix of path to the data file
:param ext: Containing the extension to path for this language.
:param field: Containing the fields that will be used for data.
:param kwargs: Passed to the constructor of data.Dataset.
"""
fields = [('src', field)]
if hasattr(path, "readline"): # special usage: stdin
src_file = path
else:
src_path = os.path.expanduser(path + ext)
src_file = open(src_path)
examples = []
for src_line in src_file:
src_line = src_line.strip()
if src_line != '':
examples.append(data.Example.fromlist(
[src_line], fields))
src_file.close()
super().__init__(examples, fields, **kwargs)
| 9,111 | 37.774468 | 79 | py |
KSTER | KSTER-main/joeynmt/transformer_layers.py | # -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
from torch import Tensor
# pylint: disable=arguments-differ
class MultiHeadedAttention(nn.Module):
"""
Multi-Head Attention module from "Attention is All You Need"
Implementation modified from OpenNMT-py.
https://github.com/OpenNMT/OpenNMT-py
"""
def __init__(self, num_heads: int, size: int, dropout: float = 0.1):
"""
Create a multi-headed attention layer.
:param num_heads: the number of heads
:param size: model size (must be divisible by num_heads)
:param dropout: probability of dropping a unit
"""
super().__init__()
assert size % num_heads == 0
self.head_size = head_size = size // num_heads
self.model_size = size
self.num_heads = num_heads
self.k_layer = nn.Linear(size, num_heads * head_size)
self.v_layer = nn.Linear(size, num_heads * head_size)
self.q_layer = nn.Linear(size, num_heads * head_size)
self.output_layer = nn.Linear(size, size)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
def forward(self, k: Tensor, v: Tensor, q: Tensor, mask: Tensor = None):
"""
Computes multi-headed attention.
:param k: keys [B, M, D] with M being the sentence length.
:param v: values [B, M, D]
:param q: query [B, M, D]
:param mask: optional mask [B, 1, M]
:return:
"""
batch_size = k.size(0)
num_heads = self.num_heads
# project the queries (q), keys (k), and values (v)
k = self.k_layer(k)
v = self.v_layer(v)
q = self.q_layer(q)
# reshape q, k, v for our computation to [batch_size, num_heads, ..]
k = k.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)
v = v.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)
q = q.view(batch_size, -1, num_heads, self.head_size).transpose(1, 2)
# compute scores
q = q / math.sqrt(self.head_size)
# batch x num_heads x query_len x key_len
scores = torch.matmul(q, k.transpose(2, 3))
# apply the mask (if we have one)
# we add a dimension for the heads to it below: [B, 1, 1, M]
if mask is not None:
scores = scores.masked_fill(~mask.unsqueeze(1), float('-inf'))
# apply attention dropout and compute context vectors.
attention = self.softmax(scores)
attention = self.dropout(attention)
# get context vector (select values with attention) and reshape
# back to [B, M, D]
context = torch.matmul(attention, v)
context = context.transpose(1, 2).contiguous().view(
batch_size, -1, num_heads * self.head_size)
output = self.output_layer(context)
return output
# pylint: disable=arguments-differ
class PositionwiseFeedForward(nn.Module):
"""
Position-wise Feed-forward layer
Projects to ff_size and then back down to input_size.
"""
def __init__(self, input_size, ff_size, dropout=0.1):
"""
Initializes position-wise feed-forward layer.
:param input_size: dimensionality of the input.
:param ff_size: dimensionality of intermediate representation
:param dropout:
"""
super().__init__()
self.layer_norm = nn.LayerNorm(input_size, eps=1e-6)
self.pwff_layer = nn.Sequential(
nn.Linear(input_size, ff_size),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(ff_size, input_size),
nn.Dropout(dropout),
)
def forward(self, x):
x_norm = self.layer_norm(x)
return self.pwff_layer(x_norm) + x
# pylint: disable=arguments-differ
class PositionalEncoding(nn.Module):
"""
Pre-compute position encodings (PE).
In forward pass, this adds the position-encodings to the
input for as many time steps as necessary.
Implementation based on OpenNMT-py.
https://github.com/OpenNMT/OpenNMT-py
"""
def __init__(self,
size: int = 0,
max_len: int = 5000):
"""
Positional Encoding with maximum length max_len
:param size:
:param max_len:
:param dropout:
"""
if size % 2 != 0:
raise ValueError("Cannot use sin/cos positional encoding with "
"odd dim (got dim={:d})".format(size))
pe = torch.zeros(max_len, size)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp((torch.arange(0, size, 2, dtype=torch.float) *
-(math.log(10000.0) / size)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe = pe.unsqueeze(0) # shape: [1, size, max_len]
super().__init__()
self.register_buffer('pe', pe)
self.dim = size
def forward(self, emb):
"""Embed inputs.
Args:
emb (FloatTensor): Sequence of word vectors
``(seq_len, batch_size, self.dim)``
"""
# Add position encodings
return emb + self.pe[:, :emb.size(1)]
class TransformerEncoderLayer(nn.Module):
"""
One Transformer encoder layer has a Multi-head attention layer plus
a position-wise feed-forward layer.
"""
def __init__(self,
size: int = 0,
ff_size: int = 0,
num_heads: int = 0,
dropout: float = 0.1):
"""
A single Transformer layer.
:param size:
:param ff_size:
:param num_heads:
:param dropout:
"""
super().__init__()
self.layer_norm = nn.LayerNorm(size, eps=1e-6)
self.src_src_att = MultiHeadedAttention(num_heads, size,
dropout=dropout)
self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size,
dropout=dropout)
self.dropout = nn.Dropout(dropout)
self.size = size
# pylint: disable=arguments-differ
def forward(self, x: Tensor, mask: Tensor) -> Tensor:
"""
Forward pass for a single transformer encoder layer.
First applies layer norm, then self attention,
then dropout with residual connection (adding the input to the result),
and then a position-wise feed-forward layer.
:param x: layer input
:param mask: input mask
:return: output tensor
"""
x_norm = self.layer_norm(x)
h = self.src_src_att(x_norm, x_norm, x_norm, mask)
h = self.dropout(h) + x
o = self.feed_forward(h)
return o
class TransformerDecoderLayer(nn.Module):
"""
Transformer decoder layer.
Consists of self-attention, source-attention, and feed-forward.
"""
def __init__(self,
size: int = 0,
ff_size: int = 0,
num_heads: int = 0,
dropout: float = 0.1):
"""
Represents a single Transformer decoder layer.
It attends to the source representation and the previous decoder states.
:param size: model dimensionality
:param ff_size: size of the feed-forward intermediate layer
:param num_heads: number of heads
:param dropout: dropout to apply to input
"""
super().__init__()
self.size = size
self.trg_trg_att = MultiHeadedAttention(num_heads, size,
dropout=dropout)
self.src_trg_att = MultiHeadedAttention(num_heads, size,
dropout=dropout)
self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size,
dropout=dropout)
self.x_layer_norm = nn.LayerNorm(size, eps=1e-6)
self.dec_layer_norm = nn.LayerNorm(size, eps=1e-6)
self.dropout = nn.Dropout(dropout)
# pylint: disable=arguments-differ
def forward(self,
x: Tensor = None,
memory: Tensor = None,
src_mask: Tensor = None,
trg_mask: Tensor = None) -> Tensor:
"""
Forward pass of a single Transformer decoder layer.
:param x: inputs
:param memory: source representations
:param src_mask: source mask
:param trg_mask: target mask (so as to not condition on future steps)
:return: output tensor
"""
# decoder/target self-attention
x_norm = self.x_layer_norm(x)
h1 = self.trg_trg_att(x_norm, x_norm, x_norm, mask=trg_mask)
h1 = self.dropout(h1) + x
# source-target attention
h1_norm = self.dec_layer_norm(h1)
h2 = self.src_trg_att(memory, memory, h1_norm, mask=src_mask)
# final position-wise feed-forward layer
o = self.feed_forward(self.dropout(h2) + h1)
return o
def context_representations(self, x: Tensor = None,
memory: Tensor = None,
src_mask: Tensor = None,
trg_mask: Tensor = None) -> Tensor:
"""
Compute context representations from last decoder layer in Transformer.
:param x: inputs
:param memory: source representations
:param src_mask: source mask
:param trg_mask: target mask (so as to not condition on future steps)
:return: output tensor
"""
# decoder/target self-attention
x_norm = self.x_layer_norm(x)
h1 = self.trg_trg_att(x_norm, x_norm, x_norm, mask=trg_mask)
h1 = self.dropout(h1) + x
# source-target attention
h1_norm = self.dec_layer_norm(h1)
h2 = self.src_trg_att(memory, memory, h1_norm, mask=src_mask)
# final position-wise feed-forward layer
o = self.feed_forward.layer_norm(self.dropout(h2) + h1)
return o | 10,131 | 32.66113 | 80 | py |
KSTER | KSTER-main/joeynmt/faiss_index.py | # -*- coding: utf-8 -*-
# create@ 2021-02-04 13:50
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import faiss
import numpy as np
from typing import Tuple
import re
class FaissIndex(object):
def __init__(self, factory_template: str = "IVF256,PQ32", load_index_path: str = None, use_gpu: bool = True, index_type: str = "L2") -> None:
super(FaissIndex, self).__init__()
self.factory_template = factory_template
self.gpu_num = faiss.get_num_gpus()
self.use_gpu = use_gpu and (self.gpu_num > 0)
self.index_type = index_type
self._is_trained = False
if load_index_path != None:
self.load(index_path=load_index_path)
@property
def is_trained(self) -> bool:
return self._is_trained
def _get_clustering_parameters(self, total_samples: int) -> Tuple[int, int]:
if 0 < total_samples <= 10 ** 6:
centroids = int(8 * total_samples ** 0.5)
training_samples = total_samples
elif 10 ** 6 < total_samples <= 10 ** 7:
centroids = 65536
training_samples = min(total_samples, 64 * centroids)
else:
centroids = 262144
training_samples = min(total_samples, 64 * centroids)
return centroids, training_samples
def _initialize_index(self, dimension: int, centroids: int) -> faiss.Index:
template = re.compile(r"IVF\d*").sub(f"IVF{centroids}", self.factory_template)
if self.index_type == "L2":
index = faiss.index_factory(dimension, template, faiss.METRIC_L2)
else: # self.index_type == "IP"
index = faiss.index_factory(dimension, template, faiss.METRIC_INNER_PRODUCT)
if self.use_gpu:
index = faiss.index_cpu_to_all_gpus(index)
# index_ivf = faiss.extract_index_ivf(index)
# if self.index_type == "L2":
# clustering_index = faiss.index_cpu_to_all_gpus(faiss.IndexFlatL2(dimension))
# else: # self.index_type == "IP"
# clustering_index = faiss.index_cpu_to_all_gpus(faiss.IndexFlatIP(dimension))
# index_ivf.clustering_index = clustering_index
return index
def train(self, embeddings_path: str) -> None:
embeddings = np.load(embeddings_path, mmap_mode="r")
total_samples, dimension = embeddings.shape
del embeddings
centroids, training_samples = self._get_clustering_parameters(total_samples)
self.index = self._initialize_index(dimension, centroids)
training_embeddings = self._get_training_embeddings(embeddings_path, training_samples).astype(np.float32)
self.index.train(training_embeddings)
self._is_trained = True
def _get_training_embeddings(self, embeddings_path: str, training_samples: int) -> np.ndarray:
embeddings = np.load(embeddings_path, mmap_mode="r")
total_samples = embeddings.shape[0]
sample_indices = np.random.choice(total_samples, training_samples, replace=False)
sample_indices.sort()
training_embeddings = embeddings[sample_indices]
return training_embeddings
def add(self, embeddings_path: str, batch_size: int = 10000) -> None:
assert self.is_trained
embeddings = np.load(embeddings_path)
total_samples = embeddings.shape[0]
for i in range(0, total_samples, batch_size):
start = i
end = min(total_samples, i + batch_size)
batch_embeddings = embeddings[start: end].astype(np.float32)
self.index.add(batch_embeddings)
del embeddings
def load(self, index_path: str) -> faiss.Index:
self.index = faiss.read_index(index_path)
if self.use_gpu:
self.index = faiss.index_cpu_to_all_gpus(self.index)
self._is_trained = True
def export(self, index_path: str) -> None:
assert self.is_trained
if self.use_gpu:
index = faiss.index_gpu_to_cpu(self.index)
else:
index = self.index
faiss.write_index(index, index_path)
def search(self, embeddings: np.ndarray, top_k: int = 1) -> Tuple[np.ndarray, np.ndarray]:
assert self.is_trained
distances, indices = self.index.search(embeddings, k=top_k)
return distances, indices
def set_probe(self, nprobe):
self.index.nprobe = nprobe
@property
def total(self):
"""
inspect index volume
:return:
"""
return self.index.ntotal | 4,631 | 39.631579 | 145 | py |
KSTER | KSTER-main/joeynmt/initialization.py | # coding: utf-8
"""
Implements custom initialization
"""
import math
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.init import _calculate_fan_in_and_fan_out
def orthogonal_rnn_init_(cell: nn.RNNBase, gain: float = 1.):
"""
Orthogonal initialization of recurrent weights
RNN parameters contain 3 or 4 matrices in one parameter, so we slice it.
"""
with torch.no_grad():
for _, hh, _, _ in cell.all_weights:
for i in range(0, hh.size(0), cell.hidden_size):
nn.init.orthogonal_(hh.data[i:i + cell.hidden_size], gain=gain)
def lstm_forget_gate_init_(cell: nn.RNNBase, value: float = 1.) -> None:
"""
Initialize LSTM forget gates with `value`.
:param cell: LSTM cell
:param value: initial value, default: 1
"""
with torch.no_grad():
for _, _, ih_b, hh_b in cell.all_weights:
l = len(ih_b)
ih_b.data[l // 4:l // 2].fill_(value)
hh_b.data[l // 4:l // 2].fill_(value)
def xavier_uniform_n_(w: Tensor, gain: float = 1., n: int = 4) -> None:
"""
Xavier initializer for parameters that combine multiple matrices in one
parameter for efficiency. This is e.g. used for GRU and LSTM parameters,
where e.g. all gates are computed at the same time by 1 big matrix.
:param w: parameter
:param gain: default 1
:param n: default 4
"""
with torch.no_grad():
fan_in, fan_out = _calculate_fan_in_and_fan_out(w)
assert fan_out % n == 0, "fan_out should be divisible by n"
fan_out //= n
std = gain * math.sqrt(2.0 / (fan_in + fan_out))
a = math.sqrt(3.0) * std
nn.init.uniform_(w, -a, a)
# pylint: disable=too-many-branches
def initialize_model(model: nn.Module, cfg: dict, src_padding_idx: int,
trg_padding_idx: int) -> None:
"""
This initializes a model based on the provided config.
All initializer configuration is part of the `model` section of the
configuration file.
For an example, see e.g. `https://github.com/joeynmt/joeynmt/
blob/master/configs/iwslt_envi_xnmt.yaml#L47`
The main initializer is set using the `initializer` key.
Possible values are `xavier`, `uniform`, `normal` or `zeros`.
(`xavier` is the default).
When an initializer is set to `uniform`, then `init_weight` sets the
range for the values (-init_weight, init_weight).
When an initializer is set to `normal`, then `init_weight` sets the
standard deviation for the weights (with mean 0).
The word embedding initializer is set using `embed_initializer` and takes
the same values. The default is `normal` with `embed_init_weight = 0.01`.
Biases are initialized separately using `bias_initializer`.
The default is `zeros`, but you can use the same initializers as
the main initializer.
Set `init_rnn_orthogonal` to True if you want RNN orthogonal initialization
(for recurrent matrices). Default is False.
`lstm_forget_gate` controls how the LSTM forget gate is initialized.
Default is `1`.
:param model: model to initialize
:param cfg: the model configuration
:param src_padding_idx: index of source padding token
:param trg_padding_idx: index of target padding token
"""
# defaults: xavier, embeddings: normal 0.01, biases: zeros, no orthogonal
gain = float(cfg.get("init_gain", 1.0)) # for xavier
init = cfg.get("initializer", "xavier")
init_weight = float(cfg.get("init_weight", 0.01))
embed_init = cfg.get("embed_initializer", "normal")
embed_init_weight = float(cfg.get("embed_init_weight", 0.01))
embed_gain = float(cfg.get("embed_init_gain", 1.0)) # for xavier
bias_init = cfg.get("bias_initializer", "zeros")
bias_init_weight = float(cfg.get("bias_init_weight", 0.01))
# pylint: disable=unnecessary-lambda, no-else-return
def _parse_init(s, scale, _gain):
scale = float(scale)
assert scale > 0., "incorrect init_weight"
if s.lower() == "xavier":
return lambda p: nn.init.xavier_uniform_(p, gain=_gain)
elif s.lower() == "uniform":
return lambda p: nn.init.uniform_(p, a=-scale, b=scale)
elif s.lower() == "normal":
return lambda p: nn.init.normal_(p, mean=0., std=scale)
elif s.lower() == "zeros":
return lambda p: nn.init.zeros_(p)
else:
raise ValueError("unknown initializer")
init_fn_ = _parse_init(init, init_weight, gain)
embed_init_fn_ = _parse_init(embed_init, embed_init_weight, embed_gain)
bias_init_fn_ = _parse_init(bias_init, bias_init_weight, gain)
with torch.no_grad():
for name, p in model.named_parameters():
if "embed" in name:
embed_init_fn_(p)
elif "bias" in name:
bias_init_fn_(p)
elif len(p.size()) > 1:
# RNNs combine multiple matrices is one, which messes up
# xavier initialization
if init == "xavier" and "rnn" in name:
n = 1
if "encoder" in name:
n = 4 if isinstance(model.encoder.rnn, nn.LSTM) else 3
elif "decoder" in name:
n = 4 if isinstance(model.decoder.rnn, nn.LSTM) else 3
xavier_uniform_n_(p.data, gain=gain, n=n)
else:
init_fn_(p)
# zero out paddings
model.src_embed.lut.weight.data[src_padding_idx].zero_()
model.trg_embed.lut.weight.data[trg_padding_idx].zero_()
orthogonal = cfg.get("init_rnn_orthogonal", False)
lstm_forget_gate = cfg.get("lstm_forget_gate", 1.)
# encoder rnn orthogonal initialization & LSTM forget gate
if hasattr(model.encoder, "rnn"):
if orthogonal:
orthogonal_rnn_init_(model.encoder.rnn)
if isinstance(model.encoder.rnn, nn.LSTM):
lstm_forget_gate_init_(model.encoder.rnn, lstm_forget_gate)
# decoder rnn orthogonal initialization & LSTM forget gate
if hasattr(model.decoder, "rnn"):
if orthogonal:
orthogonal_rnn_init_(model.decoder.rnn)
if isinstance(model.decoder.rnn, nn.LSTM):
lstm_forget_gate_init_(model.decoder.rnn, lstm_forget_gate)
| 6,419 | 35.271186 | 79 | py |
KSTER | KSTER-main/joeynmt/builders.py | # coding: utf-8
"""
Collection of builder functions
"""
from typing import Callable, Optional, Generator
import torch
from torch import nn
from torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau, \
StepLR, ExponentialLR
from torch.optim import Optimizer
from joeynmt.helpers import ConfigurationError
def build_gradient_clipper(config: dict) -> Optional[Callable]:
"""
Define the function for gradient clipping as specified in configuration.
If not specified, returns None.
Current options:
- "clip_grad_val": clip the gradients if they exceed this value,
see `torch.nn.utils.clip_grad_value_`
- "clip_grad_norm": clip the gradients if their norm exceeds this value,
see `torch.nn.utils.clip_grad_norm_`
:param config: dictionary with training configurations
:return: clipping function (in-place) or None if no gradient clipping
"""
clip_grad_fun = None
if "clip_grad_val" in config.keys():
clip_value = config["clip_grad_val"]
clip_grad_fun = lambda params: \
nn.utils.clip_grad_value_(parameters=params,
clip_value=clip_value)
elif "clip_grad_norm" in config.keys():
max_norm = config["clip_grad_norm"]
clip_grad_fun = lambda params: \
nn.utils.clip_grad_norm_(parameters=params, max_norm=max_norm)
if "clip_grad_val" in config.keys() and "clip_grad_norm" in config.keys():
raise ConfigurationError(
"You can only specify either clip_grad_val or clip_grad_norm.")
return clip_grad_fun
def build_optimizer(config: dict, parameters: Generator) -> Optimizer:
"""
Create an optimizer for the given parameters as specified in config.
Except for the weight decay and initial learning rate,
default optimizer settings are used.
Currently supported configuration settings for "optimizer":
- "sgd" (default): see `torch.optim.SGD`
- "adam": see `torch.optim.adam`
- "adagrad": see `torch.optim.adagrad`
- "adadelta": see `torch.optim.adadelta`
- "rmsprop": see `torch.optim.RMSprop`
The initial learning rate is set according to "learning_rate" in the config.
The weight decay is set according to "weight_decay" in the config.
If they are not specified, the initial learning rate is set to 3.0e-4, the
weight decay to 0.
Note that the scheduler state is saved in the checkpoint, so if you load
a model for further training you have to use the same type of scheduler.
:param config: configuration dictionary
:param parameters:
:return: optimizer
"""
optimizer_name = config.get("optimizer", "sgd").lower()
learning_rate = config.get("learning_rate", 3.0e-4)
weight_decay = config.get("weight_decay", 0)
if optimizer_name == "adam":
adam_betas = config.get("adam_betas", (0.9, 0.999))
optimizer = torch.optim.Adam(parameters,
weight_decay=weight_decay,
lr=learning_rate,
betas=adam_betas)
elif optimizer_name == "adagrad":
optimizer = torch.optim.Adagrad(parameters,
weight_decay=weight_decay,
lr=learning_rate)
elif optimizer_name == "adadelta":
optimizer = torch.optim.Adadelta(parameters,
weight_decay=weight_decay,
lr=learning_rate)
elif optimizer_name == "rmsprop":
optimizer = torch.optim.RMSprop(parameters,
weight_decay=weight_decay,
lr=learning_rate)
elif optimizer_name == "sgd":
# default
optimizer = torch.optim.SGD(parameters,
weight_decay=weight_decay,
lr=learning_rate)
else:
raise ConfigurationError("Invalid optimizer. Valid options: 'adam', "
"'adagrad', 'adadelta', 'rmsprop', 'sgd'.")
return optimizer
def build_scheduler(config: dict, optimizer: Optimizer, scheduler_mode: str,
hidden_size: int = 0) \
-> (Optional[_LRScheduler], Optional[str]):
"""
Create a learning rate scheduler if specified in config and
determine when a scheduler step should be executed.
Current options:
- "plateau": see `torch.optim.lr_scheduler.ReduceLROnPlateau`
- "decaying": see `torch.optim.lr_scheduler.StepLR`
- "exponential": see `torch.optim.lr_scheduler.ExponentialLR`
- "noam": see `joeynmt.builders.NoamScheduler`
- "warmupexponentialdecay": see
`joeynmt.builders.WarmupExponentialDecayScheduler`
If no scheduler is specified, returns (None, None) which will result in
a constant learning rate.
:param config: training configuration
:param optimizer: optimizer for the scheduler, determines the set of
parameters which the scheduler sets the learning rate for
:param scheduler_mode: "min" or "max", depending on whether the validation
score should be minimized or maximized.
Only relevant for "plateau".
:param hidden_size: encoder hidden size (required for NoamScheduler)
:return:
- scheduler: scheduler object,
- scheduler_step_at: either "validation" or "epoch"
"""
scheduler, scheduler_step_at = None, None
if "scheduling" in config.keys() and \
config["scheduling"]:
if config["scheduling"].lower() == "plateau":
# learning rate scheduler
scheduler = ReduceLROnPlateau(optimizer=optimizer,
mode=scheduler_mode,
verbose=False,
threshold_mode='abs',
factor=config.get(
"decrease_factor", 0.1),
patience=config.get("patience", 10))
# scheduler step is executed after every validation
scheduler_step_at = "validation"
elif config["scheduling"].lower() == "decaying":
scheduler = StepLR(optimizer=optimizer,
step_size=config.get("decaying_step_size", 1))
# scheduler step is executed after every epoch
scheduler_step_at = "epoch"
elif config["scheduling"].lower() == "exponential":
scheduler = ExponentialLR(optimizer=optimizer,
gamma=config.get("decrease_factor", 0.99))
# scheduler step is executed after every epoch
scheduler_step_at = "epoch"
elif config["scheduling"].lower() == "noam":
factor = config.get("learning_rate_factor", 1)
warmup = config.get("learning_rate_warmup", 4000)
scheduler = NoamScheduler(hidden_size=hidden_size,
factor=factor,
warmup=warmup,
optimizer=optimizer)
scheduler_step_at = "step"
elif config["scheduling"].lower() == "warmupexponentialdecay":
min_rate = config.get("learning_rate_min", 1.0e-5)
decay_rate = config.get("learning_rate_decay", 0.1)
warmup = config.get("learning_rate_warmup", 4000)
peak_rate = config.get("learning_rate_peak", 1.0e-3)
decay_length = config.get("learning_rate_decay_length", 10000)
scheduler = WarmupExponentialDecayScheduler(
min_rate=min_rate,
decay_rate=decay_rate,
warmup=warmup,
optimizer=optimizer,
peak_rate=peak_rate,
decay_length=decay_length)
scheduler_step_at = "step"
return scheduler, scheduler_step_at
class NoamScheduler:
"""
The Noam learning rate scheduler used in "Attention is all you need"
See Eq. 3 in https://arxiv.org/pdf/1706.03762.pdf
"""
def __init__(self,
hidden_size: int,
optimizer: torch.optim.Optimizer,
factor: float = 1,
warmup: int = 4000):
"""
Warm-up, followed by learning rate decay.
:param hidden_size:
:param optimizer:
:param factor: decay factor
:param warmup: number of warmup steps
"""
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.hidden_size = hidden_size
self._rate = 0
def step(self):
"""Update parameters and rate"""
self._step += 1
rate = self._compute_rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
def _compute_rate(self):
"""Implement `lrate` above"""
step = self._step
return self.factor * \
(self.hidden_size ** (-0.5) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
def state_dict(self):
"""Returns dictionary of values necessary to reconstruct scheduler"""
state_dict = {
"step": self._step,
"warmup": self.warmup,
"factor": self.factor,
"hidden_size": self.hidden_size,
"rate": self._rate
}
return state_dict
def load_state_dict(self, state_dict):
"""Given a state_dict, this function loads scheduler's state"""
self._step = state_dict["step"]
self.warmup = state_dict["warmup"]
self.factor = state_dict["factor"]
self.hidden_size = state_dict["hidden_size"]
self._rate = state_dict["rate"]
class WarmupExponentialDecayScheduler:
"""
A learning rate scheduler similar to Noam, but modified:
Keep the warm up period but make it so that the decay rate can be tuneable.
The decay is exponential up to a given minimum rate.
"""
def __init__(self,
optimizer: torch.optim.Optimizer,
peak_rate: float = 1.0e-3,
decay_length: int = 10000,
warmup: int = 4000,
decay_rate: float = 0.5,
min_rate: float = 1.0e-5):
"""
Warm-up, followed by exponential learning rate decay.
:param peak_rate: maximum learning rate at peak after warmup
:param optimizer:
:param decay_length: decay length after warmup
:param decay_rate: decay rate after warmup
:param warmup: number of warmup steps
:param min_rate: minimum learning rate
"""
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.decay_length = decay_length
self.peak_rate = peak_rate
self._rate = 0
self.decay_rate = decay_rate
self.min_rate = min_rate
def step(self):
"""Update parameters and rate"""
self._step += 1
rate = self._compute_rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
def _compute_rate(self):
"""Implement `lrate` above"""
step = self._step
warmup = self.warmup
if step < warmup:
rate = step * self.peak_rate / warmup
else:
exponent = (step - warmup) / self.decay_length
rate = self.peak_rate * (self.decay_rate**exponent)
return max(rate, self.min_rate)
def state_dict(self):
"""Returns dictionary of values necessary to reconstruct scheduler"""
state_dict = {
"warmup": self.warmup,
"step": self._step,
"decay_length": self.decay_length,
"peak_rate": self.peak_rate,
"rate": self._rate,
"decay_rate": self.decay_rate,
"min_rate": self.min_rate
}
return state_dict
def load_state_dict(self, state_dict):
"""Given a state_dict, this function loads scheduler's state"""
self.warmup = state_dict['warmup']
self._step = state_dict['step']
self.decay_length = state_dict['decay_length']
self.peak_rate = state_dict['peak_rate']
self._rate = state_dict['rate']
self.decay_rate = state_dict['decay_rate']
self.min_rate = state_dict['min_rate']
| 12,622 | 38.446875 | 80 | py |
KSTER | KSTER-main/joeynmt/database.py | # -*- coding: utf-8 -*-
# create@ 2021-01-26 18:02
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from typing import Tuple
import numpy as np
from joeynmt.faiss_index import FaissIndex
class Database(object):
"""
Initialize this class with index path, which is built offline,
and token path which mapping retrieval indices to token id.
"""
def __init__(self, index_path: str, token_path: str, nprobe: int = 16) -> None:
super(Database, self).__init__()
self.index = FaissIndex(load_index_path=index_path, use_gpu=True)
self.index.set_probe(nprobe)
self.token_map = self.load_token_mapping(token_path)
@staticmethod
def load_token_mapping(token_path: str) -> np.ndarray:
"""
This function is used to load token mapping from a text file.
:param token_path: the path of token_map text file.
:return token_map: np.ndarray shape (index_size,)
token_map[i] is the index of i-th embedding in the vocabulary.
"""
with open(token_path) as f:
token_map = [int(token_id) for token_id in f.readlines()]
token_map = np.asarray(token_map).astype(np.int32)
return token_map
def search(self, embeddings: np.ndarray, top_k: int = 16) -> Tuple[np.ndarray, np.ndarray]:
"""
This function is use to search nearest top_k embeddings from the Faiss index.
:param embeddings: np.ndarray (batch_size, d)
:param top_k: int
:return distances: np.ndarray (batch_size, top_k)
:return token_indices: np.ndarray (batch_size, top_k)
"""
# D, I has shape of batch * top_k
distances, indices = self.index.search(embeddings, top_k)
token_indices = self.token_map[indices]
return distances, token_indices
class EnhancedDatabase(Database):
def __init__(self, index_path: str, token_path: str, embedding_path: str, in_memory: bool = True, nprobe: int = 16) -> None:
super(EnhancedDatabase, self).__init__(index_path, token_path, nprobe)
if in_memory:
self.embeddings = np.load(embedding_path)
else:
self.embeddings = np.load(embedding_path, mmap_mode="r")
def enhanced_search(self, embeddings: np.ndarray, top_k: int = 16, retrieval_dropout: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
This function is use to search nearest top_k embeddings from the Faiss index.
:param embeddings: np.ndarray (batch_size, d)
:param top_k: int
:param mask_num: int
:return distances: np.ndarray (batch_size, top_k)
:return token_indices: np.ndarray (batch_size, top_k)
:return hidden: np.ndarray (batch_size, top_k, d)
"""
# D, I has shape of batch * top_k
if retrieval_dropout:
distances, indices = self.index.search(embeddings, top_k + 1)
distances = distances[:, 1:]
indices = indices[:, 1:]
else:
distances, indices = self.index.search(embeddings, top_k)
token_indices = self.token_map[indices]
batch_size = indices.shape[0]
indices = indices.reshape(-1)
hidden = self.embeddings[indices]
d = hidden.shape[-1]
hidden = hidden.reshape(batch_size, top_k, d)
return distances, token_indices, hidden | 3,496 | 37.01087 | 149 | py |
KSTER | KSTER-main/joeynmt/combiners.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
import numpy as np
import math
from typing import Tuple
from joeynmt.database import Database, EnhancedDatabase
from joeynmt.kernel import Kernel, GaussianKernel, LaplacianKernel
class Combiner(nn.Module):
def __init__(self) -> None:
super(Combiner, self).__init__()
def forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> torch.Tensor:
"""
:param hidden: hidden states FloatTensor (batch_size, seq_len, hidden_size)
:param logits: hidden states FloatTensor (batch_size, seq_len, hidden_size)
:return log_probs: FloatTensor (batch_size, seq_len, vocab_size)
"""
raise NotImplementedError("The forward method is not implemented in the Combiner class.")
def detailed_forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> Tuple[torch.Tensor,
torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
:param hidden: hidden states FloatTensor (batch_size, seq_len, hidden_size)
:param logits: hidden states FloatTensor (batch_size, seq_len, hidden_size)
:param trg: true targets for force decoding.
:return mixed_distribution: FloatTensor (batch_size, seq_len, vocab_size)
:return model_based_distribution: FloatTensor (batch_size, seq_len, vocab_size)
:return example_based_distribution: FloatTensor (batch_size, seq_len, vocab_size)
:return mixing_weight: FloatTensor (batch_size, seq_len)
:return bandwidth: FloatTensor (batch_size, seq_len)
"""
raise NotImplementedError("The forward method is not implemented in the Combiner class.")
class NoCombiner(Combiner):
def __init__(self) -> None:
super(NoCombiner, self).__init__()
def forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> torch.Tensor:
log_probs = F.log_softmax(logits, dim=-1)
return log_probs
class StaticCombiner(Combiner):
def __init__(self, database: Database, top_k: int, mixing_weight: float, kernel: Kernel, bandwidth: float) -> None:
super(StaticCombiner, self).__init__()
self.database = database
self.top_k = top_k
self.mixing_weight = mixing_weight
self.kernel = kernel
self.bandwidth = bandwidth
def forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> torch.Tensor:
batch_size, seq_len, hidden_size = hidden.size()
vocab_size = logits.size(-1)
hidden = hidden.view(batch_size * seq_len, hidden_size)
logits = logits.view(batch_size * seq_len, vocab_size)
model_based_distribution = F.softmax(logits, dim=-1)
vocab_size = model_based_distribution.size(-1)
distances, token_indices = self.database.search(hidden.cpu().numpy(), top_k=self.top_k)
distances = torch.FloatTensor(distances).to(hidden.device)
token_indices = torch.LongTensor(token_indices).to(hidden.device)
example_based_distribution, _ = self.kernel.compute_example_based_distribution(distances, self.bandwidth, token_indices, vocab_size)
mixed_distribution = (1 - self.mixing_weight) * model_based_distribution + self.mixing_weight * example_based_distribution
log_probs = torch.log(mixed_distribution)
log_probs = log_probs.view(batch_size, seq_len, vocab_size).contiguous()
return log_probs
class DynamicCombiner(Combiner):
def __init__(self, database: EnhancedDatabase, top_k: int, kernel: Kernel) -> None:
super(DynamicCombiner, self).__init__()
self.database = database
self.top_k = top_k
self.kernel = kernel
dimension = database.index.index.d
self.bandwidth_estimator = nn.Linear(2 * dimension, 1)
if isinstance(kernel, GaussianKernel):
self.bandwidth_estimator.bias.data[0] = math.log(100)
else:
self.bandwidth_estimator.bias.data[0] = math.log(10)
self.mixing_weight_estimator = nn.Sequential(
nn.Linear(2 * dimension, dimension),
nn.ReLU(),
nn.Linear(dimension, 1)
)
def compute_bandwidth(self, hidden: torch.Tensor, searched_hidden: torch.Tensor) -> torch.Tensor:
"""
:param hidden: torch.FloatTensor (batch_size * seq_len, hidden_size)
:param searched_hidden: torch.FloatTensor (batch_size * seq_len, top_k, hidden_size)
:return bandwidth: torch.FloatTensor (batch_size * seq_len,)
"""
mean_hidden = searched_hidden.mean(dim=1)
bandwidth = torch.exp(self.bandwidth_estimator(torch.cat([hidden, mean_hidden], dim=-1)))
return bandwidth
def compute_mixing_weight(self, hidden: torch.Tensor, searched_hidden: torch.Tensor, sparse_probs: torch.Tensor) -> torch.Tensor:
"""
:param hidden: torch.FloatTensor (batch_size * seq_len, hidden_size)
:param searched_hidden: torch.FloatTensor (batch_size * seq_len, top_k, hidden_size)
:param sparse_probs: torch.FloatTensor (batch_size * seq_len, top_k)
:return mixing_weight: torch.FloatTensor (batch_size * seq_len,)
"""
merged_hidden = searched_hidden.transpose(1, 2).matmul(sparse_probs.unsqueeze(-1)).squeeze(-1)
mixing_weight = torch.sigmoid(self.mixing_weight_estimator(torch.cat([hidden, merged_hidden], dim=-1)))
return mixing_weight
def forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> torch.Tensor:
# reshape hidden and logits for database retrieval
batch_size, seq_len, hidden_size = hidden.size()
vocab_size = logits.size(-1)
hidden = hidden.view(batch_size * seq_len, hidden_size)
logits = logits.view(batch_size * seq_len, vocab_size)
# retrieve examples from database
if self.training:
distances, token_indices, searched_hidden = self.database.enhanced_search(hidden.cpu().numpy(),
top_k=self.top_k, retrieval_dropout=True)
else:
distances, token_indices, searched_hidden = self.database.enhanced_search(hidden.cpu().numpy(),
top_k=self.top_k, retrieval_dropout=False)
distances = torch.FloatTensor(distances).to(hidden.device)
token_indices = torch.LongTensor(token_indices).to(hidden.device)
searched_hidden = torch.FloatTensor(searched_hidden).to(hidden.device)
# compute dynamic database bandwidth
bandwidth = self.compute_bandwidth(hidden, searched_hidden)
model_based_distribution = F.softmax(logits, dim=-1)
vocab_size = model_based_distribution.size(-1)
example_based_distribution, sparse_example_based_distribution = self.kernel.compute_example_based_distribution(distances,
bandwidth, token_indices, vocab_size)
mixing_weight = self.compute_mixing_weight(hidden, searched_hidden, sparse_example_based_distribution)
# compute prediction distribution by interpolating between model distribution and database distribution
mixed_distribution = (1 - mixing_weight) * model_based_distribution + mixing_weight * example_based_distribution
log_probs = torch.log(mixed_distribution)
log_probs = log_probs.view(batch_size, seq_len, vocab_size).contiguous()
return log_probs
def detailed_forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> Tuple[torch.Tensor,
torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
:param hidden: pre-softmax hidden states FloatTensor (batch_size, seq_len, hidden_size)
:param logits: pre-softmax hidden states FloatTensor (batch_size, seq_len, hidden_size)
:param trg: true targets for force decoding.
:return mixed_distribution: FloatTensor (batch_size, seq_len, vocab_size)
:return model_based_distribution: FloatTensor (batch_size, seq_len, vocab_size)
:return example_based_distribution: FloatTensor (batch_size, seq_len, vocab_size)
:return mixing_weight: FloatTensor (batch_size, seq_len)
:return bandwidth: FloatTensor (batch_size, seq_len)
"""
# reshape hidden and logits for knn retrieval
batch_size, seq_len, hidden_size = hidden.size()
vocab_size = logits.size(-1)
hidden = hidden.view(batch_size * seq_len, hidden_size)
logits = logits.view(batch_size * seq_len, vocab_size)
# retrieve examples from database
if self.training:
distances, token_indices, searched_hidden = self.database.enhanced_search(hidden.cpu().numpy(),
top_k=self.top_k, retrieval_dropout=True)
else:
distances, token_indices, searched_hidden = self.database.enhanced_search(hidden.cpu().numpy(),
top_k=self.top_k, retrieval_dropout=False)
distances = torch.FloatTensor(distances).to(hidden.device)
token_indices = torch.LongTensor(token_indices).to(hidden.device)
searched_hidden = torch.FloatTensor(searched_hidden).to(hidden.device)
# compute dynamic database bandwidth
bandwidth = self.compute_bandwidth(hidden, searched_hidden)
model_based_distribution = F.softmax(logits, dim=-1)
vocab_size = model_based_distribution.size(-1)
example_based_distribution, sparse_example_based_distribution = self.kernel.compute_example_based_distribution(distances,
bandwidth, token_indices, vocab_size)
mixing_weight = self.compute_mixing_weight(hidden, searched_hidden, sparse_example_based_distribution)
# compute prediction distribution by interpolating between model distribution and database distribution
mixed_distribution = (1 - mixing_weight) * model_based_distribution + mixing_weight * example_based_distribution
mixed_distribution = mixed_distribution.view(batch_size, seq_len, vocab_size).contiguous()
model_based_distribution = model_based_distribution.view(batch_size, seq_len, vocab_size).contiguous()
example_based_distribution = example_based_distribution.view(batch_size, seq_len, vocab_size).contiguous()
mixing_weight = mixing_weight.squeeze(-1).view(batch_size, seq_len).contiguous()
bandwidth = bandwidth.squeeze(-1).view(batch_size, seq_len).contiguous()
return mixed_distribution, model_based_distribution, example_based_distribution, mixing_weight, bandwidth
def build_combiner(cfg: dict) -> Combiner:
combiner_cfg = cfg["combiner"]
combiner_type = combiner_cfg["type"]
if combiner_type == "no_combiner":
combiner = NoCombiner()
elif combiner_type == "static_combiner":
database = Database(
index_path=combiner_cfg["index_path"],
token_path=combiner_cfg["token_map_path"]
)
combiner = StaticCombiner(
database=database,
top_k=combiner_cfg["top_k"],
mixing_weight=combiner_cfg["mixing_weight"],
bandwidth=combiner_cfg["bandwidth"],
kernel=GaussianKernel() if combiner_cfg["kernel"] == "gaussian" else LaplacianKernel()
)
elif "dynamic" in combiner_type:
database = EnhancedDatabase(
index_path=combiner_cfg["index_path"],
token_path=combiner_cfg["token_map_path"],
embedding_path=combiner_cfg["embedding_path"],
in_memory=combiner_cfg["in_memory"]
)
combiner = DynamicCombiner(
database=database,
top_k=combiner_cfg["top_k"],
kernel=GaussianKernel() if combiner_cfg["kernel"] == "gaussian" else LaplacianKernel()
)
else:
raise ValueError("The %s is not supported currently." % combiner_type)
return combiner | 11,897 | 46.214286 | 140 | py |
KSTER | KSTER-main/joeynmt/metrics.py | # coding: utf-8
"""
This module holds various MT evaluation metrics.
"""
from typing import List
import sacrebleu
def chrf(hypotheses, references, remove_whitespace=True):
"""
Character F-score from sacrebleu
:param hypotheses: list of hypotheses (strings)
:param references: list of references (strings)
:param remove_whitespace: (bool)
:return:
"""
return sacrebleu.corpus_chrf(hypotheses=hypotheses, references=[references],
remove_whitespace=remove_whitespace).score
def bleu(hypotheses, references, tokenize="13a"):
"""
Raw corpus BLEU from sacrebleu (without tokenization)
:param hypotheses: list of hypotheses (strings)
:param references: list of references (strings)
:param tokenize: one of {'none', '13a', 'intl', 'zh', 'ja-mecab'}
:return:
"""
return sacrebleu.corpus_bleu(hypotheses, [references], tokenize=tokenize).score
def token_accuracy(hypotheses: List[List[str]], references: List[List[str]]) \
-> float:
"""
Compute the accuracy of hypothesis tokens: correct tokens / all tokens
Tokens are correct if they appear in the same position in the reference.
:param hypotheses: list of tokenized hypotheses (List[List[str]])
:param references: list of tokenized references (List[List[str]])
:return: token accuracy (float)
"""
correct_tokens = 0
all_tokens = 0
assert len(hypotheses) == len(references)
for hyp, ref in zip(hypotheses, references):
all_tokens += len(hyp)
for h_i, r_i in zip(hyp, ref):
# min(len(h), len(r)) tokens considered
if h_i == r_i:
correct_tokens += 1
return (correct_tokens / all_tokens)*100 if all_tokens > 0 else 0.0
def sequence_accuracy(hypotheses, references):
"""
Compute the accuracy of hypothesis tokens: correct tokens / all tokens
Tokens are correct if they appear in the same position in the reference.
:param hypotheses: list of hypotheses (strings)
:param references: list of references (strings)
:return:
"""
assert len(hypotheses) == len(references)
correct_sequences = sum([1 for (hyp, ref) in zip(hypotheses, references)
if hyp == ref])
return (correct_sequences / len(hypotheses))*100 if hypotheses else 0.0
| 2,355 | 32.657143 | 83 | py |
KSTER | KSTER-main/joeynmt/__init__.py | 0 | 0 | 0 | py | |
KSTER | KSTER-main/joeynmt/search.py | # coding: utf-8
import torch
import torch.nn.functional as F
from torch import Tensor
import numpy as np
from joeynmt.decoders import TransformerDecoder
from joeynmt.model import Model
from joeynmt.batch import Batch
from joeynmt.helpers import tile
__all__ = ["greedy", "transformer_greedy", "beam_search", "run_batch"]
def greedy(src_mask: Tensor, max_output_length: int, model: Model,
encoder_output: Tensor, encoder_hidden: Tensor)\
-> (np.array, np.array):
"""
Greedy decoding. Select the token word highest probability at each time
step. This function is a wrapper that calls recurrent_greedy for
recurrent decoders and transformer_greedy for transformer decoders.
:param src_mask: mask for source inputs, 0 for positions after </s>
:param max_output_length: maximum length for the hypotheses
:param model: model to use for greedy decoding
:param encoder_output: encoder hidden states for attention
:param encoder_hidden: encoder last state for decoder initialization
:return:
"""
if isinstance(model.decoder, TransformerDecoder):
# Transformer greedy decoding
greedy_fun = transformer_greedy
else:
# Recurrent greedy decoding
greedy_fun = recurrent_greedy
return greedy_fun(
src_mask, max_output_length, model, encoder_output, encoder_hidden)
def recurrent_greedy(
src_mask: Tensor, max_output_length: int, model: Model,
encoder_output: Tensor, encoder_hidden: Tensor) -> (np.array, np.array):
"""
Greedy decoding: in each step, choose the word that gets highest score.
Version for recurrent decoder.
:param src_mask: mask for source inputs, 0 for positions after </s>
:param max_output_length: maximum length for the hypotheses
:param model: model to use for greedy decoding
:param encoder_output: encoder hidden states for attention
:param encoder_hidden: encoder last state for decoder initialization
:return:
- stacked_output: output hypotheses (2d array of indices),
- stacked_attention_scores: attention scores (3d array)
"""
bos_index = model.bos_index
eos_index = model.eos_index
batch_size = src_mask.size(0)
prev_y = src_mask.new_full(size=[batch_size, 1], fill_value=bos_index,
dtype=torch.long)
output = []
attention_scores = []
hidden = None
prev_att_vector = None
finished = src_mask.new_zeros((batch_size, 1)).byte()
# pylint: disable=unused-variable
for t in range(max_output_length):
# decode one single step
with torch.no_grad():
logits, hidden, att_probs, prev_att_vector = model(
return_type="decode",
trg_input=prev_y,
encoder_output=encoder_output,
encoder_hidden=encoder_hidden,
src_mask=src_mask,
unroll_steps=1,
decoder_hidden=hidden,
att_vector=prev_att_vector)
# logits: batch x time=1 x vocab (logits)
# greedy decoding: choose arg max over vocabulary in each step
next_word = torch.argmax(logits, dim=-1) # batch x time=1
output.append(next_word.squeeze(1).detach().cpu().numpy())
prev_y = next_word
attention_scores.append(att_probs.squeeze(1).detach().cpu().numpy())
# batch, max_src_length
# check if previous symbol was <eos>
is_eos = torch.eq(next_word, eos_index)
finished += is_eos
# stop predicting if <eos> reached for all elements in batch
if (finished >= 1).sum() == batch_size:
break
stacked_output = np.stack(output, axis=1) # batch, time
stacked_attention_scores = np.stack(attention_scores, axis=1)
return stacked_output, stacked_attention_scores
# pylint: disable=unused-argument
def transformer_greedy(
src_mask: Tensor, max_output_length: int, model: Model,
encoder_output: Tensor, encoder_hidden: Tensor) -> (np.array, None):
"""
Special greedy function for transformer, since it works differently.
The transformer remembers all previous states and attends to them.
:param src_mask: mask for source inputs, 0 for positions after </s>
:param max_output_length: maximum length for the hypotheses
:param model: model to use for greedy decoding
:param encoder_output: encoder hidden states for attention
:param encoder_hidden: encoder final state (unused in Transformer)
:return:
- stacked_output: output hypotheses (2d array of indices),
- stacked_attention_scores: attention scores (3d array)
"""
bos_index = model.bos_index
eos_index = model.eos_index
batch_size = src_mask.size(0)
# start with BOS-symbol for each sentence in the batch
ys = encoder_output.new_full([batch_size, 1], bos_index, dtype=torch.long)
# a subsequent mask is intersected with this in decoder forward pass
trg_mask = src_mask.new_ones([1, 1, 1])
if isinstance(model, torch.nn.DataParallel):
trg_mask = torch.stack(
[src_mask.new_ones([1, 1]) for _ in model.device_ids])
finished = src_mask.new_zeros(batch_size).byte()
for _ in range(max_output_length):
# pylint: disable=unused-variable
with torch.no_grad():
logits, hidden, _, _ = model(
return_type="decode",
trg_input=ys, # model.trg_embed(ys) # embed the previous tokens
encoder_output=encoder_output,
encoder_hidden=None,
src_mask=src_mask,
unroll_steps=None,
decoder_hidden=None,
trg_mask=trg_mask
)
logits = logits[:, -1:]
hidden = hidden[:, -1:]
log_probs = model.combiner(hidden, logits).squeeze(1)
_, next_word = torch.max(log_probs, dim=1)
next_word = next_word.data
ys = torch.cat([ys, next_word.unsqueeze(-1)], dim=1)
# check if previous symbol was <eos>
is_eos = torch.eq(next_word, eos_index)
finished += is_eos
# stop predicting if <eos> reached for all elements in batch
if (finished >= 1).sum() == batch_size:
break
ys = ys[:, 1:] # remove BOS-symbol
return ys.detach().cpu().numpy(), None
# pylint: disable=too-many-statements,too-many-branches
def beam_search(model: Model, size: int,
encoder_output: Tensor, encoder_hidden: Tensor,
src_mask: Tensor, max_output_length: int,
alpha: float, n_best: int = 1) -> (np.array, np.array):
"""
Beam search with size k.
Inspired by OpenNMT-py, adapted for Transformer.
In each decoding step, find the k most likely partial hypotheses.
:param model:
:param size: size of the beam
:param encoder_output:
:param encoder_hidden:
:param src_mask:
:param max_output_length:
:param alpha: `alpha` factor for length penalty
:param n_best: return this many hypotheses, <= beam (currently only 1)
:return:
- stacked_output: output hypotheses (2d array of indices),
- stacked_attention_scores: attention scores (3d array)
"""
assert size > 0, 'Beam size must be >0.'
assert n_best <= size, 'Can only return {} best hypotheses.'.format(size)
# init
bos_index = model.bos_index
eos_index = model.eos_index
pad_index = model.pad_index
trg_vocab_size = model.decoder.output_size
device = encoder_output.device
transformer = isinstance(model.decoder, TransformerDecoder)
batch_size = src_mask.size(0)
att_vectors = None # not used for Transformer
hidden = None # not used for Transformer
trg_mask = None # not used for RNN
# Recurrent models only: initialize RNN hidden state
# pylint: disable=protected-access
if not transformer:
# tile encoder states and decoder initial states beam_size times
hidden = model.decoder._init_hidden(encoder_hidden)
hidden = tile(hidden, size, dim=1) # layers x batch*k x dec_hidden_size
# DataParallel splits batch along the 0th dim.
# Place back the batch_size to the 1st dim here.
if isinstance(hidden, tuple):
h, c = hidden
hidden = (h.permute(1, 0, 2), c.permute(1, 0, 2))
else:
hidden = hidden.permute(1, 0, 2)
# batch*k x layers x dec_hidden_size
encoder_output = tile(encoder_output.contiguous(), size,
dim=0) # batch*k x src_len x enc_hidden_size
src_mask = tile(src_mask, size, dim=0) # batch*k x 1 x src_len
# Transformer only: create target mask
if transformer:
trg_mask = src_mask.new_ones([1, 1, 1]) # transformer only
if isinstance(model, torch.nn.DataParallel):
trg_mask = torch.stack(
[src_mask.new_ones([1, 1]) for _ in model.device_ids])
# numbering elements in the batch
batch_offset = torch.arange(batch_size, dtype=torch.long, device=device)
# numbering elements in the extended batch, i.e. beam size copies of each
# batch element
beam_offset = torch.arange(
0,
batch_size * size,
step=size,
dtype=torch.long,
device=device)
# keeps track of the top beam size hypotheses to expand for each element
# in the batch to be further decoded (that are still "alive")
alive_seq = torch.full(
[batch_size * size, 1],
bos_index,
dtype=torch.long,
device=device)
# Give full probability to the first beam on the first step.
topk_log_probs = torch.zeros(batch_size, size, device=device)
topk_log_probs[:, 1:] = float("-inf")
# Structure that holds finished hypotheses.
hypotheses = [[] for _ in range(batch_size)]
results = {
"predictions": [[] for _ in range(batch_size)],
"scores": [[] for _ in range(batch_size)],
"gold_score": [0] * batch_size,
}
for step in range(max_output_length):
# This decides which part of the predicted sentence we feed to the
# decoder to make the next prediction.
# For Transformer, we feed the complete predicted sentence so far.
# For Recurrent models, only feed the previous target word prediction
if transformer: # Transformer
decoder_input = alive_seq # complete prediction so far
else: # Recurrent
decoder_input = alive_seq[:, -1].view(-1, 1) # only the last word
# expand current hypotheses
# decode one single step
# logits: logits for final softmax
# pylint: disable=unused-variable
with torch.no_grad():
logits, hidden, att_scores, att_vectors = model(
return_type="decode",
encoder_output=encoder_output,
encoder_hidden=None, # used to initialize decoder_hidden only
src_mask=src_mask,
trg_input=decoder_input, #trg_embed = embed(decoder_input)
decoder_hidden=hidden,
att_vector=att_vectors,
unroll_steps=1,
trg_mask=trg_mask # subsequent mask for Transformer only
)
# For the Transformer we made predictions for all time steps up to
# this point, so we only want to know about the last time step.
if transformer:
logits = logits[:, -1:] # keep only the last time step
hidden = hidden[:, -1:] # we don't need to keep it for transformer
log_probs = model.combiner(hidden, logits).squeeze(1)
else: # RNN
log_probs = F.log_softmax(logits, dim=-1).squeeze(1)
# multiply probs by the beam probability (=add logprobs)
log_probs += topk_log_probs.view(-1).unsqueeze(1)
curr_scores = log_probs.clone()
# compute length penalty
if alpha > -1:
length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha
curr_scores /= length_penalty
# flatten log_probs into a list of possibilities
curr_scores = curr_scores.reshape(-1, size * trg_vocab_size)
# pick currently best top k hypotheses (flattened order)
topk_scores, topk_ids = curr_scores.topk(size, dim=-1)
if alpha > -1:
# recover original log probs
topk_log_probs = topk_scores * length_penalty
else:
topk_log_probs = topk_scores.clone()
# reconstruct beam origin and true word ids from flattened order
topk_beam_index = topk_ids.floor_divide(trg_vocab_size)
topk_ids = topk_ids.fmod(trg_vocab_size)
# map beam_index to batch_index in the flat representation
batch_index = (
topk_beam_index
+ beam_offset[:topk_beam_index.size(0)].unsqueeze(1))
select_indices = batch_index.view(-1)
# append latest prediction
alive_seq = torch.cat(
[alive_seq.index_select(0, select_indices),
topk_ids.view(-1, 1)], -1) # batch_size*k x hyp_len
is_finished = topk_ids.eq(eos_index)
if step + 1 == max_output_length:
is_finished.fill_(True)
# end condition is whether the top beam is finished
end_condition = is_finished[:, 0].eq(True)
# save finished hypotheses
if is_finished.any():
predictions = alive_seq.view(-1, size, alive_seq.size(-1))
for i in range(is_finished.size(0)):
b = batch_offset[i]
if end_condition[i]:
is_finished[i].fill_(1)
finished_hyp = is_finished[i].nonzero(as_tuple=False).view(-1)
# store finished hypotheses for this batch
for j in finished_hyp:
# Check if the prediction has more than one EOS.
# If it has more than one EOS, it means that the
# prediction should have already been added to
# the hypotheses, so you don't have to add them again.
if (predictions[i, j, 1:] == eos_index).nonzero(
as_tuple=False).numel() < 2:
# ignore start_token
hypotheses[b].append(
(topk_scores[i, j], predictions[i, j, 1:])
)
# if the batch reached the end, save the n_best hypotheses
if end_condition[i]:
best_hyp = sorted(
hypotheses[b], key=lambda x: x[0], reverse=True)
for n, (score, pred) in enumerate(best_hyp):
if n >= n_best:
break
results["scores"][b].append(score)
results["predictions"][b].append(pred)
non_finished = end_condition.eq(False).nonzero(
as_tuple=False).view(-1)
# if all sentences are translated, no need to go further
# pylint: disable=len-as-condition
if len(non_finished) == 0:
break
# remove finished batches for the next step
topk_log_probs = topk_log_probs.index_select(0, non_finished)
batch_index = batch_index.index_select(0, non_finished)
batch_offset = batch_offset.index_select(0, non_finished)
alive_seq = predictions.index_select(0, non_finished) \
.view(-1, alive_seq.size(-1))
# reorder indices, outputs and masks
select_indices = batch_index.view(-1)
encoder_output = encoder_output.index_select(0, select_indices)
src_mask = src_mask.index_select(0, select_indices)
if hidden is not None and not transformer:
if isinstance(hidden, tuple):
# for LSTMs, states are tuples of tensors
h, c = hidden
h = h.index_select(0, select_indices)
c = c.index_select(0, select_indices)
hidden = (h, c)
else:
# for GRUs, states are single tensors
hidden = hidden.index_select(0, select_indices)
if att_vectors is not None:
att_vectors = att_vectors.index_select(0, select_indices)
def pad_and_stack_hyps(hyps, pad_value):
filled = np.ones((len(hyps), max([h.shape[0] for h in hyps])),
dtype=int) * pad_value
for j, h in enumerate(hyps):
for k, i in enumerate(h):
filled[j, k] = i
return filled
# from results to stacked outputs
assert n_best == 1
# only works for n_best=1 for now
final_outputs = pad_and_stack_hyps([r[0].cpu().numpy() for r in
results["predictions"]],
pad_value=pad_index)
return final_outputs, None
def run_batch(model: Model, batch: Batch, max_output_length: int,
beam_size: int, beam_alpha: float) -> (np.array, np.array):
"""
Get outputs and attentions scores for a given batch
:param model: Model class
:param batch: batch to generate hypotheses for
:param max_output_length: maximum length of hypotheses
:param beam_size: size of the beam for beam search, if 0 use greedy
:param beam_alpha: alpha value for beam search
:return: stacked_output: hypotheses for batch,
stacked_attention_scores: attention scores for batch
"""
with torch.no_grad():
encoder_output, encoder_hidden, _, _ = model(
return_type="encode", **vars(batch))
# if maximum output length is not globally specified, adapt to src len
if max_output_length is None:
max_output_length = int(max(batch.src_length.cpu().numpy()) * 1.5)
# greedy decoding
if beam_size < 2:
stacked_output, stacked_attention_scores = greedy(
src_mask=batch.src_mask,
max_output_length=max_output_length,
model=model,
encoder_output=encoder_output,
encoder_hidden=encoder_hidden
)
# batch, time, max_src_length
else: # beam search
stacked_output, stacked_attention_scores = beam_search(
model=model,
size=beam_size,
encoder_output=encoder_output,
encoder_hidden=encoder_hidden,
src_mask=batch.src_mask,
max_output_length=max_output_length,
alpha=beam_alpha
)
return stacked_output, stacked_attention_scores
| 18,708 | 39.321121 | 88 | py |
KSTER | KSTER-main/joeynmt/attention.py | # coding: utf-8
"""
Attention modules
"""
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
class AttentionMechanism(nn.Module):
"""
Base attention class
"""
def forward(self, *inputs):
raise NotImplementedError("Implement this.")
class BahdanauAttention(AttentionMechanism):
"""
Implements Bahdanau (MLP) attention
Section A.1.2 in https://arxiv.org/pdf/1409.0473.pdf.
"""
def __init__(self, hidden_size=1, key_size=1, query_size=1):
"""
Creates attention mechanism.
:param hidden_size: size of the projection for query and key
:param key_size: size of the attention input keys
:param query_size: size of the query
"""
super().__init__()
self.key_layer = nn.Linear(key_size, hidden_size, bias=False)
self.query_layer = nn.Linear(query_size, hidden_size, bias=False)
self.energy_layer = nn.Linear(hidden_size, 1, bias=False)
self.proj_keys = None # to store projected keys
self.proj_query = None # projected query
#pylint: disable=arguments-differ
def forward(self, query: Tensor = None,
mask: Tensor = None,
values: Tensor = None):
"""
Bahdanau MLP attention forward pass.
:param query: the item (decoder state) to compare with the keys/memory,
shape (batch_size, 1, decoder.hidden_size)
:param mask: mask out keys position (0 in invalid positions, 1 else),
shape (batch_size, 1, src_length)
:param values: values (encoder states),
shape (batch_size, src_length, encoder.hidden_size)
:return: context vector of shape (batch_size, 1, value_size),
attention probabilities of shape (batch_size, 1, src_length)
"""
self._check_input_shapes_forward(query=query, mask=mask, values=values)
assert mask is not None, "mask is required"
assert self.proj_keys is not None,\
"projection keys have to get pre-computed"
# We first project the query (the decoder state).
# The projected keys (the encoder states) were already pre-computated.
self.compute_proj_query(query)
# Calculate scores.
# proj_keys: batch x src_len x hidden_size
# proj_query: batch x 1 x hidden_size
scores = self.energy_layer(torch.tanh(self.proj_query + self.proj_keys))
# scores: batch x src_len x 1
scores = scores.squeeze(2).unsqueeze(1)
# scores: batch x 1 x time
# mask out invalid positions by filling the masked out parts with -inf
scores = torch.where(mask, scores, scores.new_full([1], float('-inf')))
# turn scores to probabilities
alphas = F.softmax(scores, dim=-1) # batch x 1 x time
# the context vector is the weighted sum of the values
context = alphas @ values # batch x 1 x value_size
return context, alphas
def compute_proj_keys(self, keys: Tensor):
"""
Compute the projection of the keys.
Is efficient if pre-computed before receiving individual queries.
:param keys:
:return:
"""
self.proj_keys = self.key_layer(keys)
def compute_proj_query(self, query: Tensor):
"""
Compute the projection of the query.
:param query:
:return:
"""
self.proj_query = self.query_layer(query)
def _check_input_shapes_forward(self, query: torch.Tensor,
mask: torch.Tensor,
values: torch.Tensor):
"""
Make sure that inputs to `self.forward` are of correct shape.
Same input semantics as for `self.forward`.
:param query:
:param mask:
:param values:
:return:
"""
assert query.shape[0] == values.shape[0] == mask.shape[0]
assert query.shape[1] == 1 == mask.shape[1]
assert query.shape[2] == self.query_layer.in_features
assert values.shape[2] == self.key_layer.in_features
assert mask.shape[2] == values.shape[1]
def __repr__(self):
return "BahdanauAttention"
class LuongAttention(AttentionMechanism):
"""
Implements Luong (bilinear / multiplicative) attention.
Eq. 8 ("general") in http://aclweb.org/anthology/D15-1166.
"""
def __init__(self, hidden_size: int = 1, key_size: int = 1):
"""
Creates attention mechanism.
:param hidden_size: size of the key projection layer, has to be equal
to decoder hidden size
:param key_size: size of the attention input keys
"""
super().__init__()
self.key_layer = nn.Linear(in_features=key_size,
out_features=hidden_size,
bias=False)
self.proj_keys = None # projected keys
# pylint: disable=arguments-differ
def forward(self, query: torch.Tensor = None,
mask: torch.Tensor = None,
values: torch.Tensor = None):
"""
Luong (multiplicative / bilinear) attention forward pass.
Computes context vectors and attention scores for a given query and
all masked values and returns them.
:param query: the item (decoder state) to compare with the keys/memory,
shape (batch_size, 1, decoder.hidden_size)
:param mask: mask out keys position (0 in invalid positions, 1 else),
shape (batch_size, 1, src_length)
:param values: values (encoder states),
shape (batch_size, src_length, encoder.hidden_size)
:return: context vector of shape (batch_size, 1, value_size),
attention probabilities of shape (batch_size, 1, src_length)
"""
self._check_input_shapes_forward(query=query, mask=mask, values=values)
assert self.proj_keys is not None,\
"projection keys have to get pre-computed"
assert mask is not None, "mask is required"
# scores: batch_size x 1 x src_length
scores = query @ self.proj_keys.transpose(1, 2)
# mask out invalid positions by filling the masked out parts with -inf
scores = torch.where(mask, scores, scores.new_full([1], float('-inf')))
# turn scores to probabilities
alphas = F.softmax(scores, dim=-1) # batch x 1 x src_len
# the context vector is the weighted sum of the values
context = alphas @ values # batch x 1 x values_size
return context, alphas
def compute_proj_keys(self, keys: Tensor):
"""
Compute the projection of the keys and assign them to `self.proj_keys`.
This pre-computation is efficiently done for all keys
before receiving individual queries.
:param keys: shape (batch_size, src_length, encoder.hidden_size)
"""
# proj_keys: batch x src_len x hidden_size
self.proj_keys = self.key_layer(keys)
def _check_input_shapes_forward(self, query: torch.Tensor,
mask: torch.Tensor,
values: torch.Tensor):
"""
Make sure that inputs to `self.forward` are of correct shape.
Same input semantics as for `self.forward`.
:param query:
:param mask:
:param values:
:return:
"""
assert query.shape[0] == values.shape[0] == mask.shape[0]
assert query.shape[1] == 1 == mask.shape[1]
assert query.shape[2] == self.key_layer.out_features
assert values.shape[2] == self.key_layer.in_features
assert mask.shape[2] == values.shape[1]
def __repr__(self):
return "LuongAttention"
| 7,824 | 33.933036 | 80 | py |
KSTER | KSTER-main/joeynmt/helpers.py | # coding: utf-8
"""
Collection of helper functions
"""
import copy
import glob
import os
import os.path
import errno
import shutil
import random
import logging
from typing import Optional, List
import pathlib
import numpy as np
import pkg_resources
import torch
from torch import nn, Tensor
from torch.utils.tensorboard import SummaryWriter
from torchtext.data import Dataset
import yaml
from joeynmt.vocabulary import Vocabulary
from joeynmt.plotting import plot_heatmap
from sacremoses import MosesDetokenizer
class ConfigurationError(Exception):
""" Custom exception for misspecifications of configuration """
def make_model_dir(model_dir: str, overwrite=False) -> str:
"""
Create a new directory for the model.
:param model_dir: path to model directory
:param overwrite: whether to overwrite an existing directory
:return: path to model directory
"""
if os.path.isdir(model_dir):
if not overwrite:
raise FileExistsError(
"Model directory exists and overwriting is disabled.")
# delete previous directory to start with empty dir again
shutil.rmtree(model_dir)
os.makedirs(model_dir)
return model_dir
def make_logger(log_dir: str = None, mode: str = "train") -> str:
"""
Create a logger for logging the training/testing process.
:param log_dir: path to file where log is stored as well
:param mode: log file name. 'train', 'test' or 'translate'
:return: joeynmt version number
"""
logger = logging.getLogger("") # root logger
version = pkg_resources.require("joeynmt")[0].version
# add handlers only once.
if len(logger.handlers) == 0:
logger.setLevel(level=logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(name)s - %(message)s')
if log_dir is not None:
if os.path.exists(log_dir):
log_file = f'{log_dir}/{mode}.log'
fh = logging.FileHandler(log_file)
fh.setLevel(level=logging.DEBUG)
logger.addHandler(fh)
fh.setFormatter(formatter)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.info("Hello! This is Joey-NMT (version %s).", version)
return version
def log_cfg(cfg: dict, prefix: str = "cfg") -> None:
"""
Write configuration to log.
:param cfg: configuration to log
:param prefix: prefix for logging
"""
logger = logging.getLogger(__name__)
for k, v in cfg.items():
if isinstance(v, dict):
p = '.'.join([prefix, k])
log_cfg(v, prefix=p)
else:
p = '.'.join([prefix, k])
logger.info("{:34s} : {}".format(p, v))
def clones(module: nn.Module, n: int) -> nn.ModuleList:
"""
Produce N identical layers. Transformer helper function.
:param module: the module to clone
:param n: clone this many times
:return cloned modules
"""
return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])
def subsequent_mask(size: int) -> Tensor:
"""
Mask out subsequent positions (to prevent attending to future positions)
Transformer helper function.
:param size: size of mask (2nd and 3rd dim)
:return: Tensor with 0s and 1s of shape (1, size, size)
"""
mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
return torch.from_numpy(mask) == 0
def set_seed(seed: int) -> None:
"""
Set the random seed for modules torch, numpy and random.
:param seed: random seed
"""
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
if torch.cuda.is_available() and torch.cuda.device_count() > 0:
torch.backends.cudnn.deterministic = True
torch.cuda.manual_seed_all(seed)
def log_data_info(train_data: Dataset, valid_data: Dataset, test_data: Dataset,
src_vocab: Vocabulary, trg_vocab: Vocabulary) -> None:
"""
Log statistics of data and vocabulary.
:param train_data:
:param valid_data:
:param test_data:
:param src_vocab:
:param trg_vocab:
"""
logger = logging.getLogger(__name__)
logger.info("Data set sizes: \n\ttrain %d,\n\tvalid %d,\n\ttest %d",
len(train_data), len(valid_data),
len(test_data) if test_data is not None else 0)
logger.info("First training example:\n\t[SRC] %s\n\t[TRG] %s",
" ".join(vars(train_data[0])['src']),
" ".join(vars(train_data[0])['trg']))
logger.info(
"First 10 words (src): %s",
" ".join('(%d) %s' % (i, t) for i, t in enumerate(src_vocab.itos[:10])))
logger.info(
"First 10 words (trg): %s",
" ".join('(%d) %s' % (i, t) for i, t in enumerate(trg_vocab.itos[:10])))
logger.info("Number of Src words (types): %d", len(src_vocab))
logger.info("Number of Trg words (types): %d", len(trg_vocab))
def load_config(path="configs/default.yaml") -> dict:
"""
Loads and parses a YAML configuration file.
:param path: path to YAML configuration file
:return: configuration dictionary
"""
with open(path, 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
return cfg
def bpe_postprocess(string, bpe_type="subword-nmt") -> str:
"""
Post-processor for BPE output. Recombines BPE-split tokens.
:param string:
:param bpe_type: one of {"sentencepiece", "subword-nmt"}
:return: post-processed string
"""
if bpe_type == "sentencepiece":
ret = string.replace(" ", "").replace("▁", " ").strip()
elif bpe_type == "subword-nmt":
ret = string.replace("@@ ", "").strip()
else:
ret = string.strip()
return ret
def store_attention_plots(attentions: np.array,
targets: List[List[str]],
sources: List[List[str]],
output_prefix: str,
indices: List[int],
tb_writer: Optional[SummaryWriter] = None,
steps: int = 0) -> None:
"""
Saves attention plots.
:param attentions: attention scores
:param targets: list of tokenized targets
:param sources: list of tokenized sources
:param output_prefix: prefix for attention plots
:param indices: indices selected for plotting
:param tb_writer: Tensorboard summary writer (optional)
:param steps: current training steps, needed for tb_writer
:param dpi: resolution for images
"""
for i in indices:
if i >= len(sources):
continue
plot_file = "{}.{}.pdf".format(output_prefix, i)
src = sources[i]
trg = targets[i]
attention_scores = attentions[i].T
try:
fig = plot_heatmap(scores=attention_scores,
column_labels=trg,
row_labels=src,
output_path=plot_file,
dpi=100)
if tb_writer is not None:
# lower resolution for tensorboard
fig = plot_heatmap(scores=attention_scores,
column_labels=trg,
row_labels=src,
output_path=None,
dpi=50)
tb_writer.add_figure("attention/{}.".format(i),
fig,
global_step=steps)
# pylint: disable=bare-except
except:
print("Couldn't plot example {}: src len {}, trg len {}, "
"attention scores shape {}".format(i, len(src), len(trg),
attention_scores.shape))
continue
def get_latest_checkpoint(ckpt_dir: str) -> Optional[str]:
"""
Returns the latest checkpoint (by time) from the given directory.
If there is no checkpoint in this directory, returns None
:param ckpt_dir:
:return: latest checkpoint file
"""
list_of_files = glob.glob("{}/*.ckpt".format(ckpt_dir))
latest_checkpoint = None
if list_of_files:
latest_checkpoint = max(list_of_files, key=os.path.getctime)
# check existence
if latest_checkpoint is None:
raise FileNotFoundError(
"No checkpoint found in directory {}.".format(ckpt_dir))
return latest_checkpoint
def load_checkpoint(path: str, use_cuda: bool = True) -> dict:
"""
Load model from saved checkpoint.
:param path: path to checkpoint
:param use_cuda: using cuda or not
:return: checkpoint (dict)
"""
assert os.path.isfile(path), "Checkpoint %s not found" % path
checkpoint = torch.load(path, map_location='cuda' if use_cuda else 'cpu')
return checkpoint
# from onmt
def tile(x: Tensor, count: int, dim=0) -> Tensor:
"""
Tiles x on dimension dim count times. From OpenNMT. Used for beam search.
:param x: tensor to tile
:param count: number of tiles
:param dim: dimension along which the tensor is tiled
:return: tiled tensor
"""
if isinstance(x, tuple):
h, c = x
return tile(h, count, dim=dim), tile(c, count, dim=dim)
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm).contiguous()
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = x.view(batch, -1) \
.transpose(0, 1) \
.repeat(count, 1) \
.transpose(0, 1) \
.contiguous() \
.view(*out_size)
if dim != 0:
x = x.permute(perm).contiguous()
return x
def freeze_params(module: nn.Module) -> None:
"""
Freeze the parameters of this module,
i.e. do not update them during training
:param module: freeze parameters of this module
"""
for _, p in module.named_parameters():
p.requires_grad = False
def symlink_update(target, link_name):
try:
os.symlink(target, link_name)
except FileExistsError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def latest_checkpoint_update(target: pathlib.Path,
link_name: str) -> Optional[pathlib.Path]:
"""
This function finds the file that the symlink currently points to, sets it
to the new target, and returns the previous target if it exists.
:param target: A path to a file that we want the symlink to point to.
:param link_name: This is the name of the symlink that we want to update.
:return:
- current_last: This is the previous target of the symlink, before it is
updated in this function. If the symlink did not exist before or did
not have a target, None is returned instead.
"""
link = pathlib.Path(link_name)
if link.is_symlink():
current_last = link.resolve()
link.unlink()
link.symlink_to(target)
return current_last
link.symlink_to(target)
return None
def get_sacrebleu_description(cfg: dict) -> dict:
"""
This function extracts sacrebleu description dict from the configuration dict.
:param cfg: the configuration dict
"""
data_config = cfg["data"]
test_config = cfg["testing"]
sacrebleu_description = {"remove_whitespace": True, "tokenize": "13a"}
if "sacrebleu" in test_config.keys():
sacrebleu_description["remove_whitespace"] = test_config["sacrebleu"] \
.get("remove_whitespace", True)
sacrebleu_description["tokenize"] = test_config["sacrebleu"] \
.get("tokenize", "13a")
sacrebleu_description["use_detokenization"] = test_config["sacrebleu"] \
.get("use_detokenization", False)
if sacrebleu_description["use_detokenization"]:
src_detokenizer = MosesDetokenizer(lang=data_config["src"])
trg_detokenizer = MosesDetokenizer(lang=data_config["trg"])
def batch_src_detokenize(sentences):
results = []
for sentence in sentences:
results.append(src_detokenizer.detokenize(sentence.split()))
return results
def batch_trg_detokenize(sentences):
results = []
for sentence in sentences:
results.append(trg_detokenizer.detokenize(sentence.split()))
return results
sacrebleu_description["batch_src_detokenize"] = batch_src_detokenize
sacrebleu_description["batch_trg_detokenize"] = batch_trg_detokenize
return sacrebleu_description
def check_combiner_cfg(combiner_cfg: dict) -> None:
"""
This function is used to validate that the merged combiner config is valid.
:param combiner_cfg: the merged combiner config dict
"""
assert combiner_cfg["type"] in ["no_combiner", "static_combiner",
"dynamic_combiner"], "combiner type %s is not supported currently." % combiner_cfg["type"]
if combiner_cfg["type"] in ["static_combiner", "dynamic_combiner"]:
for key in ["top_k", "kernel"]:
assert combiner_cfg[key] is not None, "%s is needed in %s" % (key, combiner_cfg["type"])
for key in ["index_path", "token_map_path"]:
assert combiner_cfg[key] is not None, "%s is needed in %s" % (key, combiner_cfg["type"])
path = combiner_cfg[key]
assert os.path.exists(path), "%s does not exist" % path
if combiner_cfg["type"] == "static_combiner":
for key in ["mixing_weight", "bandwidth"]:
assert combiner_cfg[key] is not None, "%s is needed in %s" % (key, combiner_cfg["type"])
if combiner_cfg["type"] == "dynamic_combiner":
assert combiner_cfg["embedding_path"] is not None, "%s is needed in %s" % ("embedding_path", combiner_cfg["type"])
path = combiner_cfg["embedding_path"]
assert os.path.exists(path), "%s does not exist" % path | 14,240 | 32.587264 | 122 | py |
KSTER | KSTER-main/joeynmt/combiner_training.py | # coding: utf-8
"""
Training module
"""
import argparse
import time
import shutil
from typing import List
import logging
import os
import sys
import collections
import pathlib
import numpy as np
import torch
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from torchtext.data import Dataset
from joeynmt.model import build_model
from joeynmt.batch import Batch
from joeynmt.helpers import log_data_info, load_config, log_cfg, \
store_attention_plots, load_checkpoint, make_model_dir, \
make_logger, set_seed, symlink_update, latest_checkpoint_update, \
ConfigurationError, get_sacrebleu_description, check_combiner_cfg
from joeynmt.model import Model, _DataParallel
from joeynmt.prediction import validate_on_data
from joeynmt.loss import XentLoss
from joeynmt.data import load_data, make_data_iter
from joeynmt.builders import build_optimizer, build_scheduler, \
build_gradient_clipper
from joeynmt.prediction import test
from joeynmt.combiners import build_combiner
# for fp16 training
try:
from apex import amp
amp.register_half_function(torch, "einsum")
except ImportError as no_apex:
# error handling in CombinerTrainManager object construction
pass
logger = logging.getLogger(__name__)
# pylint: disable=too-many-instance-attributes
class CombinerTrainManager:
""" Manages training loop, validations, learning rate scheduling
and early stopping."""
def __init__(self, model: Model, config: dict,
batch_class: Batch = Batch) -> None:
"""
Creates a new CombinerTrainManager for a model, specified as in configuration.
:param model: torch module defining the model
:param config: dictionary containing the training configurations
:param batch_class: batch class to encapsulate the torch class
"""
combiner_train_config = config["combiner_training"]
self.batch_class = batch_class
# files for logging and storing
self.model_dir = combiner_train_config["model_dir"]
assert os.path.exists(self.model_dir)
self.logging_freq = combiner_train_config.get("logging_freq", 100)
self.valid_report_file = "{}/validations.txt".format(self.model_dir)
self.tb_writer = SummaryWriter(log_dir=self.model_dir + "/tensorboard/")
self.save_latest_checkpoint = combiner_train_config.get("save_latest_ckpt", True)
# model
self.model = model
self._log_parameters_list()
# objective
self.label_smoothing = combiner_train_config.get("label_smoothing", 0.0)
self.model.loss_function = XentLoss(pad_index=self.model.pad_index,
smoothing=self.label_smoothing)
self.normalization = combiner_train_config.get("normalization", "batch")
if self.normalization not in ["batch", "tokens", "none"]:
raise ConfigurationError("Invalid normalization option."
"Valid options: "
"'batch', 'tokens', 'none'.")
# optimization
self.learning_rate_min = combiner_train_config.get("learning_rate_min", 1.0e-8)
self.clip_grad_fun = build_gradient_clipper(config=combiner_train_config)
self.optimizer = build_optimizer(config=combiner_train_config,
parameters=self.model.combiner.parameters())
# validation & early stopping
self.validation_freq = combiner_train_config.get("validation_freq", 1000)
self.log_valid_sents = combiner_train_config.get("print_valid_sents", [0, 1, 2])
self.ckpt_queue = collections.deque(
maxlen=combiner_train_config.get("keep_last_ckpts", 5))
self.eval_metric = combiner_train_config.get("eval_metric", "bleu")
if self.eval_metric not in [
'bleu', 'chrf', 'token_accuracy', 'sequence_accuracy'
]:
raise ConfigurationError("Invalid setting for 'eval_metric', "
"valid options: 'bleu', 'chrf', "
"'token_accuracy', 'sequence_accuracy'.")
self.early_stopping_metric = combiner_train_config.get("early_stopping_metric",
"eval_metric")
# early_stopping_metric decides on how to find the early stopping point:
# ckpts are written when there's a new high/low score for this metric.
# If we schedule after BLEU/chrf/accuracy, we want to maximize the
# score, else we want to minimize it.
if self.early_stopping_metric in ["ppl", "loss"]:
self.minimize_metric = True
elif self.early_stopping_metric == "eval_metric":
if self.eval_metric in [
"bleu", "chrf", "token_accuracy", "sequence_accuracy"
]:
self.minimize_metric = False
# eval metric that has to get minimized (not yet implemented)
else:
self.minimize_metric = True
else:
raise ConfigurationError(
"Invalid setting for 'early_stopping_metric', "
"valid options: 'loss', 'ppl', 'eval_metric'.")
# eval options
test_config = config["testing"]
self.bpe_type = test_config.get("bpe_type", "subword-nmt")
self.sacrebleu = get_sacrebleu_description(config)
# learning rate scheduling
self.scheduler, self.scheduler_step_at = build_scheduler(
config=combiner_train_config,
scheduler_mode="min" if self.minimize_metric else "max",
optimizer=self.optimizer,
hidden_size=config["model"]["encoder"]["hidden_size"])
# data & batch handling
self.level = config["data"]["level"]
if self.level not in ["word", "bpe", "char"]:
raise ConfigurationError("Invalid segmentation level. "
"Valid options: 'word', 'bpe', 'char'.")
self.shuffle = combiner_train_config.get("shuffle", True)
self.epochs = combiner_train_config["epochs"]
self.batch_size = combiner_train_config["batch_size"]
# Placeholder so that we can use the train_iter in other functions.
self.train_iter = None
self.train_iter_state = None
# per-device batch_size = self.batch_size // self.n_gpu
self.batch_type = combiner_train_config.get("batch_type", "sentence")
self.eval_batch_size = combiner_train_config.get("eval_batch_size",
self.batch_size)
# per-device eval_batch_size = self.eval_batch_size // self.n_gpu
self.eval_batch_type = combiner_train_config.get("eval_batch_type",
self.batch_type)
self.batch_multiplier = combiner_train_config.get("batch_multiplier", 1)
# generation
self.max_output_length = combiner_train_config.get("max_output_length", None)
# CPU / GPU
self.use_cuda = combiner_train_config["use_cuda"] and torch.cuda.is_available()
self.n_gpu = torch.cuda.device_count() if self.use_cuda else 0
self.device = torch.device("cuda" if self.use_cuda else "cpu")
if self.use_cuda:
self.model.to(self.device)
# fp16
self.fp16 = combiner_train_config.get("fp16", False)
if self.fp16:
if 'apex' not in sys.modules:
raise ImportError("Please install apex from "
"https://www.github.com/nvidia/apex "
"to use fp16 training.") from no_apex
self.model, self.optimizer = amp.initialize(self.model,
self.optimizer,
opt_level='O1')
# opt level: one of {"O0", "O1", "O2", "O3"}
# see https://nvidia.github.io/apex/amp.html#opt-levels
# initialize training statistics
self.stats = self.TrainStatistics(
steps=0,
stop=False,
total_tokens=0,
best_ckpt_iter=0,
best_ckpt_score=np.inf if self.minimize_metric else -np.inf,
minimize_metric=self.minimize_metric,
max_steps=combiner_train_config["max_steps"])
# model parameters
if "load_model" in combiner_train_config.keys():
self.init_from_checkpoint(
combiner_train_config["load_model"],
reset_best_ckpt=combiner_train_config.get("reset_best_ckpt", False),
reset_scheduler=combiner_train_config.get("reset_scheduler", False),
reset_optimizer=combiner_train_config.get("reset_optimizer", False),
reset_iter_state=combiner_train_config.get("reset_iter_state", False))
# multi-gpu training (should be after apex fp16 initialization)
if self.n_gpu > 1:
self.model = _DataParallel(self.model)
def _save_checkpoint(self, new_best: bool = True) -> None:
"""
Save the model's current parameters and the training state to a
checkpoint.
The training state contains the total number of training steps,
the total number of training tokens,
the best checkpoint score and iteration so far,
and optimizer and scheduler states.
:param new_best: This boolean signals which symlink we will use for the
new checkpoint. If it is true, we update best.ckpt, else latest.ckpt.
"""
combiner_path = os.path.join(self.model_dir,
"{}.ckpt".format(self.stats.steps))
combiner_state_dict = self.model.combiner.module.state_dict() \
if isinstance(self.model.combiner, torch.nn.DataParallel) \
else self.model.combiner.state_dict()
state = {
"steps":
self.stats.steps,
"total_tokens":
self.stats.total_tokens,
"best_ckpt_score":
self.stats.best_ckpt_score,
"best_ckpt_iteration":
self.stats.best_ckpt_iter,
"model_state":
combiner_state_dict,
"optimizer_state":
self.optimizer.state_dict(),
"scheduler_state":
self.scheduler.state_dict() if self.scheduler is not None else None,
'amp_state':
amp.state_dict() if self.fp16 else None,
"train_iter_state":
self.train_iter.state_dict()
}
torch.save(state, combiner_path)
symlink_target = "{}.ckpt".format(self.stats.steps)
if new_best:
if len(self.ckpt_queue) == self.ckpt_queue.maxlen:
to_delete = self.ckpt_queue.popleft() # delete oldest ckpt
try:
os.remove(to_delete)
except FileNotFoundError:
logger.warning(
"Wanted to delete old checkpoint %s but "
"file does not exist.", to_delete)
self.ckpt_queue.append(combiner_path)
best_path = "{}/best.ckpt".format(self.model_dir)
try:
# create/modify symbolic link for best checkpoint
symlink_update(symlink_target, best_path)
except OSError:
# overwrite best.ckpt
torch.save(state, best_path)
if self.save_latest_checkpoint:
last_path = "{}/latest.ckpt".format(self.model_dir)
previous_path = latest_checkpoint_update(symlink_target, last_path)
# If the last ckpt is in the ckpt_queue, we don't want to delete it.
can_delete = True
for ckpt_path in self.ckpt_queue:
if pathlib.Path(ckpt_path).resolve() == previous_path:
can_delete = False
break
if can_delete and previous_path is not None:
os.remove(previous_path)
def init_from_checkpoint(self,
path: str,
reset_best_ckpt: bool = False,
reset_scheduler: bool = False,
reset_optimizer: bool = False,
reset_iter_state: bool = False) -> None:
"""
Initialize the trainer from a given checkpoint file.
This checkpoint file contains not only model parameters, but also
scheduler and optimizer states, see `self._save_checkpoint`.
:param path: path to checkpoint
:param reset_best_ckpt: reset tracking of the best checkpoint,
use for domain adaptation with a new dev
set or when using a new metric for fine-tuning.
:param reset_scheduler: reset the learning rate scheduler, and do not
use the one stored in the checkpoint.
:param reset_optimizer: reset the optimizer, and do not use the one
stored in the checkpoint.
:param reset_iter_state: reset the sampler's internal state and do not
use the one stored in the checkpoint.
"""
logger.info("Loading model from %s", path)
model_checkpoint = load_checkpoint(path=path, use_cuda=self.use_cuda)
# restore model and optimizer parameters
self.model.load_state_dict(model_checkpoint["model_state"])
if not reset_optimizer:
self.optimizer.load_state_dict(model_checkpoint["optimizer_state"])
else:
logger.info("Reset optimizer.")
if not reset_scheduler:
if model_checkpoint["scheduler_state"] is not None and \
self.scheduler is not None:
self.scheduler.load_state_dict(
model_checkpoint["scheduler_state"])
else:
logger.info("Reset scheduler.")
# restore counts
self.stats.steps = model_checkpoint["steps"]
self.stats.total_tokens = model_checkpoint["total_tokens"]
if not reset_best_ckpt:
self.stats.best_ckpt_score = model_checkpoint["best_ckpt_score"]
self.stats.best_ckpt_iter = model_checkpoint["best_ckpt_iteration"]
else:
logger.info("Reset tracking of the best checkpoint.")
if (not reset_iter_state
and model_checkpoint.get('train_iter_state', None) is not None):
self.train_iter_state = model_checkpoint["train_iter_state"]
# move parameters to cuda
if self.use_cuda:
self.model.to(self.device)
# fp16
if self.fp16 and model_checkpoint.get("amp_state", None) is not None:
amp.load_state_dict(model_checkpoint['amp_state'])
# pylint: disable=unnecessary-comprehension
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def train_and_validate(self, train_data: Dataset, valid_data: Dataset) \
-> None:
"""
Train the model and validate it from time to time on the validation set.
:param train_data: training data
:param valid_data: validation data
"""
self.train_iter = make_data_iter(train_data,
batch_size=self.batch_size,
batch_type=self.batch_type,
train=True,
shuffle=self.shuffle)
if self.train_iter_state is not None:
self.train_iter.load_state_dict(self.train_iter_state)
#################################################################
# simplify accumulation logic:
#################################################################
# for epoch in range(epochs):
# self.model.zero_grad()
# epoch_loss = 0.0
# batch_loss = 0.0
# for i, batch in enumerate(iter(self.train_iter)):
#
# # gradient accumulation:
# # loss.backward() inside _train_step()
# batch_loss += self._train_step(inputs)
#
# if (i + 1) % self.batch_multiplier == 0:
# self.optimizer.step() # update!
# self.model.zero_grad() # reset gradients
# self.steps += 1 # increment counter
#
# epoch_loss += batch_loss # accumulate batch loss
# batch_loss = 0 # reset batch loss
#
# # leftovers are just ignored.
#################################################################
logger.info(
"Train stats:\n"
"\tdevice: %s\n"
"\tn_gpu: %d\n"
"\t16-bits training: %r\n"
"\tgradient accumulation: %d\n"
"\tbatch size per device: %d\n"
"\ttotal batch size (w. parallel & accumulation): %d", self.device,
self.n_gpu, self.fp16, self.batch_multiplier, self.batch_size //
self.n_gpu if self.n_gpu > 1 else self.batch_size,
self.batch_size * self.batch_multiplier)
self.model.eval()
for epoch_no in range(self.epochs):
logger.info("EPOCH %d", epoch_no + 1)
if self.scheduler is not None and self.scheduler_step_at == "epoch":
self.scheduler.step(epoch=epoch_no)
self.model.combiner.train()
# Reset statistics for each epoch.
start = time.time()
total_valid_duration = 0
start_tokens = self.stats.total_tokens
self.model.combiner.zero_grad()
epoch_loss = 0
batch_loss = 0
for i, batch in enumerate(iter(self.train_iter)):
# create a Batch object from torchtext batch
batch = self.batch_class(batch, self.model.pad_index,
use_cuda=self.use_cuda)
# get batch loss
batch_loss += self._train_step(batch)
# update!
if (i + 1) % self.batch_multiplier == 0:
# clip gradients (in-place)
if self.clip_grad_fun is not None:
if self.fp16:
self.clip_grad_fun(
params=amp.master_params(self.optimizer))
else:
self.clip_grad_fun(params=self.model.combiner.parameters())
# make gradient step
self.optimizer.step()
# decay lr
if self.scheduler is not None \
and self.scheduler_step_at == "step":
self.scheduler.step()
# reset gradients
self.model.combiner.zero_grad()
# increment step counter
self.stats.steps += 1
if self.stats.steps >= self.stats.max_steps:
self.stats.stop = True
# log learning progress
if self.stats.steps % self.logging_freq == 0:
self.tb_writer.add_scalar("train/train_batch_loss",
batch_loss, self.stats.steps)
elapsed = time.time() - start - total_valid_duration
elapsed_tokens = self.stats.total_tokens - start_tokens
logger.info(
"Epoch %3d, Step: %8d, Batch Loss: %12.6f, "
"Tokens per Sec: %8.0f, Lr: %.6f", epoch_no + 1,
self.stats.steps, batch_loss,
elapsed_tokens / elapsed,
self.optimizer.param_groups[0]["lr"])
start = time.time()
total_valid_duration = 0
start_tokens = self.stats.total_tokens
# Only add complete loss of full mini-batch to epoch_loss
epoch_loss += batch_loss # accumulate epoch_loss
batch_loss = 0 # rest batch_loss
# validate on the entire dev set
if self.stats.steps % self.validation_freq == 0:
valid_duration = self._validate(valid_data, epoch_no)
total_valid_duration += valid_duration
if self.stats.stop:
break
if self.stats.stop:
logger.info('Training ended since minimum lr %f was reached.',
self.learning_rate_min)
break
logger.info('Epoch %3d: total training loss %.2f', epoch_no + 1,
epoch_loss)
else:
logger.info('Training ended after %3d epochs.', epoch_no + 1)
logger.info('Best validation result (greedy) at step %8d: %6.2f %s.',
self.stats.best_ckpt_iter, self.stats.best_ckpt_score,
self.early_stopping_metric)
self.tb_writer.close() # close Tensorboard writer
def _train_step(self, batch: Batch) -> Tensor:
"""
Train the model on one batch: Compute the loss.
:param batch: training batch
:return: loss for batch (sum)
"""
# reactivate training
self.model.combiner.train()
# get loss
batch_loss, _, _, _ = self.model(return_type="combiner_loss", **vars(batch))
# sum multi-gpu losses
if self.n_gpu > 1:
batch_loss = batch_loss.sum()
# normalize batch loss
if self.normalization == "batch":
normalizer = batch.nseqs
elif self.normalization == "tokens":
normalizer = batch.ntokens
elif self.normalization == "none":
normalizer = 1
else:
raise NotImplementedError("Only normalize by 'batch' or 'tokens' "
"or summation of loss 'none' implemented")
norm_batch_loss = batch_loss / normalizer
if self.n_gpu > 1:
norm_batch_loss = norm_batch_loss / self.n_gpu
if self.batch_multiplier > 1:
norm_batch_loss = norm_batch_loss / self.batch_multiplier
# accumulate gradients
if self.fp16:
with amp.scale_loss(norm_batch_loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
norm_batch_loss.backward()
# increment token counter
self.stats.total_tokens += batch.ntokens
return norm_batch_loss.item()
def _validate(self, valid_data, epoch_no):
valid_start_time = time.time()
valid_score, valid_loss, valid_ppl, valid_sources, \
valid_sources_raw, valid_references, valid_hypotheses, \
valid_hypotheses_raw, valid_attention_scores = \
validate_on_data(
batch_size=self.eval_batch_size,
batch_class=self.batch_class,
data=valid_data,
eval_metric=self.eval_metric,
level=self.level, model=self.model,
use_cuda=self.use_cuda,
max_output_length=self.max_output_length,
compute_loss=True,
beam_size=1, # greedy validations
batch_type=self.eval_batch_type,
postprocess=True, # always remove BPE for validation
bpe_type=self.bpe_type, # "subword-nmt" or "sentencepiece"
sacrebleu=self.sacrebleu, # sacrebleu options
n_gpu=self.n_gpu
)
self.tb_writer.add_scalar("valid/valid_loss", valid_loss,
self.stats.steps)
self.tb_writer.add_scalar("valid/valid_score", valid_score,
self.stats.steps)
self.tb_writer.add_scalar("valid/valid_ppl", valid_ppl,
self.stats.steps)
if self.early_stopping_metric == "loss":
ckpt_score = valid_loss
elif self.early_stopping_metric in ["ppl", "perplexity"]:
ckpt_score = valid_ppl
else:
ckpt_score = valid_score
if self.scheduler is not None \
and self.scheduler_step_at == "validation":
self.scheduler.step(ckpt_score)
new_best = False
if self.stats.is_best(ckpt_score):
self.stats.best_ckpt_score = ckpt_score
self.stats.best_ckpt_iter = self.stats.steps
logger.info('Hooray! New best validation result [%s]!',
self.early_stopping_metric)
if self.ckpt_queue.maxlen > 0:
logger.info("Saving new checkpoint.")
new_best = True
self._save_checkpoint(new_best)
elif self.save_latest_checkpoint:
self._save_checkpoint(new_best)
# append to validation report
self._add_report(valid_score=valid_score,
valid_loss=valid_loss,
valid_ppl=valid_ppl,
eval_metric=self.eval_metric,
new_best=new_best)
self._log_examples(sources_raw=[v for v in valid_sources_raw],
sources=valid_sources,
hypotheses_raw=valid_hypotheses_raw,
hypotheses=valid_hypotheses,
references=valid_references)
valid_duration = time.time() - valid_start_time
logger.info(
'Validation result (greedy) at epoch %3d, '
'step %8d: %s: %6.2f, loss: %8.4f, ppl: %8.4f, '
'duration: %.4fs', epoch_no + 1, self.stats.steps, self.eval_metric,
valid_score, valid_loss, valid_ppl, valid_duration)
# store validation set outputs
self._store_outputs(valid_hypotheses)
# store attention plots for selected valid sentences
if valid_attention_scores:
store_attention_plots(attentions=valid_attention_scores,
targets=valid_hypotheses_raw,
sources=[s for s in valid_data.src],
indices=self.log_valid_sents,
output_prefix="{}/att.{}".format(
self.model_dir, self.stats.steps),
tb_writer=self.tb_writer,
steps=self.stats.steps)
return valid_duration
def _add_report(self,
valid_score: float,
valid_ppl: float,
valid_loss: float,
eval_metric: str,
new_best: bool = False) -> None:
"""
Append a one-line report to validation logging file.
:param valid_score: validation evaluation score [eval_metric]
:param valid_ppl: validation perplexity
:param valid_loss: validation loss (sum over whole validation set)
:param eval_metric: evaluation metric, e.g. "bleu"
:param new_best: whether this is a new best model
"""
current_lr = -1
# ignores other param groups for now
for param_group in self.optimizer.param_groups:
current_lr = param_group['lr']
if current_lr < self.learning_rate_min:
self.stats.stop = True
with open(self.valid_report_file, 'a') as opened_file:
opened_file.write(
"Steps: {}\tLoss: {:.5f}\tPPL: {:.5f}\t{}: {:.5f}\t"
"LR: {:.8f}\t{}\n".format(self.stats.steps, valid_loss,
valid_ppl, eval_metric, valid_score,
current_lr, "*" if new_best else ""))
def _log_parameters_list(self) -> None:
"""
Write all model parameters (name, shape) to the log.
"""
combiner_parameters = filter(lambda p: p.requires_grad,
self.model.combiner.parameters())
n_params = sum([np.prod(p.size()) for p in combiner_parameters])
logger.info("Total params: %d", n_params)
trainable_params = [
n for (n, p) in self.model.combiner.named_parameters() if p.requires_grad
]
logger.debug("Trainable parameters: %s", sorted(trainable_params))
assert trainable_params
def _log_examples(self,
sources: List[str],
hypotheses: List[str],
references: List[str],
sources_raw: List[List[str]] = None,
hypotheses_raw: List[List[str]] = None,
references_raw: List[List[str]] = None) -> None:
"""
Log a the first `self.log_valid_sents` sentences from given examples.
:param sources: decoded sources (list of strings)
:param hypotheses: decoded hypotheses (list of strings)
:param references: decoded references (list of strings)
:param sources_raw: raw sources (list of list of tokens)
:param hypotheses_raw: raw hypotheses (list of list of tokens)
:param references_raw: raw references (list of list of tokens)
"""
for p in self.log_valid_sents:
if p >= len(sources):
continue
logger.info("Example #%d", p)
if sources_raw is not None:
logger.debug("\tRaw source: %s", sources_raw[p])
if references_raw is not None:
logger.debug("\tRaw reference: %s", references_raw[p])
if hypotheses_raw is not None:
logger.debug("\tRaw hypothesis: %s", hypotheses_raw[p])
logger.info("\tSource: %s", sources[p])
logger.info("\tReference: %s", references[p])
logger.info("\tHypothesis: %s", hypotheses[p])
def _store_outputs(self, hypotheses: List[str]) -> None:
"""
Write current validation outputs to file in `self.model_dir.`
:param hypotheses: list of strings
"""
current_valid_output_file = "{}/{}.hyps".format(self.model_dir,
self.stats.steps)
with open(current_valid_output_file, 'w') as opened_file:
for hyp in hypotheses:
opened_file.write("{}\n".format(hyp))
class TrainStatistics:
def __init__(self,
steps: int = 0,
stop: bool = False,
total_tokens: int = 0,
best_ckpt_iter: int = 0,
best_ckpt_score: float = np.inf,
minimize_metric: bool = True,
max_steps: int = 200000) -> None:
# global update step counter
self.steps = steps
# stop training if this flag is True
# by reaching learning rate minimum
self.stop = stop
# number of total tokens seen so far
self.total_tokens = total_tokens
# store iteration point of best ckpt
self.best_ckpt_iter = best_ckpt_iter
# initial values for best scores
self.best_ckpt_score = best_ckpt_score
# minimize or maximize score
self.minimize_metric = minimize_metric
self.max_steps = max_steps
def is_best(self, score):
if self.minimize_metric:
is_best = score < self.best_ckpt_score
else:
is_best = score > self.best_ckpt_score
return is_best
def combiner_train(cfg_file: str, ckpt: str, combiner_cfg: dict) -> None:
"""
Main training function. After training, also test on test data if given.
:param cfg_file: path to configuration yaml file
"""
cfg = load_config(cfg_file)
# make logger
model_dir = make_model_dir(cfg["combiner_training"]["model_dir"],
overwrite=cfg["combiner_training"].get(
"overwrite", False))
_ = make_logger(model_dir, mode="train") # version string returned
# TODO: save version number in model checkpoints
check_combiner_cfg(combiner_cfg)
cfg["combiner"] = combiner_cfg
# set the random seed
set_seed(seed=cfg["combiner_training"].get("random_seed", 42))
# load the data
train_data, dev_data, test_data, src_vocab, trg_vocab = load_data(
data_cfg=cfg["data"])
# load model state from disk
use_cuda = cfg["combiner_training"]["use_cuda"]
model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
# build model and load parameters into it
model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
model.load_state_dict(model_checkpoint["model_state"])
for p in model.parameters():
p.requires_grad = False
combiner = build_combiner(cfg)
# load combiner from checkpoint for dynamic combiners
model.combiner = combiner
# for training management, e.g. early stopping and model selection
trainer = CombinerTrainManager(model=model, config=cfg)
# store copy of original training config in model dir
shutil.copy2(cfg_file, model_dir + "/config.yaml")
# log all entries of config
log_cfg(cfg)
log_data_info(train_data=train_data,
valid_data=dev_data,
test_data=test_data,
src_vocab=src_vocab,
trg_vocab=trg_vocab)
logger.info(str(model))
logger.info(str(combiner))
# store the vocabs
src_vocab_file = "{}/src_vocab.txt".format(cfg["combiner_training"]["model_dir"])
src_vocab.to_file(src_vocab_file)
trg_vocab_file = "{}/trg_vocab.txt".format(cfg["combiner_training"]["model_dir"])
trg_vocab.to_file(trg_vocab_file)
# train the model
trainer.train_and_validate(train_data=train_data, valid_data=dev_data) | 34,477 | 40.893074 | 89 | py |
KSTER | KSTER-main/joeynmt/decoders.py | # coding: utf-8
"""
Various decoders
"""
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from joeynmt.attention import BahdanauAttention, LuongAttention
from joeynmt.encoders import Encoder
from joeynmt.helpers import freeze_params, ConfigurationError, subsequent_mask
from joeynmt.transformer_layers import PositionalEncoding, \
TransformerDecoderLayer
# pylint: disable=abstract-method
class Decoder(nn.Module):
"""
Base decoder class
"""
@property
def output_size(self):
"""
Return the output size (size of the target vocabulary)
:return:
"""
return self._output_size
# pylint: disable=arguments-differ,too-many-arguments
# pylint: disable=too-many-instance-attributes, unused-argument
class RecurrentDecoder(Decoder):
"""A conditional RNN decoder with attention."""
def __init__(self,
rnn_type: str = "gru",
emb_size: int = 0,
hidden_size: int = 0,
encoder: Encoder = None,
attention: str = "bahdanau",
num_layers: int = 1,
vocab_size: int = 0,
dropout: float = 0.,
emb_dropout: float = 0.,
hidden_dropout: float = 0.,
init_hidden: str = "bridge",
input_feeding: bool = True,
freeze: bool = False,
**kwargs) -> None:
"""
Create a recurrent decoder with attention.
:param rnn_type: rnn type, valid options: "lstm", "gru"
:param emb_size: target embedding size
:param hidden_size: size of the RNN
:param encoder: encoder connected to this decoder
:param attention: type of attention, valid options: "bahdanau", "luong"
:param num_layers: number of recurrent layers
:param vocab_size: target vocabulary size
:param hidden_dropout: Is applied to the input to the attentional layer.
:param dropout: Is applied between RNN layers.
:param emb_dropout: Is applied to the RNN input (word embeddings).
:param init_hidden: If "bridge" (default), the decoder hidden states are
initialized from a projection of the last encoder state,
if "zeros" they are initialized with zeros,
if "last" they are identical to the last encoder state
(only if they have the same size)
:param input_feeding: Use Luong's input feeding.
:param freeze: Freeze the parameters of the decoder during training.
:param kwargs:
"""
super().__init__()
self.emb_dropout = torch.nn.Dropout(p=emb_dropout, inplace=False)
self.type = rnn_type
self.hidden_dropout = torch.nn.Dropout(p=hidden_dropout, inplace=False)
self.hidden_size = hidden_size
self.emb_size = emb_size
rnn = nn.GRU if rnn_type == "gru" else nn.LSTM
self.input_feeding = input_feeding
if self.input_feeding: # Luong-style
# combine embedded prev word +attention vector before feeding to rnn
self.rnn_input_size = emb_size + hidden_size
else:
# just feed prev word embedding
self.rnn_input_size = emb_size
# the decoder RNN
self.rnn = rnn(self.rnn_input_size, hidden_size, num_layers,
batch_first=True,
dropout=dropout if num_layers > 1 else 0.)
# combine output with context vector before output layer (Luong-style)
self.att_vector_layer = nn.Linear(
hidden_size + encoder.output_size, hidden_size, bias=True)
self.output_layer = nn.Linear(hidden_size, vocab_size, bias=False)
self._output_size = vocab_size
if attention == "bahdanau":
self.attention = BahdanauAttention(hidden_size=hidden_size,
key_size=encoder.output_size,
query_size=hidden_size)
elif attention == "luong":
self.attention = LuongAttention(hidden_size=hidden_size,
key_size=encoder.output_size)
else:
raise ConfigurationError("Unknown attention mechanism: %s. "
"Valid options: 'bahdanau', 'luong'."
% attention)
self.num_layers = num_layers
self.hidden_size = hidden_size
# to initialize from the final encoder state of last layer
self.init_hidden_option = init_hidden
if self.init_hidden_option == "bridge":
self.bridge_layer = nn.Linear(
encoder.output_size, hidden_size, bias=True)
elif self.init_hidden_option == "last":
if encoder.output_size != self.hidden_size:
if encoder.output_size != 2*self.hidden_size: # bidirectional
raise ConfigurationError(
"For initializing the decoder state with the "
"last encoder state, their sizes have to match "
"(encoder: {} vs. decoder: {})".format(
encoder.output_size, self.hidden_size))
if freeze:
freeze_params(self)
def _check_shapes_input_forward_step(self,
prev_embed: Tensor,
prev_att_vector: Tensor,
encoder_output: Tensor,
src_mask: Tensor,
hidden: Tensor) -> None:
"""
Make sure the input shapes to `self._forward_step` are correct.
Same inputs as `self._forward_step`.
:param prev_embed:
:param prev_att_vector:
:param encoder_output:
:param src_mask:
:param hidden:
"""
assert prev_embed.shape[1:] == torch.Size([1, self.emb_size])
assert prev_att_vector.shape[1:] == torch.Size(
[1, self.hidden_size])
assert prev_att_vector.shape[0] == prev_embed.shape[0]
assert encoder_output.shape[0] == prev_embed.shape[0]
assert len(encoder_output.shape) == 3
assert src_mask.shape[0] == prev_embed.shape[0]
assert src_mask.shape[1] == 1
assert src_mask.shape[2] == encoder_output.shape[1]
if isinstance(hidden, tuple): # for lstm
hidden = hidden[0]
assert hidden.shape[0] == self.num_layers
assert hidden.shape[1] == prev_embed.shape[0]
assert hidden.shape[2] == self.hidden_size
def _check_shapes_input_forward(self,
trg_embed: Tensor,
encoder_output: Tensor,
encoder_hidden: Tensor,
src_mask: Tensor,
hidden: Tensor = None,
prev_att_vector: Tensor = None) -> None:
"""
Make sure that inputs to `self.forward` are of correct shape.
Same input semantics as for `self.forward`.
:param trg_embed:
:param encoder_output:
:param encoder_hidden:
:param src_mask:
:param hidden:
:param prev_att_vector:
"""
assert len(encoder_output.shape) == 3
if encoder_hidden is not None:
assert len(encoder_hidden.shape) == 2
assert encoder_hidden.shape[-1] == encoder_output.shape[-1]
assert src_mask.shape[1] == 1
assert src_mask.shape[0] == encoder_output.shape[0]
assert src_mask.shape[2] == encoder_output.shape[1]
assert trg_embed.shape[0] == encoder_output.shape[0]
assert trg_embed.shape[2] == self.emb_size
if hidden is not None:
if isinstance(hidden, tuple): # for lstm
hidden = hidden[0]
assert hidden.shape[1] == encoder_output.shape[0]
assert hidden.shape[2] == self.hidden_size
if prev_att_vector is not None:
assert prev_att_vector.shape[0] == encoder_output.shape[0]
assert prev_att_vector.shape[2] == self.hidden_size
assert prev_att_vector.shape[1] == 1
def _forward_step(self,
prev_embed: Tensor,
prev_att_vector: Tensor, # context or att vector
encoder_output: Tensor,
src_mask: Tensor,
hidden: Tensor) -> (Tensor, Tensor, Tensor):
"""
Perform a single decoder step (1 token).
1. `rnn_input`: concat(prev_embed, prev_att_vector [possibly empty])
2. update RNN with `rnn_input`
3. calculate attention and context/attention vector
:param prev_embed: embedded previous token,
shape (batch_size, 1, embed_size)
:param prev_att_vector: previous attention vector,
shape (batch_size, 1, hidden_size)
:param encoder_output: encoder hidden states for attention context,
shape (batch_size, src_length, encoder.output_size)
:param src_mask: src mask, 1s for area before <eos>, 0s elsewhere
shape (batch_size, 1, src_length)
:param hidden: previous hidden state,
shape (num_layers, batch_size, hidden_size)
:return:
- att_vector: new attention vector (batch_size, 1, hidden_size),
- hidden: new hidden state with shape (batch_size, 1, hidden_size),
- att_probs: attention probabilities (batch_size, 1, src_len)
"""
# shape checks
self._check_shapes_input_forward_step(prev_embed=prev_embed,
prev_att_vector=prev_att_vector,
encoder_output=encoder_output,
src_mask=src_mask,
hidden=hidden)
if self.input_feeding:
# concatenate the input with the previous attention vector
rnn_input = torch.cat([prev_embed, prev_att_vector], dim=2)
else:
rnn_input = prev_embed
rnn_input = self.emb_dropout(rnn_input)
# rnn_input: batch x 1 x emb+2*enc_size
_, hidden = self.rnn(rnn_input, hidden)
# use new (top) decoder layer as attention query
if isinstance(hidden, tuple):
query = hidden[0][-1].unsqueeze(1)
else:
query = hidden[-1].unsqueeze(1) # [#layers, B, D] -> [B, 1, D]
# compute context vector using attention mechanism
# only use last layer for attention mechanism
# key projections are pre-computed
context, att_probs = self.attention(
query=query, values=encoder_output, mask=src_mask)
# return attention vector (Luong)
# combine context with decoder hidden state before prediction
att_vector_input = torch.cat([query, context], dim=2)
# batch x 1 x 2*enc_size+hidden_size
att_vector_input = self.hidden_dropout(att_vector_input)
att_vector = torch.tanh(self.att_vector_layer(att_vector_input))
# output: batch x 1 x hidden_size
return att_vector, hidden, att_probs
def forward(self,
trg_embed: Tensor,
encoder_output: Tensor,
encoder_hidden: Tensor,
src_mask: Tensor,
unroll_steps: int,
hidden: Tensor = None,
prev_att_vector: Tensor = None,
**kwargs) \
-> (Tensor, Tensor, Tensor, Tensor):
"""
Unroll the decoder one step at a time for `unroll_steps` steps.
For every step, the `_forward_step` function is called internally.
During training, the target inputs (`trg_embed') are already known for
the full sequence, so the full unrol is done.
In this case, `hidden` and `prev_att_vector` are None.
For inference, this function is called with one step at a time since
embedded targets are the predictions from the previous time step.
In this case, `hidden` and `prev_att_vector` are fed from the output
of the previous call of this function (from the 2nd step on).
`src_mask` is needed to mask out the areas of the encoder states that
should not receive any attention,
which is everything after the first <eos>.
The `encoder_output` are the hidden states from the encoder and are
used as context for the attention.
The `encoder_hidden` is the last encoder hidden state that is used to
initialize the first hidden decoder state
(when `self.init_hidden_option` is "bridge" or "last").
:param trg_embed: embedded target inputs,
shape (batch_size, trg_length, embed_size)
:param encoder_output: hidden states from the encoder,
shape (batch_size, src_length, encoder.output_size)
:param encoder_hidden: last state from the encoder,
shape (batch_size, encoder.output_size)
:param src_mask: mask for src states: 0s for padded areas,
1s for the rest, shape (batch_size, 1, src_length)
:param unroll_steps: number of steps to unroll the decoder RNN
:param hidden: previous decoder hidden state,
if not given it's initialized as in `self.init_hidden`,
shape (batch_size, num_layers, hidden_size)
:param prev_att_vector: previous attentional vector,
if not given it's initialized with zeros,
shape (batch_size, 1, hidden_size)
:return:
- outputs: shape (batch_size, unroll_steps, vocab_size),
- hidden: last hidden state (batch_size, num_layers, hidden_size),
- att_probs: attention probabilities
with shape (batch_size, unroll_steps, src_length),
- att_vectors: attentional vectors
with shape (batch_size, unroll_steps, hidden_size)
"""
# initialize decoder hidden state from final encoder hidden state
if hidden is None and encoder_hidden is not None:
hidden = self._init_hidden(encoder_hidden)
else:
# DataParallel splits batch along the 0th dim.
# Place back the batch_size to the 1st dim here.
if isinstance(hidden, tuple):
h, c = hidden
hidden = (h.permute(1, 0, 2).contiguous(),
c.permute(1, 0, 2).contiguous())
else:
hidden = hidden.permute(1, 0, 2).contiguous()
# shape (num_layers, batch_size, hidden_size)
# shape checks
self._check_shapes_input_forward(
trg_embed=trg_embed,
encoder_output=encoder_output,
encoder_hidden=encoder_hidden,
src_mask=src_mask,
hidden=hidden,
prev_att_vector=prev_att_vector)
# pre-compute projected encoder outputs
# (the "keys" for the attention mechanism)
# this is only done for efficiency
if hasattr(self.attention, "compute_proj_keys"):
self.attention.compute_proj_keys(keys=encoder_output)
# here we store all intermediate attention vectors (used for prediction)
att_vectors = []
att_probs = []
batch_size = encoder_output.size(0)
if prev_att_vector is None:
with torch.no_grad():
prev_att_vector = encoder_output.new_zeros(
[batch_size, 1, self.hidden_size])
# unroll the decoder RNN for `unroll_steps` steps
for i in range(unroll_steps):
prev_embed = trg_embed[:, i].unsqueeze(1) # batch, 1, emb
prev_att_vector, hidden, att_prob = self._forward_step(
prev_embed=prev_embed,
prev_att_vector=prev_att_vector,
encoder_output=encoder_output,
src_mask=src_mask,
hidden=hidden)
att_vectors.append(prev_att_vector)
att_probs.append(att_prob)
att_vectors = torch.cat(att_vectors, dim=1)
# att_vectors: batch, unroll_steps, hidden_size
att_probs = torch.cat(att_probs, dim=1)
# att_probs: batch, unroll_steps, src_length
outputs = self.output_layer(att_vectors)
# outputs: batch, unroll_steps, vocab_size
# DataParallel gathers batches along the 0th dim.
# Put batch_size dim to the 0th position.
if isinstance(hidden, tuple):
h, c = hidden
hidden = (h.permute(1, 0, 2).contiguous(),
c.permute(1, 0, 2).contiguous())
assert hidden[0].size(0) == batch_size
else:
hidden = hidden.permute(1, 0, 2).contiguous()
assert hidden.size(0) == batch_size
# shape (batch_size, num_layers, hidden_size)
return outputs, hidden, att_probs, att_vectors
def _init_hidden(self, encoder_final: Tensor = None) \
-> (Tensor, Optional[Tensor]):
"""
Returns the initial decoder state,
conditioned on the final encoder state of the last encoder layer.
In case of `self.init_hidden_option == "bridge"`
and a given `encoder_final`, this is a projection of the encoder state.
In case of `self.init_hidden_option == "last"`
and a size-matching `encoder_final`, this is set to the encoder state.
If the encoder is twice as large as the decoder state (e.g. when
bi-directional), just use the forward hidden state.
In case of `self.init_hidden_option == "zero"`, it is initialized with
zeros.
For LSTMs we initialize both the hidden state and the memory cell
with the same projection/copy of the encoder hidden state.
All decoder layers are initialized with the same initial values.
:param encoder_final: final state from the last layer of the encoder,
shape (batch_size, encoder_hidden_size)
:return: hidden state if GRU, (hidden state, memory cell) if LSTM,
shape (batch_size, hidden_size)
"""
batch_size = encoder_final.size(0)
# for multiple layers: is the same for all layers
if self.init_hidden_option == "bridge" and encoder_final is not None:
# num_layers x batch_size x hidden_size
hidden = torch.tanh(
self.bridge_layer(encoder_final)).unsqueeze(0).repeat(
self.num_layers, 1, 1)
elif self.init_hidden_option == "last" and encoder_final is not None:
# special case: encoder is bidirectional: use only forward state
if encoder_final.shape[1] == 2*self.hidden_size: # bidirectional
encoder_final = encoder_final[:, :self.hidden_size]
hidden = encoder_final.unsqueeze(0).repeat(self.num_layers, 1, 1)
else: # initialize with zeros
with torch.no_grad():
hidden = encoder_final.new_zeros(
self.num_layers, batch_size, self.hidden_size)
return (hidden, hidden) if isinstance(self.rnn, nn.LSTM) else hidden
def __repr__(self):
return "RecurrentDecoder(rnn=%r, attention=%r)" % (
self.rnn, self.attention)
# pylint: disable=arguments-differ,too-many-arguments
# pylint: disable=too-many-instance-attributes, unused-argument
class TransformerDecoder(Decoder):
"""
A transformer decoder with N masked layers.
Decoder layers are masked so that an attention head cannot see the future.
"""
def __init__(self,
num_layers: int = 4,
num_heads: int = 8,
hidden_size: int = 512,
ff_size: int = 2048,
dropout: float = 0.1,
emb_dropout: float = 0.1,
vocab_size: int = 1,
freeze: bool = False,
**kwargs):
"""
Initialize a Transformer decoder.
:param num_layers: number of Transformer layers
:param num_heads: number of heads for each layer
:param hidden_size: hidden size
:param ff_size: position-wise feed-forward size
:param dropout: dropout probability (1-keep)
:param emb_dropout: dropout probability for embeddings
:param vocab_size: size of the output vocabulary
:param freeze: set to True keep all decoder parameters fixed
:param kwargs:
"""
super().__init__()
self._hidden_size = hidden_size
self._output_size = vocab_size
# create num_layers decoder layers and put them in a list
self.layers = nn.ModuleList([TransformerDecoderLayer(
size=hidden_size, ff_size=ff_size, num_heads=num_heads,
dropout=dropout) for _ in range(num_layers)])
self.pe = PositionalEncoding(hidden_size)
self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-6)
self.emb_dropout = nn.Dropout(p=emb_dropout)
self.output_layer = nn.Linear(hidden_size, vocab_size, bias=False)
if freeze:
freeze_params(self)
def forward(self,
trg_embed: Tensor = None,
encoder_output: Tensor = None,
encoder_hidden: Tensor = None,
src_mask: Tensor = None,
unroll_steps: int = None,
hidden: Tensor = None,
trg_mask: Tensor = None,
**kwargs):
"""
Transformer decoder forward pass.
:param trg_embed: embedded targets
:param encoder_output: source representations
:param encoder_hidden: unused
:param src_mask:
:param unroll_steps: unused
:param hidden: unused
:param trg_mask: to mask out target paddings
Note that a subsequent mask is applied here.
:param kwargs:
:return:
"""
assert trg_mask is not None, "trg_mask required for Transformer"
x = self.pe(trg_embed) # add position encoding to word embedding
x = self.emb_dropout(x)
trg_mask = trg_mask & subsequent_mask(
trg_embed.size(1)).type_as(trg_mask)
stl_x = None
for layer in self.layers:
stl_x = x
x = layer(x=x, memory=encoder_output,
src_mask=src_mask, trg_mask=trg_mask)
c = self.layers[-1].context_representations(stl_x,
memory=encoder_output, src_mask=src_mask, trg_mask=trg_mask)
x = self.layer_norm(x)
output = self.output_layer(x)
return output, c, None, None
def __repr__(self):
return "%s(num_layers=%r, num_heads=%r)" % (
self.__class__.__name__, len(self.layers),
self.layers[0].trg_trg_att.num_heads)
| 23,155 | 40.647482 | 80 | py |
KSTER | KSTER-main/joeynmt/encoders.py | # coding: utf-8
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from joeynmt.helpers import freeze_params
from joeynmt.transformer_layers import \
TransformerEncoderLayer, PositionalEncoding
#pylint: disable=abstract-method
class Encoder(nn.Module):
"""
Base encoder class
"""
@property
def output_size(self):
"""
Return the output size
:return:
"""
return self._output_size
class RecurrentEncoder(Encoder):
"""Encodes a sequence of word embeddings"""
#pylint: disable=unused-argument
def __init__(self,
rnn_type: str = "gru",
hidden_size: int = 1,
emb_size: int = 1,
num_layers: int = 1,
dropout: float = 0.,
emb_dropout: float = 0.,
bidirectional: bool = True,
freeze: bool = False,
**kwargs) -> None:
"""
Create a new recurrent encoder.
:param rnn_type: RNN type: `gru` or `lstm`.
:param hidden_size: Size of each RNN.
:param emb_size: Size of the word embeddings.
:param num_layers: Number of encoder RNN layers.
:param dropout: Is applied between RNN layers.
:param emb_dropout: Is applied to the RNN input (word embeddings).
:param bidirectional: Use a bi-directional RNN.
:param freeze: freeze the parameters of the encoder during training
:param kwargs:
"""
super().__init__()
self.emb_dropout = torch.nn.Dropout(p=emb_dropout, inplace=False)
self.type = rnn_type
self.emb_size = emb_size
rnn = nn.GRU if rnn_type == "gru" else nn.LSTM
self.rnn = rnn(
emb_size, hidden_size, num_layers, batch_first=True,
bidirectional=bidirectional,
dropout=dropout if num_layers > 1 else 0.)
self._output_size = 2 * hidden_size if bidirectional else hidden_size
if freeze:
freeze_params(self)
#pylint: disable=invalid-name, unused-argument
def _check_shapes_input_forward(self, embed_src: Tensor, src_length: Tensor,
mask: Tensor) -> None:
"""
Make sure the shape of the inputs to `self.forward` are correct.
Same input semantics as `self.forward`.
:param embed_src: embedded source tokens
:param src_length: source length
:param mask: source mask
"""
assert embed_src.shape[0] == src_length.shape[0]
assert embed_src.shape[2] == self.emb_size
# assert mask.shape == embed_src.shape
assert len(src_length.shape) == 1
#pylint: disable=arguments-differ
def forward(self, embed_src: Tensor, src_length: Tensor, mask: Tensor,
**kwargs) -> (Tensor, Tensor):
"""
Applies a bidirectional RNN to sequence of embeddings x.
The input mini-batch x needs to be sorted by src length.
x and mask should have the same dimensions [batch, time, dim].
:param embed_src: embedded src inputs,
shape (batch_size, src_len, embed_size)
:param src_length: length of src inputs
(counting tokens before padding), shape (batch_size)
:param mask: indicates padding areas (zeros where padding), shape
(batch_size, src_len, embed_size)
:return:
- output: hidden states with
shape (batch_size, max_length, directions*hidden),
- hidden_concat: last hidden state with
shape (batch_size, directions*hidden)
"""
self._check_shapes_input_forward(embed_src=embed_src,
src_length=src_length,
mask=mask)
total_length = embed_src.size(1)
# apply dropout to the rnn input
embed_src = self.emb_dropout(embed_src)
packed = pack_padded_sequence(embed_src, src_length.cpu(),
batch_first=True)
output, hidden = self.rnn(packed)
#pylint: disable=unused-variable
if isinstance(hidden, tuple):
hidden, memory_cell = hidden
output, _ = pad_packed_sequence(output, batch_first=True,
total_length=total_length)
# hidden: dir*layers x batch x hidden
# output: batch x max_length x directions*hidden
batch_size = hidden.size()[1]
# separate final hidden states by layer and direction
hidden_layerwise = hidden.view(self.rnn.num_layers,
2 if self.rnn.bidirectional else 1,
batch_size, self.rnn.hidden_size)
# final_layers: layers x directions x batch x hidden
# concatenate the final states of the last layer for each directions
# thanks to pack_padded_sequence final states don't include padding
fwd_hidden_last = hidden_layerwise[-1:, 0]
bwd_hidden_last = hidden_layerwise[-1:, 1]
# only feed the final state of the top-most layer to the decoder
#pylint: disable=no-member
hidden_concat = torch.cat(
[fwd_hidden_last, bwd_hidden_last], dim=2).squeeze(0)
# final: batch x directions*hidden
return output, hidden_concat
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.rnn)
class TransformerEncoder(Encoder):
"""
Transformer Encoder
"""
#pylint: disable=unused-argument
def __init__(self,
hidden_size: int = 512,
ff_size: int = 2048,
num_layers: int = 8,
num_heads: int = 4,
dropout: float = 0.1,
emb_dropout: float = 0.1,
freeze: bool = False,
**kwargs):
"""
Initializes the Transformer.
:param hidden_size: hidden size and size of embeddings
:param ff_size: position-wise feed-forward layer size.
(Typically this is 2*hidden_size.)
:param num_layers: number of layers
:param num_heads: number of heads for multi-headed attention
:param dropout: dropout probability for Transformer layers
:param emb_dropout: Is applied to the input (word embeddings).
:param freeze: freeze the parameters of the encoder during training
:param kwargs:
"""
super().__init__()
# build all (num_layers) layers
self.layers = nn.ModuleList([
TransformerEncoderLayer(size=hidden_size, ff_size=ff_size,
num_heads=num_heads, dropout=dropout)
for _ in range(num_layers)])
self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-6)
self.pe = PositionalEncoding(hidden_size)
self.emb_dropout = nn.Dropout(p=emb_dropout)
self._output_size = hidden_size
if freeze:
freeze_params(self)
#pylint: disable=arguments-differ
def forward(self,
embed_src: Tensor,
src_length: Tensor,
mask: Tensor, **kwargs) -> (Tensor, Tensor):
"""
Pass the input (and mask) through each layer in turn.
Applies a Transformer encoder to sequence of embeddings x.
The input mini-batch x needs to be sorted by src length.
x and mask should have the same dimensions [batch, time, dim].
:param embed_src: embedded src inputs,
shape (batch_size, src_len, embed_size)
:param src_length: length of src inputs
(counting tokens before padding), shape (batch_size)
:param mask: indicates padding areas (zeros where padding), shape
(batch_size, 1, src_len)
:return:
- output: hidden states with
shape (batch_size, max_length, directions*hidden),
- hidden_concat: last hidden state with
shape (batch_size, directions*hidden)
"""
x = self.pe(embed_src) # add position encoding to word embeddings
x = self.emb_dropout(x)
for layer in self.layers:
x = layer(x, mask)
return self.layer_norm(x), None
def __repr__(self):
return "%s(num_layers=%r, num_heads=%r)" % (
self.__class__.__name__, len(self.layers),
self.layers[0].src_src_att.num_heads)
| 8,571 | 36.432314 | 80 | py |
KSTER | KSTER-main/joeynmt/kernel.py | import torch
from typing import Tuple, Union
class Kernel(object):
def __init__(self) -> None:
super(Kernel, self).__init__()
def similarity(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor]) -> torch.Tensor:
raise NotImplementedError
def compute_example_based_distribution(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor], token_indices: torch.Tensor,
vocab_size: int) -> Tuple[torch.Tensor, torch.Tensor]:
scores = self.similarity(distances, bandwidth)
sparse_distribution = torch.softmax(scores, dim=-1)
zeros = torch.zeros(size=(sparse_distribution.size(0), vocab_size), device=sparse_distribution.device, dtype=sparse_distribution.dtype)
distribution = torch.scatter_add(zeros, -1, token_indices, sparse_distribution)
return distribution, sparse_distribution
class GaussianKernel(Kernel):
def __init__(self) -> None:
super(GaussianKernel, self).__init__()
def similarity(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor]) -> torch.Tensor:
return - distances / bandwidth
class LaplacianKernel(Kernel):
def __init__(self) -> None:
super(LaplacianKernel, self).__init__()
def similarity(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor]) -> torch.Tensor:
return - torch.sqrt(distances) / bandwidth | 1,423 | 40.882353 | 143 | py |
wsireg | wsireg-master/setup.py | #!/usr/bin/env python
"""The setup script."""
from setuptools import find_packages, setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup_requirements = [
'pytest-runner',
]
test_requirements = [
'pytest>=3',
]
setup(
author="Nathan Heath Patterson",
author_email='heath.patterson@vanderbilt.edu',
python_requires='>=3.8',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
description="python package for registering multimodal whole slide microscopy images",
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='wsireg',
name='wsireg',
packages=find_packages(include=['wsireg', 'wsireg.*']),
setup_requires=setup_requirements,
entry_points={
"console_scripts": [
"wsireg2d = wsireg.wsireg2d:main",
]
},
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/nhpatterson/wsireg',
version='0.3.8',
zip_safe=False,
)
| 1,572 | 26.12069 | 90 | py |
wsireg | wsireg-master/wsireg/wsireg2d.py | import json
import tempfile
import time
from copy import copy, deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from warnings import warn
import shutil
import numpy as np
import yaml
from wsireg.parameter_maps.preprocessing import ImagePreproParams
from wsireg.parameter_maps.reg_model import RegModel
from wsireg.reg_images import MergeRegImage
from wsireg.reg_images.reg_image import RegImage
from wsireg.reg_images.loader import reg_image_loader
from wsireg.reg_shapes import RegShapes
from wsireg.reg_transforms import RegTransform
from wsireg.reg_transforms import RegTransformSeq
from wsireg.utils.config_utils import parse_check_reg_config
from wsireg.utils.im_utils import ARRAYLIKE_CLASSES
from wsireg.utils.output_utils import (
read_elastix_iteration_dir,
read_elastix_transform_dir,
write_iteration_plots,
)
from wsireg.utils.reg_utils import (
_prepare_reg_models,
register_2d_images_itkelx,
sitk_pmap_to_dict,
)
from wsireg.utils.shape_utils import invert_nonrigid_transforms
from wsireg.utils.tform_utils import identity_elx_transform
from wsireg.writers.merge_ome_tiff_writer import MergeOmeTiffWriter
from wsireg.writers.ome_tiff_writer import OmeTiffWriter
from wsireg.writers.tiled_ome_tiff_writer import OmeTiffTiledWriter
class WsiReg2D(object):
"""
Class to define a 2D registration graph and execute the registrations and transformations of the graph
Parameters
----------
project_name: str
Project name will prefix all output files and directories
output_dir: str
Directory where registration data will be stored
cache_images: bool
whether to store images as they are preprocessed for registration (if you need to repeat or modify settings
this will avoid image io and preprocessing)
config: str or Path
path to a 2D wsireg YAML configuration
Attributes
----------
project_name: str
Global project name, will be appended to all output files and folders
output_dir: Path
Directory where registration data will be stored
image_cache: Path
Directory where images are cached after preprocessing
modalities: dict
dictionary of modality information (file path, spatial res., preprocessing), defines a graph node
modalities: list
list of all modality names
n_modalities: int
number of modalities (nodes) in the graphs
reg_paths: dict
dictionary of a modalities path from node to node
reg_graph_edges: dict
generated dictionary of necessary registrations to move modalities to their target modality
n_registrations: int
number of explicit registrations (edges) in the graphs
transformation_paths: dict
generated dictionary of necessary source - target transformations to transform modalities to their target modality
transformations: dict
per modality dictionary containing transformation parameters for each registration
attachment_images: dict
images to be transformed along the path of the defined graph, assoicated to a given modality (masks, other registered images)
attachment_shapes: dict
shape data attached to a modality to be transformed along the graph
registration_iter_data: Dict[str,Dict[int, Dict[int, Dict[str, np.ndarray]]]]
elastix data for each iteration in the registration sorted by transformation model and resolution
registration_tform_data: Dict[str, Dict[int, Dict[int, Dict[str, str]]]]
elastix transform data for each resolution sorted by transformation model and resolution
"""
def __init__(
self,
project_name: Optional[str] = None,
output_dir: Optional[Union[str, Path]] = None,
cache_images: bool = True,
config: Optional[Union[str, Path]] = None,
):
self.project_name: Optional[str] = None
self.output_dir: Optional[Union[str, Path]] = None
self.image_cache: Optional[Path] = None
self.setup_project_output(project_name, output_dir)
self.cache_images = cache_images
self.pairwise = False
self._modalities = dict()
self._modality_names = []
self._reg_paths = dict()
self._reg_graph_edges = []
self._transform_paths = dict()
self._transformations = None
self._preprocessed_image_sizes: Dict[str, Tuple[int, int]] = dict()
self._preprocessed_image_spacings: Dict[
str, Union[Tuple[int, int], Tuple[float, float]]
] = dict()
self._transformed_shapes_spacings: Dict[
str, Tuple[float, float]
] = dict()
self.n_modalities: Optional[int] = None
self.n_registrations: Optional[int] = None
self.attachment_images = dict()
self._shape_sets = dict()
self._shape_set_names = []
self.merge_modalities = dict()
self.original_size_transforms = dict()
self.registration_iter_data: Dict[
str, Dict[int, Dict[int, Dict[str, np.ndarray]]]
] = dict()
self.registration_tform_data: Dict[
str, Dict[int, Dict[int, Dict[str, str]]]
] = dict()
if config:
self.add_data_from_config(config)
def setup_project_output(
self,
project_name: Optional[str] = None,
output_dir: Optional[Union[str, Path]] = None,
) -> None:
"""
Set up the project directory and image cache.
Parameters
----------
project_name: str
Project name will prefix all output files and directories
output_dir: str
Directory where registration data will be stored
"""
if project_name is None:
self.project_name = 'RegProj'
else:
self.project_name = project_name
if output_dir is None:
output_dir = "./"
self.output_dir = Path(output_dir)
self.image_cache = self.output_dir / ".imcache_{}".format(project_name)
@property
def modalities(self) -> Dict[str, Any]:
"""Image modality information stored as a dictionary where keys are images and values
are all image modality related information."""
return self._modalities
@modalities.setter
def modalities(self, modality):
self._modalities.update(modality)
self.n_modalities = len(self._modalities)
@property
def shape_sets(self):
"""Shape data stored as a dictionary where keys are shape sets and values
are all shape related information."""
return self._shape_sets
@shape_sets.setter
def shape_sets(self, shape_set):
self._shape_sets.update(shape_set)
@property
def shape_set_names(self) -> List[str]:
"""Name of all shape sets"""
return self._shape_set_names
@shape_set_names.setter
def shape_set_names(self, shape_set_name):
self._shape_set_names.append(shape_set_name)
@property
def modality_names(self) -> List[str]:
"""List of all the modality names."""
return self._modality_names
@modality_names.setter
def modality_names(self, modality_name):
self._modality_names.append(modality_name)
def add_modality(
self,
modality_name: str,
image_fp: Union[Path, str, np.ndarray],
image_res: Union[int, float] = 1,
channel_names: Optional[List[str]] = None,
channel_colors: Optional[List[str]] = None,
preprocessing: Optional[
Union[ImagePreproParams, Dict[str, Any]]
] = None,
mask: Optional[Union[str, Path, np.ndarray]] = None,
prepro_dict: Optional[Dict[str, Any]] = None,
output_res: Optional[
Union[int, float, Tuple[int, int], Tuple[float, float]]
] = None,
) -> None:
"""
Add an image modality (node) to the registration graph
Parameters
----------
modality_name : str
Unique name identifier for the modality
image_fp : str
file path to the image to be read
image_res : float
spatial resolution of image in units per px (i.e. 0.9 um / px)
channel_names: List[str]
names for the channels to go into the OME-TIFF
channel_colors: List[str]
channels colors for OME-TIFF (not implemented)
mask: Union[str, Path, np.ndarray]
path to binary mask (>0 is in) image for registration and/or cropping or a geoJSON with shapes
that will be processed to a binary mask
preprocessing: ImagePreproParams
preprocessing parameters for the modality for registration. Registration images should be a xy single plane
so many modalities (multi-channel, RGB) must "create" a single channel.
Defaults: multi-channel images -> max intensity project image
RGB -> greyscale then intensity inversion (black background, white foreground)
output_res : Union[Tuple[int,int], Tuple[float,float]]
change output spacing/resolution when resampling images, default will be the spacing
of the final target image
prepro_dict: dict
deprecated version kept temporarily
"""
if modality_name in self._modality_names:
raise ValueError(
'modality named \"{}\" is already in modality_names'.format(
modality_name
)
)
if prepro_dict:
preprocessing = prepro_dict
DeprecationWarning(
"'prepro_dict' argument is deprepcated and will be removed\
in a future release, use 'preprocessing' instead"
)
if preprocessing:
if isinstance(preprocessing, dict):
image_prepro = ImagePreproParams(**preprocessing)
else:
image_prepro = preprocessing
else:
image_prepro = ImagePreproParams()
if isinstance(output_res, (int, float)):
output_res = (output_res, output_res)
self.modalities = {
modality_name: {
"image_filepath": image_fp,
"image_res": image_res,
"channel_names": channel_names,
"channel_colors": channel_colors,
"preprocessing": image_prepro,
"mask": mask,
"output_res": output_res,
}
}
self.modality_names = modality_name
def add_shape_set(
self,
attachment_modality: str,
shape_set_name: str,
shape_files: List[Union[str, Path]],
image_res: Union[int, float],
) -> None:
"""
Add a shape set to the graph
Parameters
----------
attachment_modality : str
image modality to which the shapes are attached
shape_set_name : str
Unique name identifier for the shape set
shape_files : list of file paths
list of shape data in geoJSON format or list of dicts containing following keys:
"array" = np.ndarray of xy coordinates, "shape_type" = geojson shape type (Polygon, MultiPolygon, etc.),
"shape_name" = class of the shape("tumor","normal",etc.)
image_res : float
spatial resolution of shape data's associated image in units per px (i.e. 0.9 um / px)
"""
if shape_set_name in self._shape_set_names:
raise ValueError(
'shape named \"{}\" is already in shape_set_names'.format(
shape_set_name
)
)
self.shape_sets = {
shape_set_name: {
"shape_files": shape_files,
"image_res": image_res,
"attachment_modality": attachment_modality,
}
}
self.shape_set_names = shape_set_name
def add_attachment_images(
self,
attachment_modality: str,
modality_name: str,
image_fp: Union[str, Path],
image_res: Union[int, float] = 1,
channel_names: Optional[List[str]] = None,
channel_colors: Optional[List[str]] = None,
) -> None:
"""
Images which are unregistered between modalities, but are transformed following the path of one of the graph's
modalities.
Parameters
----------
attachment_modality : str
image modality to which the new image are attached
modality_name : str
name of the added attachment image
image_fp : str
path to the attachment modality, it will be imported and transformed without preprocessing
image_res : float
spatial resolution of attachment image data's in units per px (i.e. 0.9 um / px)
channel_names: List[str]
names for the channels to go into the OME-TIFF
channel_colors: List[str]
channels colors for OME-TIFF (not implemented)
"""
if attachment_modality not in self.modality_names:
raise ValueError(
'attachment modality named \"{}\" not found in modality_names'.format(
attachment_modality
)
)
self.add_modality(
modality_name,
image_fp,
image_res,
channel_names=channel_names,
channel_colors=channel_colors,
)
self.attachment_images[modality_name] = attachment_modality
def add_attachment_shapes(
self,
attachment_modality: str,
shape_set_name: str,
shape_files: List[Union[str, Path]],
) -> None:
"""
Add attached shapes
Parameters
----------
attachment_modality : str
image modality to which the shapes are attached
shape_set_name : str
Unique name identifier for the shape set
shape_files : list of file paths
list of shape data in geoJSON format or list of dicts containing following keys:
"array" = np.ndarray of xy coordinates, "shape_type" = geojson shape type (Polygon, MultiPolygon, etc.),
"shape_name" = class of the shape("tumor","normal",etc.)
"""
if attachment_modality not in self.modality_names:
raise ValueError(
'attachment modality \"{}\" for shapes \"{}\" not found in modality_names {}'.format(
attachment_modality, shape_set_name, self.modality_names
)
)
image_res = self.modalities[attachment_modality]["image_res"]
self.add_shape_set(
attachment_modality, shape_set_name, shape_files, image_res
)
@property
def reg_paths(self) -> Dict[str, List[str]]:
"""Dictionary of paths between modalities by modality name.
Keys are modality name and the values are a list of modalities it passes through."""
return self._reg_paths
@reg_paths.setter
def reg_paths(self, path_values):
(
src_modality,
tgt_modality,
thru_modality,
reg_params,
override_prepro,
) = path_values
if thru_modality != tgt_modality:
self._reg_paths.update(
{src_modality: [thru_modality, tgt_modality]}
)
else:
self._reg_paths.update({src_modality: [tgt_modality]})
if override_prepro:
if override_prepro.get("source"):
source_override = ImagePreproParams(
**override_prepro.get("source")
)
else:
source_override = None
if override_prepro.get("target"):
target_override = ImagePreproParams(
**override_prepro.get("target")
)
else:
target_override = None
else:
source_override = None
target_override = None
self.reg_graph_edges = {
'modalities': {'source': src_modality, 'target': thru_modality},
'params': reg_params,
"source_override": source_override,
"target_override": target_override,
}
self.transform_paths = self._reg_paths
def add_reg_path(
self,
src_modality_name: str,
tgt_modality_name: str,
thru_modality: Optional[str] = None,
reg_params: Union[str, RegModel, List[str], List[RegModel]] = [
"rigid"
],
override_prepro: dict = {"source": None, "target": None},
):
"""
Add registration path between modalities as well as a thru modality that describes where to attach edges.
Parameters
----------
src_modality_name : str
modality that has been added to graph to be transformed to tgt_modality
tgt_modality_name : str
modality that has been added to graph that is being aligned to
thru_modality: str
modality that has been added to graph by which another should be run through
reg_params: list of RegModel/str or str
Elastix registration parameters, from RegModel or as a string corresponding to one of the parameter
maps enumerated in wsireg.parameter_maps.reg_params.RegModel
override_prepro: dict
set specific preprocessing for a given registration edge for the source or target image that will override
the set modality preprocessing FOR THIS REGISTRATION ONLY.
"""
if src_modality_name not in self.modality_names:
raise ValueError("source modality not found!")
if tgt_modality_name not in self.modality_names:
raise ValueError("target modality not found!")
if thru_modality is None:
self.reg_paths = (
src_modality_name,
tgt_modality_name,
tgt_modality_name,
reg_params,
override_prepro,
)
else:
self.reg_paths = (
src_modality_name,
tgt_modality_name,
thru_modality,
reg_params,
override_prepro,
)
@property
def reg_graph_edges(self):
return self._reg_graph_edges
@reg_graph_edges.setter
def reg_graph_edges(self, edge):
self._reg_graph_edges.append(edge)
self.n_registrations = len(self._reg_graph_edges)
@property
def transform_paths(self):
return self._transform_paths
@transform_paths.setter
def transform_paths(self, reg_paths):
transform_path_dict = {}
for k, v in reg_paths.items():
tform_path = self.find_path(k, v[-1])
if self.pairwise is True:
tform_path_modalities = tform_path[:1]
else:
tform_path_modalities = tform_path
transform_edges = []
for modality in tform_path_modalities:
for edge in self.reg_graph_edges:
edge_modality = edge["modalities"]['source']
if modality == edge_modality:
transform_edges.append(edge["modalities"])
transform_path_dict.update({k: transform_edges})
self._transform_paths = transform_path_dict
def find_path(
self,
start_modality: str,
end_modality: str,
path: Optional[List[str]] = None,
) -> Optional[List[str]]:
"""
Find a path from start_modality to end_modality in the graph
"""
if path is None:
path = []
path = path + [start_modality]
if start_modality == end_modality:
return path
if start_modality not in self.reg_paths:
return None
for modality in self.reg_paths[start_modality]:
if modality not in path:
extended_path = self.find_path(modality, end_modality, path)
if extended_path:
return extended_path
return None
def _find_nonreg_modalities(self) -> None:
registered_modalities = [
edge.get("modalities").get("source")
for edge in self.reg_graph_edges
]
non_reg_modalities = list(
set(self.modality_names).difference(registered_modalities)
)
# remove attachment modalities
for attachment_modality in self.attachment_images.keys():
non_reg_modalities.pop(
non_reg_modalities.index(attachment_modality)
)
return non_reg_modalities
def save_config(
self,
output_file_path: Optional[Union[str, Path]] = None,
registered: bool = False,
) -> str:
ts = time.strftime('%Y%m%d-%H%M%S')
status = "registered" if registered is True else "setup"
reg_paths = {}
for idx, edge in enumerate(self.reg_graph_edges):
src_modality = edge.get("modalities").get("source")
if len(self.reg_paths[src_modality]) > 1:
thru_modality = self.reg_paths[src_modality][0]
else:
thru_modality = None
tgt_modality = self.reg_paths[src_modality][-1]
reg_paths.update(
{
f"reg_path_{idx}": {
"src_modality_name": edge.get("modalities").get(
"source"
),
"tgt_modality_name": tgt_modality,
"thru_modality": thru_modality,
"reg_params": edge.get("params"),
}
}
)
reg_graph_edges = deepcopy(self.reg_graph_edges)
[rge.pop("transforms", None) for rge in reg_graph_edges]
modalities_out = deepcopy(self.modalities)
for mod, data in modalities_out.items():
if isinstance(data["image_filepath"], ARRAYLIKE_CLASSES):
data["image_filepath"] = "ArrayLike"
if isinstance(data["preprocessing"], ImagePreproParams):
data["preprocessing"] = deepcopy(data["preprocessing"]).dict(
exclude_none=True, exclude_defaults=True
)
config = {
"project_name": self.project_name,
"output_dir": str(self.output_dir),
"cache_images": self.cache_images,
"modalities": modalities_out,
"reg_paths": reg_paths,
"reg_graph_edges": reg_graph_edges
if status == "registered"
else None,
"original_size_transforms": self.original_size_transforms
if status == "registered"
else None,
"attachment_shapes": self.shape_sets
if len(self._shape_sets) > 0
else None,
"attachment_images": self.attachment_images
if len(self.attachment_images) > 0
else None,
"merge_modalities": self.merge_modalities,
}
if not output_file_path:
output_file_path = (
self.output_dir
/ f"{ts}-{self.project_name}-configuration-{status}.yaml"
)
with open(str(output_file_path), "w") as f:
yaml.dump(config, f, sort_keys=False)
return str(output_file_path)
def register_images(self, parallel=False):
"""
Start image registration process for all modalities
Parameters
----------
parallel : bool
whether to run each edge in parallel (not implemented yet)
"""
if self.cache_images is True:
self.image_cache.mkdir(parents=False, exist_ok=True)
self.save_config(registered=False)
for reg_edge in self.reg_graph_edges:
if (
reg_edge.get("registered") is None
or reg_edge.get("registered") is False
):
src_name = reg_edge["modalities"]["source"]
tgt_name = reg_edge["modalities"]["target"]
src_mod_data = self.modalities[src_name].copy()
tgt_mod_data = self.modalities[tgt_name].copy()
src_reg_image = reg_image_loader(
src_mod_data["image_filepath"],
src_mod_data["image_res"],
preprocessing=src_mod_data["preprocessing"],
mask=src_mod_data["mask"],
)
tgt_reg_image = reg_image_loader(
tgt_mod_data["image_filepath"],
tgt_mod_data["image_res"],
preprocessing=tgt_mod_data["preprocessing"],
mask=tgt_mod_data["mask"],
)
src_override_prepro = reg_edge.get("source_override")
tgt_override_prepro = reg_edge.get("target_override")
src_cached = src_reg_image.check_cache_preprocessing(
self.image_cache, src_name
)
tgt_cached = src_reg_image.check_cache_preprocessing(
self.image_cache, tgt_name
)
if not src_cached and not src_override_prepro:
src_reg_image.read_reg_image()
if self.cache_images:
src_reg_image.cache_image_data(
self.image_cache, src_name, check=False
)
elif src_override_prepro:
if src_override_prepro:
src_reg_image._preprocessing = src_override_prepro
src_reg_image.read_reg_image()
if self.cache_images:
src_reg_image.cache_image_data(
self.image_cache,
f"{src_name}-{tgt_name}-override",
check=False,
)
else:
src_reg_image.load_from_cache(self.image_cache, src_name)
if not tgt_cached and not tgt_override_prepro:
tgt_reg_image.read_reg_image()
if self.cache_images:
tgt_reg_image.cache_image_data(
self.image_cache, tgt_name, check=False
)
elif tgt_override_prepro:
if tgt_override_prepro:
tgt_reg_image._preprocessing = tgt_override_prepro
tgt_reg_image.read_reg_image()
if self.cache_images:
src_reg_image.cache_image_data(
self.image_cache,
f"{tgt_name}-{src_name}-override",
check=False,
)
else:
tgt_reg_image.load_from_cache(self.image_cache, tgt_name)
self._preprocessed_image_spacings.update(
{src_name: src_reg_image.reg_image.GetSpacing()}
)
self._preprocessed_image_spacings.update(
{tgt_name: tgt_reg_image.reg_image.GetSpacing()}
)
self._preprocessed_image_sizes.update(
{src_name: src_reg_image.reg_image.GetSize()}
)
self._preprocessed_image_sizes.update(
{tgt_name: tgt_reg_image.reg_image.GetSize()}
)
reg_params = reg_edge["params"]
output_path = (
self.output_dir
/ "{}-{}_to_{}_reg_output".format(
self.project_name,
reg_edge["modalities"]["source"],
reg_edge["modalities"]["target"],
)
)
output_path.mkdir(parents=False, exist_ok=True)
reg_params_prepared = _prepare_reg_models(reg_params)
reg_tforms = register_2d_images_itkelx(
src_reg_image,
tgt_reg_image,
reg_params_prepared,
output_path,
)
reg_tforms = [sitk_pmap_to_dict(tf) for tf in reg_tforms]
initial_transforms = src_reg_image.pre_reg_transforms
if initial_transforms:
initial_transforms_rt = [
RegTransform(t) for t in initial_transforms
]
initial_transforms_idx = [
idx for idx, _ in enumerate(initial_transforms_rt)
]
initial_rt_seq = RegTransformSeq(
initial_transforms_rt, initial_transforms_idx
)
reg_tforms_rt = [RegTransform(t) for t in reg_tforms]
reg_tforms_idx = [0 for _ in reg_tforms_rt]
reg_rt_seq = RegTransformSeq(reg_tforms_rt, reg_tforms_idx)
reg_edge["transforms"] = {
'initial': initial_rt_seq if initial_transforms else None,
'registration': reg_rt_seq,
}
self.original_size_transforms.update(
{tgt_name: tgt_reg_image.original_size_transform}
)
reg_edge["registered"] = True
data_key = f'{reg_edge["modalities"]["source"]}_to_{reg_edge["modalities"]["target"]}'
all_transform_data = read_elastix_transform_dir(output_path)
all_iteration_data = read_elastix_iteration_dir(output_path)
write_iteration_plots(
all_iteration_data, data_key, output_path
)
self.registration_iter_data.update(
{data_key: all_iteration_data}
)
self.registration_tform_data.update(
{data_key: all_transform_data}
)
self.transformations = self.reg_graph_edges
@property
def transformations(self):
return self._transformations
@transformations.setter
def transformations(self, reg_graph_edges):
self._transformations = self._collate_transformations()
def add_merge_modalities(self, merge_name, modalities):
for modality in modalities:
try:
self.modalities[modality]
except KeyError:
raise ValueError(
f"Modality for merger [{modality}] is not a modality "
f"within the graph, current modalitles : "
f"{self.modality_names}"
)
self.merge_modalities.update({merge_name: modalities})
def _generate_reg_transforms(self):
self._reg_graph_edges["reg_transforms"]
def _collate_transformations(self):
transforms = dict()
edge_modality_pairs = [v['modalities'] for v in self.reg_graph_edges]
for modality, tform_edges in self.transform_paths.items():
full_tform_seq = RegTransformSeq()
for idx, tform_edge in enumerate(tform_edges):
reg_edge_tforms = self.reg_graph_edges[
edge_modality_pairs.index(tform_edge)
]["transforms"]
if idx == 0:
transforms[modality] = {
f"initial-{tform_edges[idx]['source']}": reg_edge_tforms[
'initial'
],
f"{str(idx).zfill(3)}-to-{tform_edges[idx]['target']}": reg_edge_tforms[
'registration'
],
}
if reg_edge_tforms['initial']:
full_tform_seq.append(reg_edge_tforms['initial'])
full_tform_seq.append(reg_edge_tforms['registration'])
else:
transforms[modality][
f"{str(idx).zfill(3)}-to-{tform_edges[idx]['target']}"
] = reg_edge_tforms['registration']
full_tform_seq.append(reg_edge_tforms['registration'])
transforms[modality]["full-transform-seq"] = full_tform_seq
return transforms
def _prepare_nonreg_image_transform(
self,
modality_key,
attachment=False,
attachment_modality=None,
to_original_size=True,
):
print(
"preparing transforms for non-registered modality : {} ".format(
modality_key
)
)
output_path = self.output_dir / "{}-{}_registered".format(
self.project_name, modality_key
)
if attachment:
im_data_key = copy(modality_key)
modality_key = attachment_modality
transformations = None
im_data = self.modalities[modality_key]
if (
im_data["preprocessing"].rot_cc != 0
or im_data["preprocessing"].flip
or im_data["preprocessing"].crop_to_mask_bbox
or im_data["preprocessing"].mask_bbox
):
im_initial_transforms = RegImage.load_orignal_size_transform(
self.image_cache, modality_key
)
if any(im_initial_transforms):
transformations = RegTransformSeq(
[RegTransform(t) for t in im_initial_transforms[0]],
transform_seq_idx=[
idx for idx in range(len(im_initial_transforms))
],
)
if to_original_size and self.original_size_transforms[modality_key]:
o_size_tform = self.original_size_transforms[modality_key]
if isinstance(o_size_tform, list):
o_size_tform = o_size_tform[0]
orig_size_rt = RegTransformSeq(
RegTransform(o_size_tform),
transform_seq_idx=[0],
)
if transformations:
transformations.append(orig_size_rt)
else:
transformations = orig_size_rt
if im_data["preprocessing"].downsampling > 1 and transformations:
if not im_data["output_res"]:
output_spacing_target = im_data["image_res"]
transformations.set_output_spacing(
(output_spacing_target, output_spacing_target)
)
else:
transformations.set_output_spacing(im_data["output_res"])
elif im_data["preprocessing"].downsampling > 1 and not transformations:
transformations = RegTransformSeq(
[
RegTransform(
identity_elx_transform(
self._preprocessed_image_sizes[modality_key],
self._preprocessed_image_spacings[modality_key],
)
)
],
transform_seq_idx=[0],
)
if not im_data["output_res"]:
output_spacing_target = im_data["image_res"]
transformations.set_output_spacing(
(output_spacing_target, output_spacing_target)
)
else:
transformations.set_output_spacing(im_data["output_res"])
if attachment:
im_data = self.modalities[im_data_key]
return im_data, transformations, output_path
def _prepare_reg_image_transform(
self,
edge_key,
attachment=False,
attachment_modality=None,
to_original_size=True,
):
if attachment:
final_modality = self.reg_paths[attachment_modality][-1]
transformations = copy(
self.transformations[attachment_modality]["full-transform-seq"]
)
else:
final_modality = self.reg_paths[edge_key][-1]
transformations = copy(
self.transformations[edge_key]["full-transform-seq"]
)
output_path = self.output_dir / "{}-{}_to_{}_registered".format(
self.project_name,
edge_key,
final_modality,
)
if attachment:
im_data_key = copy(edge_key)
edge_key = attachment_modality
im_data = self.modalities[edge_key]
if (
self.original_size_transforms.get(final_modality)
and to_original_size
):
print("adding transform to original size")
original_size_transform = self.original_size_transforms[
final_modality
]
if isinstance(original_size_transform, list):
original_size_transform = original_size_transform[0]
orig_size_rt = RegTransformSeq(
RegTransform(original_size_transform), transform_seq_idx=[0]
)
transformations.append(orig_size_rt)
if im_data["preprocessing"].downsampling > 1:
if not im_data["output_res"]:
output_spacing_target = self.modalities[final_modality][
"image_res"
]
transformations.set_output_spacing(
(output_spacing_target, output_spacing_target)
)
else:
transformations.set_output_spacing(im_data["output_res"])
elif im_data["output_res"]:
transformations.set_output_spacing(im_data["output_res"])
if attachment:
im_data = self.modalities[im_data_key]
return im_data, transformations, output_path
def _transform_write_image(
self, im_data, transformations, output_path, file_writer="ome.tiff"
):
tfregimage = reg_image_loader(
im_data["image_filepath"],
im_data["image_res"],
channel_names=im_data.get("channel_names"),
channel_colors=im_data.get("channel_colors"),
)
ometiffwriter = OmeTiffWriter(
tfregimage, reg_transform_seq=transformations
)
if (
file_writer == "ome.tiff-bytile"
and ometiffwriter.reg_image.reader not in ["czi", "sitk"]
):
ometiffwriter = OmeTiffTiledWriter(
tfregimage, reg_transform_seq=transformations
)
im_fp = ometiffwriter.write_image_by_tile(
output_path.stem,
output_dir=str(self.output_dir),
)
else:
im_fp = ometiffwriter.write_image_by_plane(
output_path.stem,
output_dir=str(self.output_dir),
)
return im_fp
def _transform_write_merge_images(self, to_original_size=True):
def determine_attachment(sub_image):
if sub_image in self.attachment_images.keys():
attachment_modality = self.attachment_images[sub_image]
attachment = True
else:
attachment_modality = None
attachment = False
return attachment, attachment_modality
for merge_name, sub_images in self.merge_modalities.items():
im_fps = []
im_res = []
im_ch_names = []
transformations = []
non_reg_modalities = self._find_nonreg_modalities()
for sub_image in sub_images:
attachment, attachment_modality = determine_attachment(
sub_image
)
im_data = self.modalities[sub_image]
im_fps.append(im_data["image_filepath"])
im_res.append(im_data["image_res"])
im_ch_names.append(im_data.get("channel_names"))
if (
sub_image not in non_reg_modalities
and attachment_modality not in non_reg_modalities
):
(
_,
sub_im_transforms,
_,
) = self._prepare_reg_image_transform(
sub_image,
attachment=attachment,
attachment_modality=attachment_modality,
to_original_size=to_original_size,
)
else:
(
_,
sub_im_transforms,
_,
) = self._prepare_nonreg_image_transform(
sub_image,
attachment=attachment,
attachment_modality=attachment_modality,
to_original_size=to_original_size,
)
transformations.append(sub_im_transforms)
output_path = self.output_dir / "{}-{}_merged-registered".format(
self.project_name,
merge_name,
)
merge_regimage = MergeRegImage(
im_fps,
im_res,
channel_names=im_ch_names,
)
merge_ometiffwriter = MergeOmeTiffWriter(
merge_regimage, reg_transform_seqs=transformations
)
im_fp = merge_ometiffwriter.merge_write_image_by_plane(
output_path.stem,
sub_images,
output_dir=str(self.output_dir),
)
return im_fp
def transform_images(
self,
file_writer="ome.tiff",
transform_non_reg=True,
remove_merged=True,
to_original_size=True,
):
"""
Transform and write images to disk after registration. Also transforms all attachment images
Parameters
----------
file_writer : str
output type to use, sitk writes a single resolution tiff, "zarr" writes an ome-zarr multiscale
zarr store
transform_non_reg : bool
whether to write the images that aren't transformed during registration as well
remove_merged: bool
whether to remove images that are stored in merged store, if True, images that are merged
will not be written as individual images as well
to_original_size: bool
write images that have been cropped for registration back to their original coordinate space
"""
image_fps = []
if all(
[reg_edge.get("registered") for reg_edge in self.reg_graph_edges]
):
# prepare workflow
merge_modalities = []
if len(self.merge_modalities.keys()) > 0:
for k, v in self.merge_modalities.items():
merge_modalities.extend(v)
reg_path_keys = list(self.reg_paths.keys())
nonreg_keys = self._find_nonreg_modalities()
if remove_merged:
for merge_mod in merge_modalities:
try:
m_idx = reg_path_keys.index(merge_mod)
reg_path_keys.pop(m_idx)
except ValueError:
pass
try:
m_idx = nonreg_keys.index(merge_mod)
nonreg_keys.pop(m_idx)
except ValueError:
pass
for modality in reg_path_keys:
(
im_data,
transformations,
output_path,
) = self._prepare_reg_image_transform(
modality,
attachment=False,
to_original_size=to_original_size,
)
im_fp = self._transform_write_image(
im_data,
transformations,
output_path,
file_writer=file_writer,
)
image_fps.append(im_fp)
for (
modality,
attachment_modality,
) in self.attachment_images.items():
if modality in merge_modalities and remove_merged:
continue
if attachment_modality in self._find_nonreg_modalities():
(
im_data,
transformations,
output_path,
) = self._prepare_nonreg_image_transform(
modality,
attachment=True,
attachment_modality=attachment_modality,
to_original_size=to_original_size,
)
else:
(
im_data,
transformations,
output_path,
) = self._prepare_reg_image_transform(
modality,
attachment=True,
attachment_modality=attachment_modality,
to_original_size=to_original_size,
)
im_fp = self._transform_write_image(
im_data,
transformations,
output_path,
file_writer=file_writer,
)
image_fps.append(im_fp)
if len(self.merge_modalities.items()) > 0:
im_fp = self._transform_write_merge_images(
to_original_size=to_original_size
)
image_fps.append(im_fp)
if transform_non_reg:
# preprocess and save unregistered nodes
for modality in nonreg_keys:
if modality in merge_modalities and remove_merged:
continue
(
im_data,
transformations,
output_path,
) = self._prepare_nonreg_image_transform(
modality,
to_original_size=to_original_size,
)
im_fp = self._transform_write_image(
im_data,
transformations,
output_path,
file_writer=file_writer,
)
image_fps.append(im_fp)
return image_fps
def transform_shapes(self):
"""
Transform all attached shapes and write out shape data to geoJSON.
"""
transformed_shapes_fps = []
# do inversion before any processing to ensure it is not computed twice
for set_name, set_data in self.shape_sets.items():
attachment_modality = set_data["attachment_modality"]
if attachment_modality in self.transformations.keys():
invert_nonrigid_transforms(
self.transformations[attachment_modality][
"full-transform-seq"
].reg_transforms_itk_order
)
else:
continue
for set_name, set_data in self.shape_sets.items():
attachment_modality = set_data["attachment_modality"]
rs = RegShapes(
set_data["shape_files"], source_res=set_data["image_res"]
)
if attachment_modality in self.transformations.keys():
im_data = self.modalities[attachment_modality]
final_modality = self.reg_paths[attachment_modality][-1]
print(
"transforming shape set {} associated with {} to {}".format(
set_name, attachment_modality, final_modality
)
)
transformations = copy(
self.transformations[attachment_modality][
"full-transform-seq"
]
)
if im_data.get("output_res"):
transformations.set_output_spacing(im_data["output_res"])
else:
output_spacing_target = self.modalities[final_modality][
"image_res"
]
transformations.set_output_spacing(
(output_spacing_target, output_spacing_target)
)
self._transformed_shapes_spacings.update(
{set_name: (output_spacing_target, output_spacing_target)}
)
rs.transform_shapes(transformations)
output_path = (
self.output_dir
/ "{}-{}-{}_to_{}-transformed_shapes.geojson".format(
self.project_name,
set_name,
attachment_modality,
final_modality,
)
)
rs.save_shape_data(output_path, transformed=True)
transformed_shapes_fps.append(output_path)
else:
output_path = (
self.output_dir
/ "{}-{}-{}-registered.geojson".format(
self.project_name,
set_name,
attachment_modality,
)
)
output_spacing_target = self.modalities[attachment_modality][
"image_res"
]
self._transformed_shapes_spacings.update(
{set_name: (output_spacing_target, output_spacing_target)}
)
rs.save_shape_data(output_path, transformed=False)
transformed_shapes_fps.append(output_path)
return transformed_shapes_fps
def _transforms_to_txt(
self, transformations: Dict[str, RegTransformSeq]
) -> Dict[str, List[str]]:
tform_txt = dict()
for k, v in transformations.items():
if k == "full-transform-seq":
continue
if "initial" in k:
if v:
for idx, rt in enumerate(v.reg_transforms):
tform_txt.update(
{f"{k}-{str(idx).zfill(2)}": rt.elastix_transform}
)
else:
tform_txt.update(
{f"{k}": [rt.elastix_transform for rt in v.reg_transforms]}
)
return tform_txt
def save_transformations(self):
"""
Save all transformations for a given modality as JSON
"""
if all(
[reg_edge.get("registered") for reg_edge in self.reg_graph_edges]
):
for key in self.reg_paths.keys():
final_modality = self.reg_paths[key][-1]
output_path = (
self.output_dir
/ "{}-{}_to_{}_transformations.json".format(
self.project_name,
key,
final_modality,
)
)
tform_txt = self._transforms_to_txt(self.transformations[key])
with open(output_path, 'w') as fp:
json.dump(tform_txt, fp, indent=1)
for (
modality,
attachment_modality,
) in self.attachment_images.items():
if attachment_modality not in self._find_nonreg_modalities():
final_modality = self.reg_paths[attachment_modality][-1]
output_path = (
self.output_dir
/ "{}-{}_to_{}_transformations.json".format(
self.project_name,
modality,
final_modality,
)
)
tform_txt = self._transforms_to_txt(
self.transformations[key]
)
with open(output_path, 'w') as fp:
json.dump(tform_txt, fp, indent=4)
else:
warn(
"registration has not been executed for the graph "
"no transformations to save"
)
def add_data_from_config(self, config_filepath):
reg_config = parse_check_reg_config(config_filepath)
if reg_config.get("modalities"):
for key, val in reg_config["modalities"].items():
image_filepath = (
val.get("image_filepath")
if val.get("image_filepath") is not None
else val.get("image_filepath")
)
preprocessing = (
"None"
if val.get("preprocessing") is None
else val.get("preprocessing")
)
self.add_modality(
key,
image_filepath,
image_res=val.get("image_res"),
channel_names=val.get("channel_names"),
channel_colors=val.get("channel_colors"),
preprocessing=preprocessing,
mask=val.get("mask"),
output_res=val.get("output_res"),
)
else:
print("warning: config file did not contain any image modalities")
if reg_config.get("reg_paths"):
for key, val in reg_config["reg_paths"].items():
self.add_reg_path(
val.get("src_modality_name"),
val.get("tgt_modality_name"),
val.get("thru_modality"),
reg_params=val.get("reg_params"),
override_prepro=val.get("override_prepro"),
)
else:
print(
"warning: config file did not contain any registration paths"
)
if reg_config.get("attachment_images"):
for key, val in reg_config["attachment_images"].items():
self.add_attachment_images(
val.get("attachment_modality"),
key,
val.get("image_filepath"),
val.get("image_res"),
channel_names=val.get("channel_names"),
channel_colors=val.get("channel_colors"),
)
if reg_config.get("attachment_shapes"):
for key, val in reg_config["attachment_shapes"].items():
self.add_attachment_shapes(
val.get("attachment_modality"), key, val.get("shape_files")
)
if reg_config.get("reg_graph_edges"):
self._reg_graph_edges = reg_config["reg_graph_edges"]
if all([re.get("registered") for re in self.reg_graph_edges]):
self.transformations = self.reg_graph_edges
if reg_config.get("merge_modalities"):
for mn, mm in reg_config["merge_modalities"].items():
self.add_merge_modalities(mn, mm)
def reset_registered_modality(self, modalities):
edge_keys = [
r.get("modalities").get("source") for r in self.reg_graph_edges
]
if isinstance(modalities, str):
modalities = [modalities]
for modality in modalities:
modality_idx = edge_keys.index(modality)
self.reg_graph_edges[modality_idx]["registered"] = False
def _remove_modality_data(self, modality: str) -> None:
if modality in self.modality_names:
# remove top level
self.modalities.pop(modality)
self.modality_names.pop(self.modality_names.index(modality))
to_rm = []
for merge_name, merge_mods in self.merge_modalities.items():
if modality in merge_mods:
to_rm.append(merge_name)
[self.merge_modalities.pop(k) for k in to_rm]
self.n_modalities = len(self.modality_names)
if modality in self.attachment_images.keys():
self.attachment_images.pop(modality)
def _remove_reg_paths(self, modality: str) -> None:
# remove registrations
if modality in self.reg_paths.keys():
self.reg_paths.pop(modality)
# remove any reg paths including modality as target or thru
to_rm = []
for k, v in self.reg_paths.items():
if modality in v:
to_rm.append(k)
[self.reg_paths.pop(k) for k in to_rm]
# remove any reg paths including modality as target or thru
to_rm = []
for k, v in self.transform_paths.items():
for reg in v:
if reg["source"] == modality or reg["target"] == modality:
to_rm.append(k)
to_rm = list(np.unique(to_rm))
edges_to_rm = []
for idx, edge in enumerate(self.reg_graph_edges):
if (
edge["modalities"]["source"] in to_rm
or edge["modalities"]["target"] in to_rm
):
edges_to_rm.append(idx)
[self.transform_paths.pop(k) for k in to_rm]
for idx in sorted(edges_to_rm)[::-1]:
self.reg_graph_edges.pop(idx)
self.n_registrations = len(self._reg_graph_edges)
def _remove_attachments(self, modality: str):
to_rm = []
for k, v in self.attachment_images.items():
if modality == v:
to_rm.append(k)
[self.attachment_images.pop(k) for k in to_rm]
[self.modalities.pop(k) for k in to_rm]
[self.modality_names.pop(self.modality_names.index(k)) for k in to_rm]
self.shape_sets.pop(modality)
self.shape_set_names.pop(self.shape_set_names.index(modality))
self.n_modalities = len(self.modality_names)
def remove_merge_modality(self, merge_modality: str) -> None:
try:
self.merge_modalities.pop(merge_modality)
except KeyError:
warn(f"merge modality {merge_modality} not found")
def remove_modality(self, modality: str) -> None:
if modality in self.modality_names:
self._remove_modality_data(modality)
self._remove_reg_paths(modality)
elif modality in self.shape_set_names:
self._remove_attachments(modality)
else:
warn(
f"{modality} not found in modalities: {', '.join(self.modality_names)}"
f" or shape sets {self.shape_set_names}. "
"It may have already been removed"
)
def wsireg_run(
graph_configuration: Union[str, Path, WsiReg2D],
write_images: bool = True,
to_original_size: bool = False,
transform_non_reg: bool = True,
remove_merged: bool = True,
file_writer: str = "ome.tiff",
testing: bool = False,
):
def config_to_WsiReg2D(config_filepath):
reg_config = parse_check_reg_config(config_filepath)
reg_graph = WsiReg2D(
reg_config.get("project_name"),
reg_config.get("output_dir"),
reg_config.get("cache_images"),
)
return reg_graph
if isinstance(graph_configuration, (str, Path)):
reg_graph = config_to_WsiReg2D(graph_configuration)
reg_graph.add_data_from_config(graph_configuration)
elif isinstance(graph_configuration, WsiReg2D):
reg_graph = graph_configuration
if testing:
temp_dir = str(tempfile.mkdtemp())
reg_graph.setup_project_output(reg_graph.project_name, temp_dir)
reg_graph.register_images()
reg_graph.save_transformations()
output_data = []
if write_images:
output_images = reg_graph.transform_images(
file_writer=file_writer,
to_original_size=to_original_size,
transform_non_reg=transform_non_reg,
remove_merged=remove_merged,
)
else:
output_images = []
if reg_graph.shape_sets:
output_shapes = reg_graph.transform_shapes()
else:
output_shapes = []
output_data.extend(output_images)
output_data.extend(output_shapes)
if testing:
shutil.rmtree(temp_dir)
return output_data
def main():
import argparse
parser = argparse.ArgumentParser(
description='Load Whole Slide Image 2D Registration Graph from configuration file'
)
parser.add_argument(
"config_filepath",
metavar="C",
type=str,
help="full filepath for .yaml configuration file",
)
parser.add_argument(
"--fw",
type=str,
help="how to write output registered images: ome.tiff, ome.zarr (default: ome.tiff)",
)
parser.add_argument('--write_im', dest='write_im', action='store_true')
parser.add_argument('--no_write_im', dest='write_im', action='store_false')
parser.add_argument(
'--remove_merged', dest='remove_merged', action='store_true'
)
parser.add_argument(
'--write_merge_indiv', dest='remove_merged', action='store_false'
)
parser.add_argument(
'--tform_non_reg', dest='transform_non_reg', action='store_true'
)
parser.add_argument(
'--no_tform_non_reg', dest='transform_non_reg', action='store_false'
)
parser.add_argument(
'--to_orig_size', dest='to_original_size', action='store_true'
)
parser.add_argument(
'--to_cropped', dest='to_original_size', action='store_false'
)
parser.add_argument('--testing', dest='testing', action='store_true')
parser.set_defaults(
write_im=True,
remove_merged=True,
transform_non_reg=True,
to_original_size=False,
testing=False,
)
args = parser.parse_args()
config_filepath = args.config_filepath
if args.fw is None:
file_writer = "ome.tiff"
else:
file_writer = args.fw
wsireg_run(
config_filepath,
write_images=args.write_im,
to_original_size=args.to_original_size,
transform_non_reg=args.transform_non_reg,
remove_merged=args.remove_merged,
file_writer=file_writer,
testing=args.testing,
)
if __name__ == "__main__":
import sys
sys.exit(main())
| 62,774 | 34.526316 | 133 | py |
wsireg | wsireg-master/wsireg/__init__.py | # flake8: noqa
from .wsireg2d import WsiReg2D
"""wsireg."""
__author__ = """Nathan Heath Patterson"""
__email__ = 'heath.patterson@vanderbilt.edu'
__version__ = '0.3.8'
| 171 | 18.111111 | 44 | py |
wsireg | wsireg-master/wsireg/reg_transforms/reg_transform.py | from warnings import warn
from typing import Optional
import numpy as np
import SimpleITK as sitk
from wsireg.utils.tform_conversion import convert_to_itk
class RegTransform:
"""Container for elastix transform that manages inversion and other metadata.
Converts elastix transformation dict to it's SimpleITK representation
Attributes
----------
elastix_transform: dict
elastix transform stored in a python dict
itk_transform: sitk.Transform
elastix transform in SimpleITK container
output_spacing: list of float
Spacing of the targeted image during registration
output_size: list of int
Size of the targeted image during registration
output_direction: list of float
Direction of the targeted image during registration (not relevant for 2D applications)
output_origin: list of float
Origin of the targeted image during registration
resampler_interpolator: str
elastix interpolator setting for resampling the image
is_linear: bool
Whether the given transform is linear or non-linear (non-rigid)
inverse_transform: sitk.Transform or None
Inverse of the itk transform used for transforming from moving to fixed space
Only calculated for non-rigid transforms when called by `compute_inverse_nonlinear`
as the process is quite memory and computationally intensive
"""
def __init__(self, elastix_transform):
"""
Parameters
----------
elastix_transform: dict
elastix transform stored in a python dict
"""
self.elastix_transform: dict = elastix_transform
self.itk_transform: sitk.Transform = convert_to_itk(
self.elastix_transform
)
self.output_spacing = [
float(p) for p in self.elastix_transform.get("Spacing")
]
self.output_size = [int(p) for p in self.elastix_transform.get("Size")]
self.output_origin = [
float(p) for p in self.elastix_transform.get("Origin")
]
self.output_direction = [
float(p) for p in self.elastix_transform.get("Direction")
]
self.resample_interpolator = self.elastix_transform.get(
"ResampleInterpolator"
)[0]
self.is_linear = self.itk_transform.IsLinear()
if self.is_linear is True:
self.inverse_transform = self.itk_transform.GetInverse()
transform_name = self.itk_transform.GetName()
if transform_name == "Euler2DTransform":
self.inverse_transform = sitk.Euler2DTransform(
self.inverse_transform
)
elif transform_name == "AffineTransform":
self.inverse_transform = sitk.AffineTransform(
self.inverse_transform
)
elif transform_name == "Similarity2DTransform":
self.inverse_transform = sitk.Similarity2DTransform(
self.inverse_transform
)
else:
self.inverse_transform = None
def compute_inverse_nonlinear(self) -> None:
"""Compute the inverse of a BSpline transform using ITK"""
tform_to_dfield = sitk.TransformToDisplacementFieldFilter()
tform_to_dfield.SetOutputSpacing(self.output_spacing)
tform_to_dfield.SetOutputOrigin(self.output_origin)
tform_to_dfield.SetOutputDirection(self.output_direction)
tform_to_dfield.SetSize(self.output_size)
displacement_field = tform_to_dfield.Execute(self.itk_transform)
displacement_field = sitk.InvertDisplacementField(displacement_field)
displacement_field = sitk.DisplacementFieldTransform(
displacement_field
)
self.inverse_transform = displacement_field
def as_np_matrix(
self,
use_np_ordering: bool = False,
n_dim: int = 3,
use_inverse: bool = False,
to_px_idx: bool = False,
) -> Optional[np.ndarray]:
"""
Creates a affine transform matrix as np.ndarray whether the center of rotation
is 0,0. Optionally in physical or pixel coordinates.
Parameters
----------
use_np_ordering: bool
Use numpy ordering of yx (napari-compatible)
n_dim: int
Number of dimensions in the affine matrix, using 3 creates a 3x3 array
use_inverse: bool
return the inverse affine transformation
to_px_idx: bool
return the transformation matrix specified in pixels or physical (microns)
Returns
-------
full_matrix: np.ndarray
Affine transformation matrix
"""
if self.is_linear:
if use_np_ordering is True:
order = slice(None, None, -1)
else:
order = slice(None, None, 1)
if use_inverse is True:
transform = self.inverse_transform
else:
transform = self.itk_transform
# pull transform values
tmatrix = np.array(transform.GetMatrix()[order]).reshape(2, 2)
center = np.array(transform.GetCenter()[order])
translation = np.array(transform.GetTranslation()[order])
if to_px_idx is True:
phys_to_index = 1 / np.asarray(self.output_spacing).astype(
np.float64
)
center *= phys_to_index
translation *= phys_to_index
# construct matrix
full_matrix = np.eye(n_dim)
full_matrix[0:2, 0:2] = tmatrix
full_matrix[0:2, n_dim - 1] = (
-np.dot(tmatrix, center) + translation + center
)
return full_matrix
else:
warn(
"Non-linear transformations can not be represented converted"
"to homogenous matrix"
)
return None
| 6,013 | 35.448485 | 94 | py |
wsireg | wsireg-master/wsireg/reg_transforms/reg_transform_seq.py | import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import SimpleITK as sitk
from wsireg.reg_transforms.reg_transform import RegTransform
from wsireg.utils.tform_utils import ELX_TO_ITK_INTERPOLATORS
class RegTransformSeq:
"""Class to concatenate and compose sequences of transformations"""
reg_transforms: List[RegTransform] = []
resampler: Optional[sitk.ResampleImageFilter] = None
composed_linear_mats: Optional[Dict[str, np.ndarray]] = None
reg_transforms_itk_order: List[RegTransform] = []
def __init__(
self,
reg_transforms: Optional[
Union[str, Path, Dict[str, List[str]]]
] = None,
transform_seq_idx: Optional[List[int]] = None,
) -> None:
"""
Parameters
----------
reg_transforms: List or single RegTransform or None
RegTransforms to be composed
transform_seq_idx: list of int
Order in sequence of the transform. If a pre-reg transform, it will not be reversed like a sequence
of elastix transforms would to make the composite ITK transform
"""
self._transform_seq_idx = []
if reg_transforms:
self.add_transforms(
reg_transforms, transform_seq_idx=transform_seq_idx
)
else:
self._composite_transform = None
self._n_transforms = 0
def add_transforms(
self,
transforms: Union[str, Path, dict, List[RegTransform], RegTransform],
transform_seq_idx: Optional[List[int]] = None,
) -> None:
"""
Add transforms to sequence.
Parameters
----------
transforms: path to wsireg transforms .json, elastix transform dict,RegTransform ot List of RegTransform
transform_seq_idx: list of int
Order in sequence of the transform. If a pre-reg transform, it will not be reversed like a sequence
of elastix transforms would to make the composite ITK transform
"""
if isinstance(transforms, (str, Path, dict)):
tform_list, tform_idx = _read_wsireg_transform(transforms)
self.transform_seq_idx = tform_idx
reg_transforms = [RegTransform(t) for t in tform_list]
self.reg_transforms = self.reg_transforms + reg_transforms
elif isinstance(transforms, (list, RegTransform)):
if isinstance(transforms, RegTransform):
transforms = [transforms]
self.reg_transforms = self.reg_transforms + transforms
self.transform_seq_idx = transform_seq_idx
self._update_transform_properties()
@property
def composite_transform(self) -> sitk.CompositeTransform:
"""Composite ITK transform from transformation sequence"""
return self._composite_transform
@composite_transform.setter
def composite_transform(self, transforms):
self._composite_transform = transforms
@property
def transform_seq_idx(self) -> List[int]:
"""Transformation sequence for all combined transformations."""
return self._transform_seq_idx
@transform_seq_idx.setter
def transform_seq_idx(self, transform_seq):
if len(self._transform_seq_idx) > 0:
reindex_val = np.max(self._transform_seq_idx) + 1
else:
reindex_val = 0
transform_seq = [x + reindex_val for x in transform_seq]
self._transform_seq_idx = self._transform_seq_idx + transform_seq
@property
def n_transforms(self) -> int:
"""Number of transformations in sequence."""
return self._n_transforms
@n_transforms.setter
def n_transforms(self) -> None:
self._n_transforms = len(self.reg_transforms)
@property
def output_size(self) -> Tuple[int, int]:
"""Output size of image resampled by transform, initially determined from the last
transformation in the chain"""
return self._output_size
@output_size.setter
def output_size(self, new_size: Tuple[int, int]) -> None:
self._output_size = new_size
@property
def output_spacing(self) -> Union[Tuple[float, float], Tuple[int, int]]:
"""Output spacing of image resampled by transform, initially determined from the last
transformation in the chain"""
return self._output_spacing
@output_size.setter
def output_size(
self, new_spacing: Union[Tuple[float, float], Tuple[int, int]]
) -> None:
self._output_spacing = new_spacing
def set_output_spacing(
self, spacing: Union[Tuple[float, float], Tuple[int, int]]
) -> None:
"""
Method that allows setting the output spacing of the resampler
to resampled to any pixel spacing desired. This will also change the output_size
to match.
Parameters
----------
spacing: tuple of float
Spacing to set the new image. Will also change the output size to match.
"""
output_size_scaling = np.asarray(self._output_spacing) / np.asarray(
spacing
)
new_output_size = np.ceil(
np.multiply(self._output_size, output_size_scaling)
)
new_output_size = tuple([int(i) for i in new_output_size])
self._output_spacing = spacing
self._output_size = new_output_size
self._build_resampler()
def _update_transform_properties(self) -> None:
self._output_size = self.reg_transforms[-1].output_size
self._output_spacing = self.reg_transforms[-1].output_spacing
self._build_transform_data()
def _build_transform_data(self) -> None:
self._build_composite_transform(
self.reg_transforms, self.transform_seq_idx
)
self._build_resampler()
def _build_composite_transform(
self, reg_transforms, reg_transform_seq_idx
) -> None:
composite_index = []
for unique_idx in np.unique(reg_transform_seq_idx):
in_seq_tform_idx = np.where(reg_transform_seq_idx == unique_idx)[0]
if len(in_seq_tform_idx) > 1:
composite_index = composite_index + list(
in_seq_tform_idx[::-1]
)
else:
composite_index = composite_index + list(in_seq_tform_idx)
composite_transform = sitk.CompositeTransform(2)
for tform_idx in composite_index:
composite_transform.AddTransform(
reg_transforms[tform_idx].itk_transform
)
self._composite_transform = composite_transform
self.reg_transforms_itk_order = [
self.reg_transforms[i] for i in composite_index
]
def _build_resampler(self) -> None:
resampler = sitk.ResampleImageFilter()
resampler.SetOutputOrigin(self.reg_transforms[-1].output_origin)
resampler.SetOutputDirection(self.reg_transforms[-1].output_direction)
resampler.SetSize(self.output_size)
resampler.SetOutputSpacing(self.output_spacing)
interpolator = ELX_TO_ITK_INTERPOLATORS.get(
self.reg_transforms[-1].resample_interpolator
)
resampler.SetInterpolator(interpolator)
resampler.SetTransform(self.composite_transform)
self.resampler = resampler
def transform_points(
self, pt_data: np.ndarray, px_idx=True, source_res=1, output_idx=True
) -> np.ndarray:
"""
Transform point sets using the transformation chain
Parameters
----------
pt_data: np.ndarray
Point data in xy order
px_idx: bool
Whether point data is in pixel or physical coordinate sapce
source_res: float
spacing of the pixels associated with pt_data if they are not in physical coordinate space
output_idx: bool
return transformed points to pixel indices in the output_spacing's reference space
Returns
-------
tformed_pts: np.ndarray
Transformed points
"""
tformed_pts = []
for pt in pt_data:
if px_idx is True:
pt = pt * source_res
for idx, t in enumerate(self.reg_transforms):
if idx == 0:
t_pt = t.inverse_transform.TransformPoint(pt)
else:
t_pt = t.inverse_transform.TransformPoint(t_pt)
t_pt = np.array(t_pt)
if output_idx is True:
t_pt *= 1 / self._output_spacing[0]
tformed_pts.append(t_pt)
return np.stack(tformed_pts)
def append(self, other) -> None:
"""
Concatenate transformation sequences.
Parameters
----------
other: RegTransformSeq
Append a RegTransformSeq to another
"""
self.add_transforms(other.reg_transforms, other.transform_seq_idx)
def _write_transforms(self, output_path: Union[str, Path]):
return
def _read_wsireg_transform(
parameter_data: Union[str, Path, Dict[Any, Any]]
) -> Tuple[List[Dict[str, List[str]]], List[int]]:
"""Convert wsireg transform dict or from file to List of RegTransforms"""
if isinstance(parameter_data, (str, Path)):
parameter_data_in = json.load(open(parameter_data, "r"))
else:
parameter_data_in = parameter_data
transform_list = []
transform_list_seq_id = []
seq_idx = 0
for k, v in parameter_data_in.items():
if k == "initial":
if isinstance(v, dict):
transform_list.append(v)
transform_list_seq_id.append(seq_idx)
seq_idx += 1
elif isinstance(v, list):
for init_tform in v:
transform_list.append(init_tform)
transform_list_seq_id.append(seq_idx)
seq_idx += 1
else:
if isinstance(v, dict):
transform_list.append(v)
transform_list_seq_id.append(seq_idx)
seq_idx += 1
elif isinstance(v, list):
for tform in v:
transform_list.append(tform)
transform_list_seq_id.append(seq_idx)
seq_idx += 1
return transform_list, transform_list_seq_id
| 10,422 | 33.39934 | 112 | py |
wsireg | wsireg-master/wsireg/reg_transforms/__init__.py | from .reg_transform import RegTransform # noqa: F401
from .reg_transform_seq import RegTransformSeq # noqa: F401
| 115 | 37.666667 | 60 | py |
wsireg | wsireg-master/wsireg/writers/merge_ome_tiff_writer.py | from pathlib import Path
from typing import List, Optional, Tuple, Union
import cv2
import numpy as np
import SimpleITK as sitk
from tifffile import TiffWriter
from wsireg.reg_images.reg_image import RegImage
from wsireg.reg_images.merge_reg_image import MergeRegImage
from wsireg.reg_transforms.reg_transform_seq import RegTransformSeq
from wsireg.utils.im_utils import (
SITK_TO_NP_DTYPE,
format_channel_names,
get_pyramid_info,
prepare_ome_xml_str,
)
class MergeOmeTiffWriter:
x_size: Optional[int] = None
y_size: Optional[int] = None
y_spacing: Optional[Union[int, float]] = None
x_spacing: Optional[Union[int, float]] = None
tile_size: int = 512
pyr_levels: Optional[List[Tuple[int, int]]] = None
n_pyr_levels: Optional[int] = None
PhysicalSizeY: Optional[Union[int, float]] = None
PhysicalSizeX: Optional[Union[int, float]] = None
subifds: Optional[int] = None
compression: str = "deflate"
def __init__(
self,
reg_images: MergeRegImage,
reg_transform_seqs: Optional[List[RegTransformSeq]] = None,
):
"""
Class for writing multiple images wiuth and without transforms to a singel OME-TIFF.
Parameters
----------
reg_image: MergeRegImage
MergeRegImage to be transformed
reg_transform_seqs: List of RegTransformSeq or None
Registration transformation sequences for each wsireg image to be merged
Attibutes
---------
x_size: int
Size of the merged image after transformation in x
y_size: int
Size of the merged image after transformation in y
y_spacing: float
Pixel spacing in microns after transformation in y
x_spacing: float
Pixel spacing in microns after transformation in x
tile_size: int
Size of tiles to be written
pyr_levels: list of tuples of int:
Size of downsampled images in pyramid
n_pyr_levels: int
Number of downsamples in pyramid
PhysicalSizeY: float
physical size of image in micron for OME-TIFF in Y
PhysicalSizeX: float
physical size of image in micron for OME-TIFF in X
subifds: int
Number of sub-resolutions for pyramidal OME-TIFF
compression: str
tifffile string to pass to compression argument, defaults to "deflate" for minisblack
and "jpeg" for RGB type images
"""
self.reg_image = reg_images
self.reg_transform_seqs = reg_transform_seqs
def _length_checks(self, sub_image_names):
"""Make sure incoming data is kosher in dimensions"""
if isinstance(sub_image_names, list) is False:
if sub_image_names is None:
sub_image_names = [
"" for i in range(len(self.reg_image.images))
]
else:
raise ValueError(
"MergeRegImage requires a list of image names for each image to merge"
)
if self.reg_transform_seqs is None:
transformations = [None for i in range(len(self.reg_image.images))]
else:
transformations = self.reg_transform_seqs
if len(transformations) != len(self.reg_image.images):
raise ValueError(
"MergeRegImage number of transforms does not match number of images"
)
def _create_channel_names(self, sub_image_names):
"""Create channel names for merge data."""
def prepare_channel_names(sub_image_name, channel_names):
return [f"{sub_image_name} - {c}" for c in channel_names]
self.reg_image.channel_names = [
prepare_channel_names(im_name, cnames)
for im_name, cnames in zip(
sub_image_names, self.reg_image.channel_names
)
]
self.reg_image.channel_names = [
item
for sublist in self.reg_image.channel_names
for item in sublist
]
def _transform_check(self):
"""Check that all transforms as currently loaded output to the same size/resolution"""
out_size = []
out_spacing = []
if not self.reg_transform_seqs:
rts = [None for _ in range(len(self.reg_image.images))]
else:
rts = self.reg_transform_seqs
for im, t in zip(self.reg_image.images, rts):
if t:
out_size.append(t.reg_transforms[-1].output_size)
out_spacing.append(t.reg_transforms[-1].output_spacing)
else:
out_im_size = (
(im.shape[0], im.shape[1])
if im.is_rgb
else (im.shape[1], im.shape[2])
)
out_im_spacing = im.image_res
out_size.append(out_im_size)
out_spacing.append(out_im_spacing)
if all(out_spacing) is False:
raise ValueError(
"MergeRegImage all transforms output spacings and untransformed image spacings must match"
)
if all(out_size) is False:
raise ValueError(
"MergeRegImage all transforms output sizes and untransformed image sizes must match"
)
def _prepare_image_info(
self,
reg_image: RegImage,
image_name: str,
im_dtype: np.dtype,
reg_transform_seq: Optional[RegTransformSeq] = None,
write_pyramid: bool = True,
tile_size: int = 512,
compression: str = "default",
):
"""Prepare OME-XML and other data needed for saving"""
if reg_transform_seq:
self.x_size, self.y_size = reg_transform_seq.output_size
self.x_spacing, self.y_spacing = reg_transform_seq.output_spacing
else:
self.y_size, self.x_size = (
(reg_image.shape[0], reg_image.shape[1])
if reg_image.is_rgb
else (reg_image.shape[1], reg_image.shape[2])
)
self.y_spacing, self.x_spacing = (
reg_image.image_res,
reg_image.image_res,
)
self.tile_size = tile_size
# protect against too large tile size
while (
self.y_size / self.tile_size <= 1
or self.x_size / self.tile_size <= 1
):
self.tile_size = self.tile_size // 2
self.pyr_levels, _ = get_pyramid_info(
self.y_size, self.x_size, reg_image.n_ch, self.tile_size
)
self.n_pyr_levels = len(self.pyr_levels)
if reg_transform_seq:
self.PhysicalSizeY = self.y_spacing
self.PhysicalSizeX = self.x_spacing
else:
self.PhysicalSizeY = reg_image.image_res
self.PhysicalSizeX = reg_image.image_res
channel_names = format_channel_names(
self.reg_image.channel_names, self.reg_image.n_ch
)
self.omexml = prepare_ome_xml_str(
self.y_size,
self.x_size,
len(channel_names),
im_dtype,
False,
PhysicalSizeX=self.PhysicalSizeX,
PhysicalSizeY=self.PhysicalSizeY,
PhysicalSizeXUnit="µm",
PhysicalSizeYUnit="µm",
Name=image_name,
Channel={"Name": channel_names},
)
self.subifds = self.n_pyr_levels - 1 if write_pyramid is True else None
if compression == "default":
print("using default compression")
self.compression = "deflate"
else:
self.compression = compression
def _get_merge_dtype(self):
"""Determine data type for merger. Will default to the largest
dtype. If one image is np.uint8 and another np.uint16, the image at np.uint8
will be cast to np.uint16"""
dtype_max_size = [
np.iinfo(r.im_dtype).max for r in self.reg_image.images
]
merge_dtype_np = self.reg_image.images[
np.argmax(dtype_max_size)
].im_dtype
for k, v in SITK_TO_NP_DTYPE.items():
if k < 12:
if v == merge_dtype_np:
merge_dtype_sitk = k
return merge_dtype_sitk, merge_dtype_np
def merge_write_image_by_plane(
self,
image_name: str,
sub_image_names: List[str],
output_dir: Union[Path, str] = "",
write_pyramid: bool = True,
tile_size: int = 512,
compression: Optional[str] = "default",
) -> str:
"""
Write merged OME-TIFF image plane-by-plane to disk.
RGB images will be de-interleaved with RGB channels written as separate planes.
Parameters
----------
image_name: str
Name to be written WITHOUT extension
for example if image_name = "cool_image" the file
would be "cool_image.ome.tiff"
sub_image_names: list of str
Names added before each channel of a given image to distinguish it.
output_dir: Path or str
Directory where the image will be saved
write_pyramid: bool
Whether to write the OME-TIFF with sub-resolutions or not
tile_size: int
What size to write OME-TIFF tiles to disk
compression: str
tifffile string to pass to compression argument, defaults to "deflate" for minisblack
and "jpeg" for RGB type images
Returns
-------
output_file_name: str
File path to the written OME-TIFF
"""
merge_dtype_sitk, merge_dtype_np = self._get_merge_dtype()
self._length_checks(sub_image_names)
self._create_channel_names(sub_image_names)
self._transform_check()
output_file_name = str(Path(output_dir) / f"{image_name}.ome.tiff")
self._prepare_image_info(
self.reg_image.images[0],
image_name,
merge_dtype_np,
reg_transform_seq=self.reg_transform_seqs[0],
write_pyramid=write_pyramid,
tile_size=tile_size,
compression=compression,
)
print(f"saving to {output_file_name}")
with TiffWriter(output_file_name, bigtiff=True) as tif:
for m_idx, merge_image in enumerate(self.reg_image.images):
if self.reg_image.images[m_idx].reader == "sitk":
full_image = sitk.ReadImage(
self.reg_image.images[m_idx].image_filepath
)
merge_n_ch = merge_image.n_ch
for channel_idx in range(merge_n_ch):
if self.reg_image.images[m_idx].reader != "sitk":
image = self.reg_image.images[
m_idx
].read_single_channel(channel_idx)
image = np.squeeze(image)
image = sitk.GetImageFromArray(image)
image.SetSpacing(
(
self.reg_image.images[m_idx].image_res,
self.reg_image.images[m_idx].image_res,
)
)
else:
if len(full_image.GetSize()) > 2:
image = full_image[:, :, channel_idx]
else:
image = full_image
if image.GetPixelIDValue() != merge_dtype_sitk:
image = sitk.Cast(image, merge_dtype_sitk)
if self.reg_transform_seqs[m_idx]:
image = self.reg_transform_seqs[
m_idx
].resampler.Execute(image)
if isinstance(image, sitk.Image):
image = sitk.GetArrayFromImage(image)
options = dict(
tile=(tile_size, tile_size),
compression=self.compression,
photometric="minisblack",
metadata=None,
)
# write OME-XML to the ImageDescription tag of the first page
description = (
self.omexml
if channel_idx == 0 and m_idx == 0
else None
)
# write channel data
print(
f" writing subimage index {m_idx} : {sub_image_names[m_idx]} - "
f"channel index - {channel_idx} - shape: {image.shape}"
)
tif.write(
image,
subifds=self.subifds,
description=description,
**options,
)
if write_pyramid:
for pyr_idx in range(1, self.n_pyr_levels):
resize_shape = (
self.pyr_levels[pyr_idx][0],
self.pyr_levels[pyr_idx][1],
)
image = cv2.resize(
image,
resize_shape,
cv2.INTER_LINEAR,
)
tif.write(image, **options, subfiletype=1)
return output_file_name
| 13,686 | 35.596257 | 106 | py |
wsireg | wsireg-master/wsireg/writers/__init__.py | 0 | 0 | 0 | py | |
wsireg | wsireg-master/wsireg/writers/ome_tiff_writer.py | from pathlib import Path
from typing import List, Optional, Tuple, Union
import cv2
import numpy as np
import SimpleITK as sitk
from tifffile import TiffWriter
from wsireg.reg_images.reg_image import RegImage
from wsireg.reg_transforms.reg_transform_seq import RegTransformSeq
from wsireg.utils.im_utils import (
format_channel_names,
get_pyramid_info,
prepare_ome_xml_str,
)
class OmeTiffWriter:
x_size: Optional[int] = None
y_size: Optional[int] = None
y_spacing: Optional[Union[int, float]] = None
x_spacing: Optional[Union[int, float]] = None
tile_size: int = 512
pyr_levels: Optional[List[Tuple[int, int]]] = None
n_pyr_levels: Optional[int] = None
PhysicalSizeY: Optional[Union[int, float]] = None
PhysicalSizeX: Optional[Union[int, float]] = None
subifds: Optional[int] = None
compression: str = "deflate"
def __init__(
self,
reg_image: RegImage,
reg_transform_seq: Optional[RegTransformSeq] = None,
):
"""
Class for managing writing images to OME-TIFF.
Parameters
----------
reg_image: RegImage
RegImage to be transformed
reg_transform_seq: RegTransformSeq or None
Registration transformation sequence from wsireg to transform image
Attibutes
---------
x_size: int
Size of the output image after transformation in x
y_size: int
Size of the output image after transformation in y
y_spacing: float
Pixel spacing in microns after transformation in y
x_spacing: float
Pixel spacing in microns after transformation in x
tile_size: int
Size of tiles to be written
pyr_levels: list of tuples of int:
Size of downsampled images in pyramid
n_pyr_levels: int
Number of downsamples in pyramid
PhysicalSizeY: float
physical size of image in micron for OME-TIFF in Y
PhysicalSizeX: float
physical size of image in micron for OME-TIFF in X
subifds: int
Number of sub-resolutions for pyramidal OME-TIFF
compression: str
tifffile string to pass to compression argument, defaults to "deflate" for minisblack
and "jpeg" for RGB type images
"""
self.reg_image = reg_image
self.reg_transform_seq = reg_transform_seq
def _prepare_image_info(
self,
image_name: str,
reg_transform_seq: Optional[RegTransformSeq] = None,
write_pyramid: bool = True,
tile_size: int = 512,
compression: Optional[str] = "default",
) -> None:
"""Get image info and OME-XML"""
if reg_transform_seq:
self.x_size, self.y_size = reg_transform_seq.output_size
self.x_spacing, self.y_spacing = reg_transform_seq.output_spacing
else:
self.y_size, self.x_size = (
(self.reg_image.shape[0], self.reg_image.shape[1])
if self.reg_image.is_rgb
else (self.reg_image.shape[1], self.reg_image.shape[2])
)
self.y_spacing, self.x_spacing = None, None
self.tile_size = tile_size
# protect against too large tile size
while (
self.y_size / self.tile_size <= 1
or self.x_size / self.tile_size <= 1
):
self.tile_size = self.tile_size // 2
self.pyr_levels, _ = get_pyramid_info(
self.y_size, self.x_size, self.reg_image.n_ch, self.tile_size
)
self.n_pyr_levels = len(self.pyr_levels)
if reg_transform_seq:
self.PhysicalSizeY = self.y_spacing
self.PhysicalSizeX = self.x_spacing
else:
self.PhysicalSizeY = self.reg_image.image_res
self.PhysicalSizeX = self.reg_image.image_res
channel_names = format_channel_names(
self.reg_image.channel_names, self.reg_image.n_ch
)
self.omexml = prepare_ome_xml_str(
self.y_size,
self.x_size,
self.reg_image.n_ch,
self.reg_image.im_dtype,
self.reg_image.is_rgb,
PhysicalSizeX=self.PhysicalSizeX,
PhysicalSizeY=self.PhysicalSizeY,
PhysicalSizeXUnit="µm",
PhysicalSizeYUnit="µm",
Name=image_name,
Channel=None if self.reg_image.is_rgb else {"Name": channel_names},
)
self.subifds = self.n_pyr_levels - 1 if write_pyramid is True else None
if compression == "default":
print("using default compression")
self.compression = "jpeg" if self.reg_image.is_rgb else "deflate"
else:
self.compression = compression
def write_image_by_plane(
self,
image_name: str,
output_dir: Union[Path, str] = "",
write_pyramid: bool = True,
tile_size: int = 512,
compression: Optional[str] = "default",
) -> str:
"""
Write OME-TIFF image plane-by-plane to disk. WsiReg compatible RegImages all
have methods to read an image channel-by-channel, thus each channel is read, transformed, and written to
reduce memory during write.
RGB images may run large memory footprints as they are interleaved before write, for RGB images,
using the `OmeTiledTiffWriter` is recommended.
Parameters
----------
image_name: str
Name to be written WITHOUT extension
for example if image_name = "cool_image" the file
would be "cool_image.ome.tiff"
output_dir: Path or str
Directory where the image will be saved
write_pyramid: bool
Whether to write the OME-TIFF with sub-resolutions or not
tile_size: int
What size to write OME-TIFF tiles to disk
compression: str
tifffile string to pass to compression argument, defaults to "deflate" for minisblack
and "jpeg" for RGB type images
Returns
-------
output_file_name: str
File path to the written OME-TIFF
"""
output_file_name = str(Path(output_dir) / f"{image_name}.ome.tiff")
self._prepare_image_info(
image_name,
reg_transform_seq=self.reg_transform_seq,
write_pyramid=write_pyramid,
tile_size=tile_size,
compression=compression,
)
rgb_im_data = []
print(f"saving to {output_file_name}")
with TiffWriter(output_file_name, bigtiff=True) as tif:
if self.reg_image.reader == "sitk":
self.reg_image._read_full_image()
for channel_idx in range(self.reg_image.n_ch):
print(f"transforming : {channel_idx}")
image = self.reg_image.read_single_channel(channel_idx)
image = np.squeeze(image)
image = sitk.GetImageFromArray(image)
image.SetSpacing(
(self.reg_image.image_res, self.reg_image.image_res)
)
if self.reg_transform_seq:
image = self.reg_transform_seq.resampler.Execute(image)
# image = transform_plane(
# image, final_transform, composite_transform
# )
print(f"transformed : {channel_idx}")
if self.reg_image.is_rgb:
rgb_im_data.append(image)
else:
print("saving")
if isinstance(image, sitk.Image):
image = sitk.GetArrayFromImage(image)
options = dict(
tile=(self.tile_size, self.tile_size),
compression=self.compression,
photometric="rgb"
if self.reg_image.is_rgb
else "minisblack",
metadata=None,
)
# write OME-XML to the ImageDescription tag of the first page
description = self.omexml if channel_idx == 0 else None
# write channel data
print(
f" writing channel {channel_idx} - shape: {image.shape}"
)
tif.write(
image,
subifds=self.subifds,
description=description,
**options,
)
if write_pyramid:
for pyr_idx in range(1, self.n_pyr_levels):
resize_shape = (
self.pyr_levels[pyr_idx][0],
self.pyr_levels[pyr_idx][1],
)
image = cv2.resize(
image,
resize_shape,
cv2.INTER_LINEAR,
)
print(
f"pyramid index {pyr_idx} : channel {channel_idx} shape: {image.shape}"
)
tif.write(image, **options, subfiletype=1)
if self.reg_image.is_rgb:
rgb_im_data = sitk.Compose(rgb_im_data)
rgb_im_data = sitk.GetArrayFromImage(rgb_im_data)
options = dict(
tile=(self.tile_size, self.tile_size),
compression=self.compression,
photometric="rgb",
metadata=None,
)
# write OME-XML to the ImageDescription tag of the first page
description = self.omexml
# write channel data
tif.write(
rgb_im_data,
subifds=self.subifds,
description=description,
**options,
)
print(f"RGB shape: {rgb_im_data.shape}")
if write_pyramid:
for pyr_idx in range(1, self.n_pyr_levels):
resize_shape = (
self.pyr_levels[pyr_idx][0],
self.pyr_levels[pyr_idx][1],
)
rgb_im_data = cv2.resize(
rgb_im_data,
resize_shape,
cv2.INTER_LINEAR,
)
tif.write(rgb_im_data, **options, subfiletype=1)
return output_file_name
| 10,814 | 35.785714 | 112 | py |
wsireg | wsireg-master/wsireg/writers/tiled_ome_tiff_writer.py | import multiprocessing
import random
import string
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import List, Optional, Tuple, Union
import dask.array as da
import numpy as np
import SimpleITK as sitk
import zarr
from tifffile import TiffWriter
from tiler import Tiler
from tqdm import tqdm
from wsireg.reg_images.reg_image import RegImage
from wsireg.reg_transforms.reg_transform_seq import RegTransformSeq
from wsireg.utils.im_utils import (
format_channel_names,
get_pyramid_info,
prepare_ome_xml_str,
)
from wsireg.utils.tform_utils import ELX_TO_ITK_INTERPOLATORS
class OmeTiffTiledWriter:
"""
Class for transforming, then writing whole slide images tile-by-tile, allowing
memory-efficient transformation of images. The image
to be transformed has to have a dask representation that is itself tiled because
the writer finds the position of each write tile in fixed space in moving space
and reads only the necessary portion of the image to
perform the transformation.
Tiles are stored in a temporary `zarr` store that is deleted after writing to OME-TIFF.
Uses the Tiler library to manage "virtual tiling" for transformation.
Parameters
----------
reg_image: RegImage
wsireg RegImage that has a dask store that is chunked in XY (typical of WSIs)
.czi reader does not work!
reg_transform_seq: RegTransformSeq
wsireg registration transform sequence to be applied to the image
tile_size: int
Tile size of the output image
zarr_tile_size: int
Tile used in the zarr intermediate
moving_tile_padding: int
How much additional padded to pull from moving for each transformed tile
This ensures that the interpolation is correctly performed during resampling
Non-rigid transforms may need more spacing
Attributes
----------
reg_image: RegImage
RegImage to be transformed
reg_transform_seq: RegTransformSeq
RegTransformSeq to be used in transformation
tile_shape: tuple of ints
Shape of OME-TIFF tiles going to disk
zarr_tile_shape: tuple of ints
Shape of zarr tiles going to disk temporarily
moving_tile_padding: int
Tile padding use at read in for interpolation
"""
def __init__(
self,
reg_image: RegImage,
reg_transform_seq: RegTransformSeq,
tile_size: int = 512,
zarr_tile_size: int = 2048,
moving_tile_padding: int = 128,
):
self._fixed_tile_positions: List[Tuple[int, int, int, int]] = []
self._fixed_tile_positions_phys: List[
Tuple[float, float, float, float]
] = []
self._moving_tile_positions: List[Tuple[int, int, int, int]] = []
self._moving_tile_positions_phys: List[
Tuple[float, float, float, float]
] = []
self._tiler: Optional[Tiler] = None
self.reg_image: RegImage = reg_image
self.reg_transform_seq: RegTransformSeq = reg_transform_seq
self.tile_shape = (tile_size, tile_size)
self.zarr_tile_shape = (zarr_tile_size, zarr_tile_size)
self._check_dask_array_chunk_sizes(self.reg_image.dask_image)
self.moving_tile_padding = moving_tile_padding
self._build_transformation_tiles()
@property
def fixed_tile_positions(self) -> List[Tuple[int, int, int, int]]:
"""List of tile positions on the fixed image in pixels
first np.ndarray is top-left x,y coordinate
second np.ndarray is bottom-right x,y coordinate"""
return self._fixed_tile_positions
@property
def fixed_tile_positions_phys(
self,
) -> List[Tuple[float, float, float, float]]:
"""List of tile positions on the fixed image in physical coordinate space
first np.ndarray is top-left x,y coordinate
second np.ndarray is bottom-right x,y coordinate"""
return self._fixed_tile_positions_phys
@property
def moving_tile_positions(self) -> List[Tuple[int, int, int, int]]:
"""Transformed coordinates of fixed tile positions to moving, pixels
first np.ndarray is top-left x,y coordinate
second np.ndarray is bottom-right x,y coordinate"""
return self._moving_tile_positions
@property
def moving_tile_positions_phys(
self,
) -> List[Tuple[float, float, float, float]]:
"""Transformed coordinates of fixed tile positions to moving, physical
first np.ndarray is top-left x,y coordinate
second np.ndarray is bottom-right x,y coordinate"""
return self._moving_tile_positions_phys
@property
def tiler(self) -> Tiler:
"""Tiler instance to manage fixed output tiling from image shape."""
return self._tiler
def _create_tiler(self):
"""Create the Tiler instance."""
self._tiler = Tiler(
self.reg_transform_seq.output_size,
self.zarr_tile_shape,
overlap=0,
mode="irregular",
)
def _check_dask_array_chunk_sizes(self, dask_image: da.Array) -> None:
"""Check if dask image has an acceptable chunk-size for tiled writing."""
yx_chunks = (
dask_image.chunksize[:2]
if self.reg_image.is_rgb
else dask_image.chunksize[1:]
)
if np.any(np.asarray(yx_chunks) > np.asarray(self.zarr_tile_shape)):
raise ValueError(
f"Dask image chunksize for image {str(self.reg_image.path)} "
"is too large for tiled writing and effectively memory use is not "
"compared to plane-by-plane writing."
)
return
def _build_transformation_tiles(self):
"""Method to reinitialize tiler if there are changes."""
self._create_tiler()
self._get_fixed_tile_positions()
self._get_fixed_tile_positions_phys()
self._get_moving_tile_positions()
def _get_and_clip_fixed_tile(
self, tile_idx: int, output_size: Tuple[int, int], order=[1, 0]
):
"""Method to ensure tiles do not go beyond the output shape
of the fixed target image"""
tile_pos = self.tiler.get_tile_bbox(tile_idx)
if tile_pos[1][0] > output_size[order[0]]:
tile_pos[1][0] = output_size[order[0]]
if tile_pos[1][1] > output_size[order[1]]:
tile_pos[1][1] = output_size[order[1]]
return tile_pos
def _get_fixed_tile_positions(self):
"""Find the tile positions on the fixed image."""
self._fixed_tile_positions = [
self._get_and_clip_fixed_tile(
i, self.reg_transform_seq.output_size, order=[0, 1]
)
for i in range(self.tiler.n_tiles)
]
def _get_fixed_tile_positions_phys(self):
"""Fixed tile pixel indices to physical coordinates
used in ITK transforms."""
self._fixed_tile_positions_phys = [
(
f[0] * self.reg_transform_seq.output_spacing,
f[1] * self.reg_transform_seq.output_spacing,
)
for f in self._fixed_tile_positions
]
def _get_moving_tile_positions(self):
"""Method to transform tile positions in fixed
to moving so that each write tile in fixed has a corresponding
read region in moving."""
for fixed_tile_pos in self._fixed_tile_positions_phys:
corners_phys = []
corners_px = []
for idx, corner in enumerate(fixed_tile_pos):
if idx == 0:
corner -= self.moving_tile_padding
if idx == 1:
corner += self.moving_tile_padding
for idx, t in enumerate(
self.reg_transform_seq.reg_transforms[::-1]
):
if idx == 0:
t_pt = t.itk_transform.TransformPoint(corner.tolist())
else:
t_pt = t.itk_transform.TransformPoint(t_pt)
t_pt = np.array(t_pt)
t_pt_px = t_pt / self.reg_image.image_res
corners_phys.append(t_pt)
corners_px.append(t_pt_px)
self._moving_tile_positions_phys.append(tuple(corners_phys))
self._moving_tile_positions.append(tuple(corners_px))
def set_output_spacing(
self, output_spacing: Tuple[Union[int, float], Union[int, float]]
) -> None:
"""
Sets the output spacing of the resampled image and will change
output shape accordingly
Parameters
----------
output_spacing: Tuple[Union[int,float], Union[int,float]]
Spacing of grid for resampling. Will default to target image spacing
"""
self.reg_transform_seq.set_output_spacing(output_spacing)
self._build_transformation_tiles()
def set_tile_size(self, tile_size: int) -> None:
"""
Set the internal tile size of the OME-TIFF to be written.
Parameters
----------
tile_size: int
tile size in pixels in x and y for the OME-TIFF
"""
self.tile_shape = (tile_size, tile_size)
def set_zarr_tile_size(self, tile_size: int) -> None:
"""
Set the tile size for the zarr intermediate.
Parameters
----------
tile_size: int
tile size in pixels in x and y for the temporary zarr store
"""
self.zarr_tile_shape = (tile_size, tile_size)
self._build_transformation_tiles()
def _create_tile_resampler(
self, tile_origin: Tuple[float, float]
) -> sitk.ResampleImageFilter:
"""
Build each tile's resampler.
Parameters
----------
tile_origin: Tuple[float, float]
Position of the tile in physical coordinates
Returns
-------
resampler: sitk.ResampleImageFilter
resampler for an individual fixed tile
"""
resampler = sitk.ResampleImageFilter()
resampler.SetOutputOrigin(tile_origin)
resampler.SetOutputDirection(
self.reg_transform_seq.reg_transforms[-1].output_direction
)
resampler.SetSize(self.zarr_tile_shape)
resampler.SetOutputSpacing(self.reg_transform_seq.output_spacing)
interpolator = ELX_TO_ITK_INTERPOLATORS.get(
self.reg_transform_seq.reg_transforms[-1].resample_interpolator
)
resampler.SetInterpolator(interpolator)
resampler.SetTransform(self.reg_transform_seq.composite_transform)
return resampler
def write_tiles_to_zarr_store(
self,
temp_zarr_store: zarr.TempStore,
max_workers: Optional[int] = None,
):
"""
Write tiles to a temporary zarr store.
Parameters
----------
temp_zarr_store: zarr.TempStore
Temporary store where the dataset will go.
Returns
-------
resample_zarray: zarr.Array
zarr store contained transformed images
"""
zgrp = zarr.open(temp_zarr_store)
if self.reg_image.is_rgb:
resample_zarray = zgrp.create_dataset(
random_str(),
shape=(
self.reg_transform_seq.output_size[1],
self.reg_transform_seq.output_size[0],
self.reg_image.shape[-1],
),
chunks=self.tile_shape,
dtype=self.reg_image.im_dtype,
)
else:
resample_zarray = zgrp.create_dataset(
random_str(),
shape=(
self.reg_image.n_ch,
self.reg_transform_seq.output_size[1],
self.reg_transform_seq.output_size[0],
),
chunks=(1,) + self.tile_shape,
dtype=self.reg_image.im_dtype,
)
self._transform_write_tile_set(
resample_zarray, max_workers=max_workers
)
return resample_zarray
def _transform_write_tile(self, data):
"""Worker function to transform and place tile in zarr store."""
(
resample_zarray,
ch_idx,
fixed_tile_position,
fixed_tile_origin,
moving_tile_corners,
) = data
tile_resampler = self._create_tile_resampler(fixed_tile_origin)
x_size, y_size = self._get_image_size()
x_max, x_min, y_max, y_min = self._get_moving_tile_slice(
moving_tile_corners, x_size, y_size
)
tile_resampled = self._resample_tile(
ch_idx, tile_resampler, x_max, x_min, y_max, y_min
)
if tile_resampled:
(
x_max_fixed,
x_min_fixed,
y_max_fixed,
y_min_fixed,
) = self._get_fixed_slice(fixed_tile_position)
x_max, y_max = self._correct_end_moving_slices(
x_max_fixed,
x_min_fixed,
y_max_fixed,
y_min_fixed,
)
if self.reg_image.is_rgb:
resample_zarray[
y_min_fixed:y_max_fixed, x_min_fixed:x_max_fixed, :
] = sitk.GetArrayFromImage(tile_resampled)[:y_max, :x_max, :]
else:
resample_zarray[
ch_idx, y_min_fixed:y_max_fixed, x_min_fixed:x_max_fixed
] = sitk.GetArrayFromImage(tile_resampled)[:y_max, :x_max]
def _get_image_size(self) -> Tuple[int, int]:
"""Get moving image size for tile dilineation"""
x_size = (
self.reg_image.shape[1]
if self.reg_image.is_rgb
else self.reg_image.shape[2]
)
y_size = (
self.reg_image.shape[0]
if self.reg_image.is_rgb
else self.reg_image.shape[1]
)
return x_size, y_size
def _resample_tile(
self,
ch_idx: int,
tile_resampler: sitk.ResampleImageFilter,
x_max: int,
x_min: int,
y_max: int,
y_min: int,
) -> Optional[sitk.Image]:
"""Resample tile or don't if it is outside of the moving
image space."""
if x_min == 0 and x_max == 0:
return
if y_min == 0 and y_max == 0:
return
if self.reg_image.is_rgb:
image = self.reg_image.dask_image[y_min:y_max, x_min:x_max, :]
image = sitk.GetImageFromArray(image, isVector=True)
elif self.reg_image.n_ch == 1:
image = da.squeeze(self.reg_image.dask_image)[
y_min:y_max, x_min:x_max
]
image = sitk.GetImageFromArray(image, isVector=False)
else:
image = self.reg_image.dask_image[ch_idx, y_min:y_max, x_min:x_max]
image = sitk.GetImageFromArray(image, isVector=False)
image.SetSpacing((self.reg_image.image_res, self.reg_image.image_res))
image.SetOrigin(
image.TransformIndexToPhysicalPoint([int(x_min), int(y_min)])
)
tile_resampled = tile_resampler.Execute(image)
return tile_resampled
def _correct_end_moving_slices(
self,
x_max_fixed: int,
x_min_fixed: int,
y_max_fixed: int,
y_min_fixed: int,
) -> Tuple[int, int]:
"""Correct tiles that extend past the size of the fixed coordinate space."""
# correct for end tiles
y_max = y_max_fixed - y_min_fixed
x_max = x_max_fixed - x_min_fixed
return x_max, y_max
def _get_fixed_slice(
self, fixed_tile_position: Tuple[np.ndarray, np.ndarray]
) -> Tuple[int, int, int, int]:
"""Get tile slice in fixed tile pixels."""
y_min_fixed = fixed_tile_position[0][1]
x_min_fixed = fixed_tile_position[0][0]
y_max_fixed = fixed_tile_position[1][1]
x_max_fixed = fixed_tile_position[1][0]
return x_max_fixed, x_min_fixed, y_max_fixed, y_min_fixed
def _get_moving_tile_slice(
self,
moving_tile_corners: Tuple[np.ndarray, np.ndarray],
x_size: int,
y_size: int,
) -> Tuple[int, int, int, int]:
"""Get tile slice in moving tile pixels."""
x_min = (
moving_tile_corners[0][0] if moving_tile_corners[0][0] >= 0 else 0
)
x_min = x_min if x_min <= x_size else x_size
x_min = np.ceil(x_min).astype(int)
x_max = (
moving_tile_corners[1][0] if moving_tile_corners[1][0] >= 0 else 0
)
x_max = x_max if x_max <= x_size else x_size
x_max = np.ceil(x_max).astype(int)
y_min = (
moving_tile_corners[0][1] if moving_tile_corners[0][1] >= 0 else 0
)
y_min = y_min if y_min <= y_size else y_size
y_min = np.ceil(y_min).astype(int)
y_max = (
moving_tile_corners[1][1] if moving_tile_corners[1][1] >= 0 else 0
)
y_max = y_max if y_max <= y_size else y_size
y_max = np.ceil(y_max).astype(int)
# catch changing positions of x and y when there are coordinate flips
if y_min > y_max:
y_max_temp = y_min
y_min_temp = y_max
y_max = y_max_temp
y_min = y_min_temp
if x_min > x_max:
x_max_temp = x_min
x_min_temp = x_max
x_max = x_max_temp
x_min = x_min_temp
return x_max, x_min, y_max, y_min
def _transform_write_tile_set(
self, resample_zarray: zarr.Array, max_workers: Optional[int] = None
):
"""Function to loop over all channels and tile positions
and write to zarr"""
if max_workers == 1:
use_multiprocessing = False
else:
use_multiprocessing = True
if not max_workers:
max_workers = multiprocessing.cpu_count()
n_ch = 1 if self.reg_image.is_rgb else self.reg_image.n_ch
all_tile_args = []
for ch_idx in range(n_ch):
for ft_pos, mt_pos in tqdm(
zip(
self._fixed_tile_positions,
self._moving_tile_positions,
),
total=len(self._fixed_tile_positions),
desc="Writing zarr tiles",
unit=" tile",
disable=True if use_multiprocessing else False,
):
tile_origin = (
ft_pos[0] * self.reg_transform_seq.output_spacing[0]
)
tile_args = (
resample_zarray,
ch_idx,
ft_pos,
tuple(tile_origin.astype(float)),
mt_pos,
)
all_tile_args.append(tile_args)
if not use_multiprocessing:
self._transform_write_tile(tile_args)
if use_multiprocessing:
with ThreadPoolExecutor(max_workers) as executor:
_ = list(
tqdm(
executor.map(
self._transform_write_tile, all_tile_args
),
total=len(all_tile_args),
desc="Writing zarr tiles",
unit=" tile",
)
)
def _prepare_image_info(
self,
image_name,
write_pyramid=True,
):
"""Prepare info for pyramidalization and create OME-TIFF."""
x_size, y_size = self.reg_transform_seq.output_size
x_spacing, y_spacing = self.reg_transform_seq.output_spacing
out_tile_shape = self.tile_shape
# protect against too large tile size
while (
y_size / out_tile_shape[0] <= 1 or x_size / out_tile_shape[0] <= 1
):
out_tile_shape = (out_tile_shape[0] // 2, out_tile_shape[1] // 2)
pyr_levels, _ = get_pyramid_info(
y_size, x_size, self.reg_image.n_ch, self.tile_shape[0]
)
n_pyr_levels = len(pyr_levels)
PhysicalSizeY = y_spacing
PhysicalSizeX = x_spacing
channel_names = format_channel_names(
self.reg_image.channel_names, self.reg_image.n_ch
)
omexml = prepare_ome_xml_str(
y_size,
x_size,
self.reg_image.n_ch,
self.reg_image.im_dtype,
self.reg_image.is_rgb,
PhysicalSizeX=PhysicalSizeX,
PhysicalSizeY=PhysicalSizeY,
PhysicalSizeXUnit="µm",
PhysicalSizeYUnit="µm",
Name=image_name,
Channel=None if self.reg_image.is_rgb else {"Name": channel_names},
)
subifds = n_pyr_levels - 1 if write_pyramid is True else None
return n_pyr_levels, subifds, out_tile_shape, omexml
def _transformed_tile_generator(self, d_array: da.Array, ch_idx: int):
"""Create generator of tifffile tiles for OME-TIFF."""
out_shape = (
d_array.shape[:2] if self.reg_image.is_rgb else d_array.shape[1:]
)
for y in range(0, out_shape[0], self.tile_shape[0]):
for x in range(0, out_shape[1], self.tile_shape[1]):
if self.reg_image.is_rgb:
yield d_array[
y : y + self.tile_shape[0],
x : x + self.tile_shape[1],
:,
].compute()
else:
yield d_array[
ch_idx,
y : y + self.tile_shape[0],
x : x + self.tile_shape[1],
].compute()
def write_image_by_tile(
self,
image_name: str,
output_dir: Union[Path, str] = "",
write_pyramid: bool = True,
compression: Optional[str] = "default",
zarr_temp_dir: Optional[Union[str, Path]] = None,
) -> str:
"""
Write images to OME-TIFF from temp zarr store with data.
Parameters
----------
image_name: str
file path stem of the image to be written
output_dir: Union[str,Path]
directory where image is to be written
write_pyramid: bool
whether to write a pyramid or single layer
compression: str
Use compression. "default" will be lossless "deflate" for non-rgb images
and "jpeg" for RGB images
zarr_temp_dir: Path or str
Directory to store the temporary zarr data
(mostly used for debugging)
Returns
-------
output_file_name: Path
Path to written image file
"""
zstr = zarr.TempStore(dir=zarr_temp_dir)
try:
resample_zarray = self.write_tiles_to_zarr_store(zstr)
output_file_name = str(Path(output_dir) / f"{image_name}.ome.tiff")
if compression == "default":
print("using default compression")
compression = "jpeg" if self.reg_image.is_rgb else "deflate"
else:
compression = compression
(
n_pyr_levels,
subifds,
out_tile_shape,
omexml,
) = self._prepare_image_info(
image_name, write_pyramid=write_pyramid
)
print(f"saving to {output_file_name}")
dask_image = da.from_zarr(resample_zarray)
options = dict(
tile=self.tile_shape,
compression=compression,
photometric="rgb" if self.reg_image.is_rgb else "minisblack",
metadata=None,
)
with TiffWriter(output_file_name, bigtiff=True) as tif:
if self.reg_image.is_rgb:
print(
f"writing base layer RGB - shape: {dask_image.shape}"
)
# tile_iterator_strides = self._get_tile_iterator_strides(dask_image)
tile_iterator = self._transformed_tile_generator(
dask_image, 0
)
tif.write(
tile_iterator,
subifds=subifds,
description=omexml,
shape=dask_image.shape,
dtype=dask_image.dtype,
**options,
)
if write_pyramid:
for pyr_idx in range(1, n_pyr_levels):
sub_res = compute_sub_res(
dask_image,
pyr_idx,
self.tile_shape[0],
self.reg_image.is_rgb,
self.reg_image.im_dtype,
)
print(
f"pyr {pyr_idx} : RGB-shape: {sub_res.shape}"
)
# tile_strides = self._get_tile_iterator_strides(sub_res)
sub_res_tile_iterator = (
self._transformed_tile_generator(sub_res, 0)
)
tif.write(
sub_res_tile_iterator,
shape=sub_res.shape,
dtype=self.reg_image.im_dtype,
**options,
subfiletype=1,
)
else:
for channel_idx in range(self.reg_image.n_ch):
description = omexml if channel_idx == 0 else None
print(
f"writing channel {channel_idx} - shape: {dask_image.shape[1:]}"
)
tile_iterator = self._transformed_tile_generator(
dask_image, channel_idx
)
tif.write(
tile_iterator,
subifds=subifds,
description=description,
shape=dask_image.shape[1:],
dtype=dask_image.dtype,
**options,
)
if write_pyramid:
for pyr_idx in range(1, n_pyr_levels):
sub_res = compute_sub_res(
dask_image,
pyr_idx,
self.tile_shape[0],
self.reg_image.is_rgb,
self.reg_image.im_dtype,
)
sub_res_tile_iterator = (
self._transformed_tile_generator(
sub_res, channel_idx
)
)
tif.write(
sub_res_tile_iterator,
shape=sub_res.shape[1:],
dtype=dask_image.dtype,
**options,
subfiletype=1,
)
try:
resample_zarray.store.clear()
except FileNotFoundError:
pass
return output_file_name
# bare except to always clear temporary storage on failure
except Exception as e:
print(e)
try:
resample_zarray.store.clear()
except FileNotFoundError:
pass
def compute_sub_res(
zarray: da.Array,
pyr_level: int,
tile_size: int,
is_rgb: bool,
im_dtype: np.dtype,
) -> da.Array:
"""
Compute factor-of-2 sub-resolutions from dask array for pyramidalization using dask.
Parameters
----------
zarray: da.Array
Dask array to be downsampled
pyr_level: int
level of the pyramid. 0 = base, 1 = 2x downsampled, 2=4x downsampled...
tile_size: int
Size of tiles in dask array after downsampling
is_rgb: bool
whether dask array is RGB interleaved
im_dtype: np.dtype
dtype of the output da.Array
Returns
-------
resampled_zarray_subres: da.Array
Dask array (unprocessed) to be written
"""
if is_rgb:
resampling_axis = {0: 2**pyr_level, 1: 2**pyr_level, 2: 1}
tiling = (tile_size, tile_size, 3)
else:
resampling_axis = {0: 1, 1: 2**pyr_level, 2: 2**pyr_level}
tiling = (1, tile_size, tile_size)
resampled_zarray_subres = da.coarsen(
np.mean,
zarray,
resampling_axis,
trim_excess=True,
)
resampled_zarray_subres = resampled_zarray_subres.astype(im_dtype)
resampled_zarray_subres = resampled_zarray_subres.rechunk(tiling)
return resampled_zarray_subres
def random_str() -> str:
"""Get a random string to store the zarr array"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for _ in range(10))
| 29,657 | 34.433692 | 92 | py |
wsireg | wsireg-master/wsireg/reg_images/czi_reg_image.py | import warnings
from typing import Tuple
import dask.array as da
import numpy as np
import SimpleITK as sitk
from wsireg.reg_images.reg_image import RegImage
from wsireg.utils.im_utils import CziRegImageReader, guess_rgb
class CziRegImage(RegImage):
def __init__(
self,
image,
image_res,
mask=None,
pre_reg_transforms=None,
preprocessing=None,
channel_names=None,
channel_colors=None,
):
super(CziRegImage, self).__init__(preprocessing)
self._path = image
self._image_res = image_res
self.czi = CziRegImageReader(self._path)
self.reader = "czi"
scene_idx = self.czi.axes.index('S')
if self.czi.shape[scene_idx] > 1:
raise ValueError('multi scene czis not allowed at this time')
(
self._shape,
self._im_dtype,
) = self._get_image_info()
self._is_rgb = guess_rgb(self._shape)
self._n_ch = self._shape[2] if self.is_rgb else self._shape[0]
self._dask_image = self._prepare_dask_image()
if mask:
self._mask = self.read_mask(mask)
self.pre_reg_transforms = pre_reg_transforms
self._channel_names = channel_names
self._channel_colors = channel_colors
self.original_size_transform = None
def _get_image_info(self):
# if RGB need to get 0
if self.czi.shape[-1] > 1:
ch_dim_idx = self.czi.axes.index('0')
else:
ch_dim_idx = self.czi.axes.index('C')
y_dim_idx = self.czi.axes.index('Y')
x_dim_idx = self.czi.axes.index('X')
if self.czi.shape[-1] > 1:
im_dims = np.array(self.czi.shape)[
[y_dim_idx, x_dim_idx, ch_dim_idx]
]
else:
im_dims = np.array(self.czi.shape)[
[ch_dim_idx, y_dim_idx, x_dim_idx]
]
im_dtype = self.czi.dtype
im_dims = (int(im_dims[0]), int(im_dims[1]), int(im_dims[2]))
return im_dims, im_dtype
def _prepare_dask_image(self) -> da.Array:
ch_dim = self._shape[1:] if not self._is_rgb else self._shape[:2]
chunks = ((1,) * self._n_ch, (ch_dim[0],), (ch_dim[1],))
dask_image = da.map_blocks(
self._czi_read_single_channel,
chunks=chunks,
dtype=self.im_dtype,
meta=np.array((), dtype=self._im_dtype),
)
return dask_image
def _czi_read_single_channel(self, block_id: Tuple[int, ...]):
channel_idx = block_id[0]
if self.is_rgb is False:
image = self.czi.sub_asarray(
channel_idx=[channel_idx],
)
else:
image = self.czi.sub_asarray_rgb(
channel_idx=[channel_idx], greyscale=False
)
return np.expand_dims(np.squeeze(image), axis=0)
def read_reg_image(self):
"""
Read and preprocess the image for registration.
For the Zeiss CZI reader, this involves grayscaling RGB on read
or reading only a subset of the channel images.
"""
if self.is_rgb:
reg_image = self.czi.sub_asarray_rgb(greyscale=True)
else:
reg_image = self.czi.sub_asarray(
channel_idx=self.preprocessing.ch_indices,
as_uint8=self.preprocessing.as_uint8,
)
reg_image = np.squeeze(reg_image)
reg_image = sitk.GetImageFromArray(reg_image)
self.preprocess_image(reg_image)
def read_single_channel(self, channel_idx: int):
"""
Read in a single channel for transformation by plane.
Parameters
----------
channel_idx: int
Index of the channel to be read
Returns
-------
image: np.ndarray
Numpy array of the selected channel to be read
"""
if channel_idx > (self.n_ch - 1):
warnings.warn(
"channel_idx exceeds number of channels, reading channel at channel_idx == 0"
)
channel_idx = 0
image = self._dask_image[channel_idx, :, :].compute()
return image
| 4,216 | 28.697183 | 93 | py |
wsireg | wsireg-master/wsireg/reg_images/sitk_reg_image.py | import warnings
import SimpleITK as sitk
from wsireg.reg_images.reg_image import RegImage
from wsireg.utils.im_utils import (
ensure_dask_array,
get_sitk_image_info,
guess_rgb,
)
class SitkRegImage(RegImage):
def __init__(
self,
image,
image_res,
mask=None,
pre_reg_transforms=None,
preprocessing=None,
channel_names=None,
channel_colors=None,
):
super(SitkRegImage, self).__init__(preprocessing)
self._path = image
self._image_res = image_res
self.reader = "sitk"
(
self._shape,
self._im_dtype,
) = self._get_image_info()
self._is_rgb = guess_rgb(self._shape)
self._n_ch = self._shape[2] if self.is_rgb else self._shape[0]
if mask:
self._mask = self.read_mask(mask)
self.pre_reg_transforms = pre_reg_transforms
self._channel_names = channel_names
self._channel_colors = channel_colors
self.original_size_transform = None
def _get_image_info(self):
im_dims, im_dtype = get_sitk_image_info(self._path)
im_dims = (int(im_dims[0]), int(im_dims[1]), int(im_dims[2]))
return im_dims, im_dtype
def read_reg_image(self):
"""
Read and preprocess the image for registration.
"""
reg_image = sitk.ReadImage(self._path)
if self.preprocessing.as_uint8 is True and reg_image.GetPixelID() != 1:
reg_image = sitk.Cast(
sitk.RescaleIntensity(reg_image), sitk.sitkUInt8
)
self.preprocess_image(reg_image)
def _read_full_image(self):
self._dask_image = ensure_dask_array(
sitk.GetArrayFromImage(sitk.ReadImage(self._path))
)
rechunk_size = (
(2048, 2048, self.n_ch) if self.is_rgb else (self.n_ch, 2048, 2048)
)
self._dask_image = self._dask_image.rechunk(rechunk_size)
def read_single_channel(self, channel_idx: int):
"""
Read in a single channel for transformation by plane.
Parameters
----------
channel_idx: int
Index of the channel to be read
Returns
-------
image: np.ndarray
Numpy array of the selected channel to be read
"""
if channel_idx > (self.n_ch - 1):
warnings.warn(
"channel_idx exceeds number of channels, reading channel at channel_idx == 0"
)
channel_idx = 0
if self._is_rgb:
image = self._dask_image[:, :, channel_idx].compute()
else:
image = self._dask_image[channel_idx, :, :].compute()
return image
| 2,748 | 25.432692 | 93 | py |
wsireg | wsireg-master/wsireg/reg_images/np_reg_image.py | import warnings
import numpy as np
import SimpleITK as sitk
from wsireg.reg_images.reg_image import RegImage
from wsireg.utils.im_utils import (
ensure_dask_array,
guess_rgb,
preprocess_dask_array,
)
class NumpyRegImage(RegImage):
def __init__(
self,
image,
image_res,
mask=None,
pre_reg_transforms=None,
preprocessing=None,
channel_names=None,
channel_colors=None,
image_filepath=None,
):
super(NumpyRegImage, self).__init__(preprocessing)
self._path = image_filepath
self._image_res = image_res
self.reader = "numpy"
dask_image = ensure_dask_array(image)
self._dask_image = (
dask_image.reshape(1, *dask_image.shape)
if len(dask_image.shape) == 2
else dask_image
)
self._shape, self._im_dtype = self._get_image_info()
self._is_rgb = guess_rgb(self._shape)
self._n_ch = self._shape[2] if self._is_rgb else self._shape[0]
rechunk_size = (
(2048, 2048, self.n_ch) if self.is_rgb else (self.n_ch, 2048, 2048)
)
self._dask_image = self._dask_image.rechunk(rechunk_size)
if mask is not None:
self._mask = self.read_mask(mask)
self.pre_reg_transforms = pre_reg_transforms
self._channel_names = channel_names
self._channel_colors = channel_colors
self.original_size_transform = None
def _get_image_info(self):
im_dims = self._dask_image.shape
im_dtype = self._dask_image.dtype
return im_dims, im_dtype
def read_reg_image(self):
"""
Read and preprocess the image for registration.
"""
reg_image = self._dask_image
reg_image = preprocess_dask_array(reg_image, self.preprocessing)
if (
self.preprocessing is not None
and self.preprocessing.as_uint8 is True
and reg_image.GetPixelID() != sitk.sitkUInt8
):
reg_image = sitk.RescaleIntensity(reg_image)
reg_image = sitk.Cast(reg_image, sitk.sitkUInt8)
self.preprocess_image(reg_image)
def read_single_channel(self, channel_idx: int) -> np.ndarray:
"""
Read in a single channel for transformation by plane.
Parameters
----------
channel_idx: int
Index of the channel to be read
Returns
-------
image: np.ndarray
Numpy array of the selected channel to be read
"""
if channel_idx > (self.n_ch - 1):
warnings.warn(
"channel_idx exceeds number of channels, reading channel at channel_idx == 0"
)
channel_idx = 0
if self._is_rgb:
image = self._dask_image[:, :, channel_idx].compute()
else:
image = self._dask_image[channel_idx, :, :].compute()
return image
| 2,969 | 27.285714 | 93 | py |
wsireg | wsireg-master/wsireg/reg_images/reg_image.py | import json
from abc import ABC
from copy import deepcopy
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import dask.array as da
import itk
import numpy as np
import SimpleITK as sitk
from wsireg.parameter_maps.preprocessing import ImagePreproParams
from wsireg.reg_shapes import RegShapes
from wsireg.utils.im_utils import (
compute_mask_to_bbox,
contrast_enhance,
sitk_inv_int,
sitk_max_int_proj,
transform_plane,
)
from wsireg.utils.tform_utils import (
gen_aff_tform_flip,
gen_rig_to_original,
gen_rigid_tform_rot,
gen_rigid_translation,
prepare_wsireg_transform_data,
)
class RegImage(ABC):
"""Base class for registration images"""
_path: Union[str, Path]
# image data
_dask_image: da.Array
_reg_image: Union[sitk.Image, itk.Image]
_mask: Optional[Union[sitk.Image, itk.Image]] = None
# image dimension information
_shape: Tuple[int, int, int]
_n_ch: int
_im_dtype: np.dtype
# channel information
_channel_axis: int
_is_rgb: bool
_is_interleaved: bool
_channel_names: List[str]
_channel_colors: List[str]
# scaling information
_image_res: Union[Tuple[int, int], Tuple[float, float]]
# reg image preprocessing
_preprocessing: Optional[ImagePreproParams] = None
def __init__(
self, preprocessing: Optional[Union[ImagePreproParams, Dict]] = None
):
if preprocessing:
if isinstance(preprocessing, ImagePreproParams):
self._preprocessing = preprocessing
elif isinstance(preprocessing, dict):
self._preprocessing = ImagePreproParams(**preprocessing)
else:
self._preprocessing = ImagePreproParams()
@property
def path(self) -> Union[str, Path]:
"""Path to image file."""
return self._path
@property
def shape(self) -> Tuple[int, int, int]:
"""Shape of image file (C,Y,X) or (Y,X,C) if RGB"""
return self._shape
@property
def n_ch(self) -> int:
"""Number of channels in image."""
return self._n_ch
@property
def im_dtype(self) -> np.dtype:
"""Data type of image"""
return self._im_dtype
@property
def is_rgb(self) -> bool:
"""Whether image is RGB or not."""
return self._is_rgb
@property
def is_interleaved(self) -> bool:
"""Whether RGB image is interleaved or not."""
return self._is_interleaved
@property
def channel_axis(self) -> int:
"""Axis of the channel dimension."""
return self._channel_axis
@property
def image_res(self) -> Union[float, int]:
"""Spacing of image pixels (only isotropic right now)"""
return self._image_res
@property
def channel_names(self) -> List[str]:
"""Name of the channels of the image."""
return self._channel_names
@property
def channel_colors(self) -> List[str]:
"""Colors of the channels."""
return self._channel_colors
@property
def dask_image(self) -> da.Array:
"""Dask representation of the image."""
return self._dask_image
@property
def mask(self) -> Optional[Union[sitk.Image, itk.Image]]:
"""Mask of the image."""
return self._mask
@property
def reg_image(self) -> Union[sitk.Image, itk.Image]:
"""Preprocessed version of image for registration"""
return self._reg_image
@property
def preprocessing(self) -> Optional[ImagePreproParams]:
"""Preprocessing params to make `reg_image`"""
return self._preprocessing
def read_mask(
self, mask: Union[str, Path, sitk.Image, np.ndarray]
) -> sitk.Image:
"""
Read a mask from geoJSON or a binary image.
Parameters
----------
mask: path to image/geoJSON or image
Data to be used to make the mask, can be a path to a geoJSON
or an image file, or a if an np.ndarray, used directly.
Returns
-------
mask: sitk.Image
Mask image with spacing/size of `reg_image`
"""
if isinstance(mask, np.ndarray):
mask = sitk.GetImageFromArray(mask)
elif isinstance(mask, (str, Path)):
if Path(mask).suffix.lower() == ".geojson":
out_shape = self.shape[:2] if self.is_rgb else self.shape[1:]
mask_shapes = RegShapes(mask)
mask = mask_shapes.draw_mask(out_shape[::-1], labels=False)
mask = sitk.GetImageFromArray(mask)
else:
mask = sitk.ReadImage(mask)
elif isinstance(mask, sitk.Image):
mask = mask
mask.SetSpacing((self.image_res, self.image_res))
return mask
def preprocess_reg_image_intensity(
self, image: sitk.Image, preprocessing: ImagePreproParams
) -> sitk.Image:
"""
Preprocess image intensity data to single channel image.
Parameters
----------
image: sitk.Image
reg_image to be preprocessed
preprocessing: ImagePreproParams
Parameters of the preprocessing
Returns
-------
image: sitk.Image
Preprocessed single-channel image
"""
if preprocessing.image_type.value == "FL":
preprocessing.invert_intensity = False
elif preprocessing.image_type.value == "BF":
preprocessing.max_int_proj = False
preprocessing.contrast_enhance = False
if self.is_rgb:
preprocessing.invert_intensity = True
if preprocessing.max_int_proj:
image = sitk_max_int_proj(image)
if preprocessing.contrast_enhance:
image = contrast_enhance(image)
if preprocessing.invert_intensity:
image = sitk_inv_int(image)
if preprocessing.custom_processing:
for k, v in preprocessing.custom_processing.items():
print(f"performing preprocessing: {k}")
image = v(image)
image.SetSpacing((self.image_res, self.image_res))
return image
def preprocess_reg_image_spatial(
self,
image: sitk.Image,
preprocessing: ImagePreproParams,
imported_transforms=None,
) -> Tuple[sitk.Image, List[Dict]]:
"""
Spatial preprocessing of the reg_image.
Parameters
----------
image: sitk.Image
reg_image to be preprocessed
preprocessing: ImagePreproParams
Spatial preprocessing parameters
imported_transforms:
Not implemented yet..
Returns
-------
image: sitk.Image
Spatially preprcessed image ready for registration
transforms: list of transforms
List of pre-initial transformations
"""
transforms = []
original_size = image.GetSize()
if preprocessing.downsampling > 1:
print(
"performing downsampling by factor: {}".format(
preprocessing.downsampling
)
)
image.SetSpacing((self.image_res, self.image_res))
image = sitk.Shrink(
image,
(preprocessing.downsampling, preprocessing.downsampling),
)
if self._mask is not None:
self._mask.SetSpacing((self.image_res, self.image_res))
self._mask = sitk.Shrink(
self._mask,
(
preprocessing.downsampling,
preprocessing.downsampling,
),
)
image_res = image.GetSpacing()[0]
else:
image_res = self.image_res
if float(preprocessing.rot_cc) != 0.0:
print(f"rotating counter-clockwise {preprocessing.rot_cc}")
rot_tform = gen_rigid_tform_rot(
image, image_res, preprocessing.rot_cc
)
(
composite_transform,
_,
final_tform,
) = prepare_wsireg_transform_data({"initial": [rot_tform]})
image = transform_plane(image, final_tform, composite_transform)
if self._mask is not None:
self._mask.SetSpacing((image_res, image_res))
self._mask = transform_plane(
self._mask, final_tform, composite_transform
)
transforms.append(rot_tform)
if preprocessing.flip:
print(f"flipping image {preprocessing.flip.value}")
flip_tform = gen_aff_tform_flip(
image, image_res, preprocessing.flip.value
)
(
composite_transform,
_,
final_tform,
) = prepare_wsireg_transform_data({"initial": [flip_tform]})
image = transform_plane(image, final_tform, composite_transform)
if self._mask is not None:
self._mask.SetSpacing((image_res, image_res))
self._mask = transform_plane(
self._mask, final_tform, composite_transform
)
transforms.append(flip_tform)
if self._mask and preprocessing.crop_to_mask_bbox:
print("computing mask bounding box")
if preprocessing.mask_bbox is None:
mask_bbox = compute_mask_to_bbox(self._mask)
preprocessing.mask_bbox = mask_bbox
if preprocessing.mask_bbox:
print("cropping to mask")
translation_transform = gen_rigid_translation(
image,
image_res,
preprocessing.mask_bbox.X,
preprocessing.mask_bbox.Y,
preprocessing.mask_bbox.WIDTH,
preprocessing.mask_bbox.HEIGHT,
)
(
composite_transform,
_,
final_tform,
) = prepare_wsireg_transform_data(
{"initial": [translation_transform]}
)
image = transform_plane(image, final_tform, composite_transform)
self.original_size_transform = gen_rig_to_original(
original_size, deepcopy(translation_transform)
)
if self._mask is not None:
self._mask.SetSpacing((image_res, image_res))
self._mask = transform_plane(
self._mask, final_tform, composite_transform
)
transforms.append(translation_transform)
return image, transforms
def preprocess_image(self, reg_image: sitk.Image) -> None:
"""
Run full intensity and spatial preprocessing. Creates the `reg_image` attribute
Parameters
----------
reg_image: sitk.Image
Raw form of image to be preprocessed
"""
reg_image = self.preprocess_reg_image_intensity(
reg_image, self.preprocessing
)
if reg_image.GetDepth() >= 1:
raise ValueError(
"preprocessing did not result in a single image plane\n"
"multi-channel or 3D image return"
)
if reg_image.GetNumberOfComponentsPerPixel() > 1:
raise ValueError(
"preprocessing did not result in a single image plane\n"
"multi-component / RGB(A) image returned"
)
reg_image, pre_reg_transforms = self.preprocess_reg_image_spatial(
reg_image, self.preprocessing, self.pre_reg_transforms
)
if len(pre_reg_transforms) > 0:
self.pre_reg_transforms = pre_reg_transforms
self._reg_image = reg_image
def reg_image_sitk_to_itk(self, cast_to_float32: bool = True) -> None:
"""
Convert SimpleITK to ITK for use in ITKElastix.
Parameters
----------
cast_to_float32: bool
Whether to make image float32 for ITK, needs to be true for registration.
"""
origin = self._reg_image.GetOrigin()
spacing = self._reg_image.GetSpacing()
# direction = image.GetDirection()
is_vector = self._reg_image.GetNumberOfComponentsPerPixel() > 1
if cast_to_float32 is True:
self._reg_image = sitk.Cast(self._reg_image, sitk.sitkFloat32)
self._reg_image = sitk.GetArrayFromImage(self._reg_image)
else:
self._reg_image = sitk.GetArrayFromImage(self._reg_image)
self._reg_image = itk.GetImageFromArray(
self._reg_image, is_vector=is_vector
)
self._reg_image.SetOrigin(origin)
self._reg_image.SetSpacing(spacing)
if self._mask is not None:
origin = self._mask.GetOrigin()
spacing = self._mask.GetSpacing()
# direction = image.GetDirection()
is_vector = self._mask.GetNumberOfComponentsPerPixel() > 1
if cast_to_float32 is True:
self._mask = sitk.Cast(self._mask, sitk.sitkFloat32)
self._mask = sitk.GetArrayFromImage(self._mask)
else:
self._mask = sitk.GetArrayFromImage(self._mask)
self._mask = itk.GetImageFromArray(self._mask, is_vector=is_vector)
self._mask.SetOrigin(origin)
self._mask.SetSpacing(spacing)
mask_im_type = itk.Image[itk.UC, 2]
self._mask = itk.binary_threshold_image_filter(
self._mask,
lower_threshold=1,
inside_value=1,
ttype=(type(self._mask), mask_im_type),
)
@staticmethod
def _get_all_cache_data_fps(output_dir: Union[str, Path], image_tag: str):
"""Get cached directories"""
output_dir = Path(output_dir)
out_image_fp = output_dir / f"{image_tag}_prepro.tiff"
out_params_fp = output_dir / f"{image_tag}_preprocessing_params.json"
out_mask_fp = output_dir / f"{image_tag}_prepro_mask.tiff"
out_init_tform_fp = output_dir / f"{image_tag}_init_tforms.json"
out_osize_tform_fp = output_dir / f"{image_tag}_orig_size_tform.json"
return (
out_image_fp,
out_params_fp,
out_mask_fp,
out_init_tform_fp,
out_osize_tform_fp,
)
def check_cache_preprocessing(
self, output_dir: Union[str, Path], image_tag: str
):
"""
Parameters
----------
output_dir: path
Where cached data is on disk
image_tag:
Tag of the image modality
Returns
-------
prepro_flag: bool
Whether a preprocessed version of the image exists in the cache.
"""
(
out_image_fp,
out_params_fp,
out_mask_fp,
_,
_,
) = self._get_all_cache_data_fps(output_dir, image_tag)
if out_image_fp.exists() and out_params_fp.exists():
cached_preprocessing = ImagePreproParams.parse_file(out_params_fp)
return self.preprocessing == cached_preprocessing
else:
return False
def cache_image_data(
self, output_dir: Union[str, Path], image_tag: str, check: bool = True
) -> None:
"""
Save preprocessed image data to a cache in WsiReg2D.
Parameters
----------
output_dir: path
Where cached data is on disk
image_tag:
Tag of the image modality
check: bool
Whether to check for existence of data
"""
(
out_image_fp,
out_params_fp,
out_mask_fp,
out_init_tform_fp,
out_osize_tform_fp,
) = self._get_all_cache_data_fps(output_dir, image_tag)
if check:
read_from_cache = self.check_cache_preprocessing(
output_dir, image_tag
)
else:
read_from_cache = False
if not read_from_cache:
print(f"Writing preprocessed image for {image_tag}")
sitk.WriteImage(
self.reg_image, str(out_image_fp), useCompression=True
)
print(f"Finished writing preprocessed image for {image_tag}")
json.dump(
deepcopy(
self.preprocessing.dict(
exclude_none=True, exclude_defaults=True
)
),
open(out_params_fp, "w"),
cls=NpEncoder,
)
json.dump(self.pre_reg_transforms, open(out_init_tform_fp, "w"))
if self._mask is not None:
print(f"Writing preprocessed mask for {image_tag}")
sitk.WriteImage(
self.mask, str(out_mask_fp), useCompression=True
)
print(f"Finished writing preprocessed mask for {image_tag}")
if self.original_size_transform:
json.dump(
self.original_size_transform, open(out_osize_tform_fp, "w")
)
def load_from_cache(self, output_dir: Union[str, Path], image_tag: str):
"""
Read in preprocessed data from the cache folder.
Parameters
----------
output_dir: path
Where cached data is on disk
image_tag:
Tag of the image modality
Returns
-------
from_cache_flag: bool
Whether data was read from cache
"""
(
image_fp,
params_fp,
mask_fp,
init_tform_fp,
osize_tform_fp,
) = self._get_all_cache_data_fps(output_dir, image_tag)
read_from_cache = self.check_cache_preprocessing(output_dir, image_tag)
if read_from_cache:
self._reg_image = sitk.ReadImage(str(image_fp))
self._preprocessing = ImagePreproParams(
**json.load(open(params_fp, "r"))
)
self.pre_reg_transforms = json.load(open(init_tform_fp, "r"))
if osize_tform_fp.exists():
self.original_size_transform = json.load(
open(osize_tform_fp, "r")
)
if mask_fp.exists():
self._mask = sitk.ReadImage(str(mask_fp))
return True
else:
return False
@staticmethod
def load_orignal_size_transform(
output_dir: Union[str, Path], image_tag: str
):
"""
Read original size transform from cache.
Parameters
----------
output_dir: path
Where cached data is on disk
image_tag:
Tag of the image modality
Returns
-------
osize_tform: list
Original size transform or empty
"""
(
_,
_,
_,
init_tform_fp,
_,
) = RegImage._get_all_cache_data_fps(output_dir, image_tag)
if init_tform_fp.exists():
return [json.load(open(init_tform_fp, "r"))]
else:
return []
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
| 19,721 | 29.960754 | 87 | py |
wsireg | wsireg-master/wsireg/reg_images/merge_reg_image.py | from pathlib import Path
from typing import List, Optional, Union
from warnings import warn
import numpy as np
from wsireg.reg_images.loader import reg_image_loader
class MergeRegImage:
def __init__(
self,
image_fp: List[Union[Path, str]],
image_res: List[Union[int, float]],
channel_names: Optional[List[List[str]]] = None,
channel_colors: Optional[List[List[str]]] = None,
):
if isinstance(image_fp, list) is False:
raise ValueError(
"MergeRegImage requires a list of images to merge"
)
if isinstance(image_res, list) is False:
raise ValueError(
"MergeRegImage requires a list of image resolutions for each image to merge"
)
if channel_names is None:
channel_names = [None for _ in range(0, len(image_fp))]
if channel_colors is None:
channel_colors = [None for _ in range(0, len(image_fp))]
images = []
for im_idx, image_data in enumerate(
zip(image_fp, image_res, channel_names, channel_colors)
):
image, image_res, channel_names, channel_colors = image_data
imdata = reg_image_loader(
image,
image_res,
channel_names=channel_names,
channel_colors=channel_colors,
)
if (
imdata.channel_names is None
or len(imdata.channel_names) != imdata.n_ch
):
imdata._channel_names = [
f"C{idx}" for idx in range(0, imdata.n_ch)
]
images.append(imdata)
if all([im.im_dtype == images[0].im_dtype for im in images]) is False:
warn(
"MergeRegImage created with mixed data types, writing will cast "
"to the largest data type"
)
if any([im.is_rgb for im in images]) is True:
warn(
"MergeRegImage does not support writing merged interleaved RGB "
"Data will be written as multi-channel"
)
self.images = images
self.image_fps = image_fp
self.im_dtype = self.images[0].im_dtype
self.is_rgb = False
self.n_ch = np.sum([i.n_ch for i in self.images])
self.channel_names = [i.channel_names for i in self.images]
self.original_size_transform = None
| 2,466 | 31.038961 | 92 | py |
wsireg | wsireg-master/wsireg/reg_images/aics_reg_image.py | import warnings
import dask.array as da
import numpy as np
import SimpleITK as sitk
from aicsimageio import AICSImage
from wsireg.reg_images.reg_image import RegImage
from wsireg.utils.im_utils import (
ensure_dask_array,
guess_rgb,
preprocess_dask_array,
)
class AICSRegImage(RegImage):
def __init__(
self,
image,
image_res,
mask=None,
pre_reg_transforms=None,
preprocessing=None,
channel_names=None,
channel_colors=None,
):
super(AICSRegImage, self).__init__(preprocessing)
self._path = image
self._image_res = image_res
self.reader = "aics"
self.aics_image = AICSImage(self._path)
dask_image = da.queeze(ensure_dask_array(self.aics_image.dask_data))
self._dask_image = (
dask_image.reshape(1, *dask_image.shape)
if len(dask_image.shape) == 2
else dask_image
)
(
self._shape,
self.im_dtype,
) = self._get_image_info()
self._is_rgb = guess_rgb(self._shape)
self._n_ch = self._shape[2] if self._is_rgb else self._shape[0]
if mask:
self._mask = self.read_mask(mask)
self.pre_reg_transforms = pre_reg_transforms
self.channel_names = channel_names
self.channel_colors = channel_colors
self.original_size_transform = None
def _get_image_info(self):
im_dims = self._dask_image.shape
im_dtype = self._dask_image.dtype
return im_dims, im_dtype
def read_reg_image(self):
"""
Read and preprocess the image for registration.
"""
reg_image = self._dask_image
reg_image = preprocess_dask_array(reg_image, self.preprocessing)
if (
self.preprocessing is not None
and self.preprocessing.as_uint8 is True
and reg_image.GetPixelID() != sitk.sitkUInt8
):
reg_image = sitk.RescaleIntensity(reg_image)
reg_image = sitk.Cast(reg_image, sitk.sitkUInt8)
self.preprocess_image(reg_image)
def read_single_channel(self, channel_idx: int) -> np.ndarray:
"""
Read in a single channel for transformation by plane.
Parameters
----------
channel_idx: int
Index of the channel to be read
Returns
-------
image: np.ndarray
Numpy array of the selected channel to be read
"""
if channel_idx > (self.n_ch - 1):
warnings.warn(
"channel_idx exceeds number of channels, reading channel at channel_idx == 0"
)
channel_idx = 0
if self._is_rgb:
image = self._dask_image[:, :, channel_idx].compute()
else:
image = self._dask_image[channel_idx, :, :].compute()
return image
| 2,908 | 25.935185 | 93 | py |
wsireg | wsireg-master/wsireg/reg_images/tifffile_reg_image.py | import warnings
from typing import List, Tuple
import dask.array as da
import numpy as np
import SimpleITK as sitk
from ome_types import from_xml
from tifffile import TiffFile
from wsireg.reg_images.reg_image import RegImage
from wsireg.utils.im_utils import (
get_tifffile_info,
guess_rgb,
preprocess_dask_array,
tifffile_to_dask,
)
class TiffFileRegImage(RegImage):
def __init__(
self,
image_fp,
image_res,
mask=None,
pre_reg_transforms=None,
preprocessing=None,
channel_names=None,
channel_colors=None,
):
super(TiffFileRegImage, self).__init__(preprocessing)
self._path = image_fp
self._image_res = image_res
self.tf = TiffFile(self._path)
self.reader = "tifffile"
(
self._shape,
self._im_dtype,
self.largest_series,
) = self._get_image_info()
self._get_dim_info()
self._dask_image = self._get_dask_image()
if mask:
self._mask = self.read_mask(mask)
self.pre_reg_transforms = pre_reg_transforms
self._channel_names = channel_names
self._channel_colors = channel_colors
self.original_size_transform = None
def _get_image_info(self) -> Tuple[Tuple[int, int, int], np.dtype, int]:
if len(self.tf.series) > 1:
warnings.warn(
"The tiff contains multiple series, "
"the largest series will be read by default"
)
im_dims, im_dtype, largest_series = get_tifffile_info(self._path)
im_dims = (int(im_dims[0]), int(im_dims[1]), int(im_dims[2]))
return im_dims, im_dtype, largest_series
def _get_dim_info(self) -> None:
if self._shape:
if self.tf.ome_metadata:
self.ome_metadata = from_xml(self.tf.ome_metadata)
spp = (
self.ome_metadata.images[self.largest_series]
.pixels.channels[0]
.samples_per_pixel
)
interleaved = self.ome_metadata.images[
self.largest_series
].pixels.interleaved
if spp and spp > 1:
self._is_rgb = True
else:
self._is_rgb = False
if guess_rgb(self._shape) is False:
self._channel_axis = 0
self._is_interleaved = False
elif interleaved and guess_rgb(self._shape):
self._is_interleaved = True
self._channel_axis = len(self._shape) - 1
else:
self._is_rgb = guess_rgb(self._shape)
self._is_interleaved = self._is_rgb
if self._is_rgb:
self._channel_axis = len(self._shape) - 1
else:
self._channel_axis = 0
self._n_ch = self._shape[self._channel_axis]
def _get_dask_image(self) -> List[da.Array]:
dask_image = tifffile_to_dask(self._path, self.largest_series, level=0)
dask_image = (
dask_image.reshape(1, *dask_image.shape)
if len(dask_image.shape) == 2
else dask_image
)
if self._is_rgb and not self._is_interleaved:
dask_image = da.rollaxis(dask_image, 0, 3)
return dask_image
def read_reg_image(self):
"""
Read and preprocess the image for registration.
"""
reg_image = self._dask_image
reg_image = preprocess_dask_array(reg_image, self.preprocessing)
if (
self.preprocessing is not None
and self.preprocessing.as_uint8 is True
and reg_image.GetPixelID() != sitk.sitkUInt8
):
reg_image = sitk.RescaleIntensity(reg_image)
reg_image = sitk.Cast(reg_image, sitk.sitkUInt8)
self.preprocess_image(reg_image)
def read_single_channel(self, channel_idx: int):
"""
Read in a single channel for transformation by plane.
Parameters
----------
channel_idx: int
Index of the channel to be read
Returns
-------
image: np.ndarray
Numpy array of the selected channel to be read
"""
if channel_idx > (self.n_ch - 1):
warnings.warn(
"channel_idx exceeds number of channels, reading channel at channel_idx == 0"
)
channel_idx = 0
if self._is_rgb:
image = self._dask_image[:, :, channel_idx].compute()
else:
image = self._dask_image[channel_idx, :, :].compute()
return image
| 4,762 | 29.33758 | 93 | py |
wsireg | wsireg-master/wsireg/reg_images/__init__.py | from .np_reg_image import NumpyRegImage # noqa: F401
from .sitk_reg_image import SitkRegImage # noqa: F401
from .tifffile_reg_image import TiffFileRegImage # noqa: F401
from .aics_reg_image import AICSRegImage # noqa: F401
from .czi_reg_image import CziRegImage # noqa: F401
from .merge_reg_image import MergeRegImage # noqa: F401
| 337 | 47.285714 | 62 | py |
wsireg | wsireg-master/wsireg/reg_images/loader.py | from pathlib import Path
from typing import Union, Optional, List
import numpy as np
import dask.array as da
import zarr
from wsireg.utils.im_utils import ARRAYLIKE_CLASSES, TIFFFILE_EXTS
from wsireg.parameter_maps.preprocessing import ImagePreproParams
from . import CziRegImage # AICSRegImage,
from . import NumpyRegImage, SitkRegImage, TiffFileRegImage
def reg_image_loader(
image: Union[np.ndarray, da.Array, zarr.Array, str, Path],
image_res: Union[int, float],
mask: Optional[Union[np.ndarray, str, Path]] = None,
pre_reg_transforms: Optional[dict] = None,
preprocessing: Optional[ImagePreproParams] = None,
channel_names: Optional[List[str]] = None,
channel_colors: Optional[List[str]] = None,
) -> Union[TiffFileRegImage, SitkRegImage, NumpyRegImage, CziRegImage]:
"""
Convenience function to read in images. Determines the correct reader.
Parameters
----------
image : str, array-like
file path to the image to be read or an array like image such as
a numpy, dask or zarr array
image_res : float
spatial resolution of image in units per px (i.e. 0.9 um / px)
mask: Union[str, Path, np.ndarray]
path to binary mask (>0 is in) image for registration and/or cropping or a geoJSON with shapes
that will be processed to a binary mask
pre_reg_transforms: dict
Pre-computed transforms to be applied to the image prior to registration
preprocessing: ImagePreproParams
preprocessing parameters for the modality for registration. Registration images should be a xy single plane
so many modalities (multi-channel, RGB) must "create" a single channel.
Defaults: multi-channel images -> max intensity project image
RGB -> greyscale then intensity inversion (black background, white foreground)
channel_names: List[str]
names for the channels to go into the OME-TIFF
channel_colors: List[str]
channels colors for OME-TIFF (not implemented)
Returns
-------
reg_image: RegImage
A RegImage subclass for the particular image loaded
"""
if isinstance(image, ARRAYLIKE_CLASSES):
return NumpyRegImage(
image,
image_res,
mask=mask,
pre_reg_transforms=pre_reg_transforms,
preprocessing=preprocessing,
channel_names=channel_names,
channel_colors=channel_colors,
image_filepath=None,
)
image_ext = Path(image).suffix.lower()
if image_ext in TIFFFILE_EXTS:
reg_image = TiffFileRegImage(
image,
image_res,
mask=mask,
pre_reg_transforms=pre_reg_transforms,
preprocessing=preprocessing,
channel_names=channel_names,
channel_colors=channel_colors,
)
elif image_ext == ".czi":
reg_image = CziRegImage(
image,
image_res,
mask=mask,
pre_reg_transforms=pre_reg_transforms,
preprocessing=preprocessing,
channel_names=channel_names,
channel_colors=channel_colors,
)
else:
reg_image = SitkRegImage(
image,
image_res,
mask=mask,
pre_reg_transforms=pre_reg_transforms,
preprocessing=preprocessing,
channel_names=channel_names,
channel_colors=channel_colors,
)
return reg_image
| 3,482 | 35.28125 | 115 | py |
wsireg | wsireg-master/wsireg/utils/output_utils.py | from typing import Dict, Union
from pathlib import Path
import re
import numpy as np
import matplotlib.pyplot as plt
def _natural_sort(list_to_sort: list) -> list:
"""
Sort list account for lack of leading zeroes.
"""
convert = (
lambda text: int(text) if text.isdigit() else text.lower()
) # noqa: E731
alphanum_key = lambda key: [ # noqa: E731
convert(c) for c in re.split('([0-9]+)', key)
]
return sorted(list_to_sort, key=alphanum_key)
def read_elastix_iteration_data(
iteration_txt: Union[Path, str]
) -> Dict[str, np.ndarray]:
"""
Read and parse elastix iteration info.
Parameters
----------
iteration_txt: str or Path
File path to an elastix iteration info file
Returns
-------
iteration_dict: Dict[str, np.ndarray]
dict containing keys of the data in the information: iteration, metric, time, etc.
"""
with open(iteration_txt, "r") as f:
iteration_data = f.readlines()
iteration_data = [it.split("\t") for it in iteration_data[1:]]
iteration_dict = {
"iteration": np.array([int(it[0]) for it in iteration_data]),
"metric": np.array([float(it[1]) for it in iteration_data]),
"time[a]": np.array([float(it[2]) for it in iteration_data]),
"step_size": np.array([float(it[3]) for it in iteration_data]),
"gradient": np.array([float(it[4]) for it in iteration_data]),
"iter_time": np.array([float(it[5]) for it in iteration_data]),
}
return iteration_dict
def read_elastix_iteration_dir(
registration_dir: Union[Path, str]
) -> Dict[int, Dict[int, Dict[str, np.ndarray]]]:
"""
Read a directory of iteration info and save organize it in a dict for access and plotting.
Parameters
----------
registration_dir: str or Path
output directory of the elastix registration data
Returns
-------
all_iteration_data: Dict[int, Dict[int, Dict[str, np.ndarray]]]
Data for each registration. Keys are 0, 1, 2 for first transform, second transform, etc.
sub-keys of each top level key are resolution 0, 1, 2, etc.
"""
iter_info_fps = sorted(Path(registration_dir).glob("IterationInfo*"))
iter_info_fps = [
Path(fp) for fp in _natural_sort([str(fp) for fp in iter_info_fps])
]
all_iteration_data = dict()
for iter_fp in iter_info_fps:
model_idx = int(iter_fp.name.split(".")[1])
if model_idx not in all_iteration_data.keys():
all_iteration_data.update({model_idx: dict()})
res_idx = int(iter_fp.name.split(".")[2].strip("R"))
model_res_data = read_elastix_iteration_data(iter_fp)
all_iteration_data[model_idx].update({res_idx: model_res_data})
return all_iteration_data
def read_elastix_intermediate_transform_data(
transform_txt: Union[Path, str]
) -> Dict[str, str]:
"""
Read transformation data into a dict for each intermediate transform.
Parameters
----------
transform_txt: str or Path
file path to the transform parameters file
Returns
-------
elastix_transform_data: Dict[str,str]
Transform parameters for each transform in the sequence
"""
with open(transform_txt, "r") as f:
transform_parameters = f.readlines()
elastix_transform_data = dict()
for param in transform_parameters:
if param[0] == "/":
continue
if param[0] == "\n":
continue
param = (
param.replace("(", "")
.replace(")", "")
.replace("\n", "")
.replace('"', "")
)
param_name, param_vals = param.split(" ", 1)
elastix_transform_data.update({param_name: param_vals})
return elastix_transform_data
def read_elastix_transform_dir(
registration_dir: Union[Path, str]
) -> Dict[int, Dict[int, Dict[str, str]]]:
"""
Read an elastix output directory's transformation data.
Parameters
----------
registration_dir: str or Path
file path to the elastix output directory
Returns
-------
all_tform_data: Dict[int, Dict[int, Dict[str, str]]]
Transform paramteter data for each registration. Keys are 0, 1, 2 for first transform, second transform, etc.
sub-keys of each top level key are resolution 0, 1, 2, etc.
"""
tform_info_fps = sorted(
Path(registration_dir).glob("TransformParameters*")
)
tform_info_fps = [
Path(fp) for fp in _natural_sort([str(fp) for fp in tform_info_fps])
]
all_tform_data = dict()
for tform_fp in tform_info_fps:
if len(tform_fp.name.split(".")) > 3:
model_idx = int(tform_fp.name.split(".")[1])
if model_idx not in all_tform_data.keys():
all_tform_data.update({model_idx: dict()})
res_idx = int(tform_fp.name.split(".")[2].strip("R"))
model_res_data = read_elastix_intermediate_transform_data(tform_fp)
all_tform_data[model_idx].update({res_idx: model_res_data})
return all_tform_data
def create_iteration_plot(
iteration_dict: Dict[str, np.ndarray], plot_title: str
) -> plt.Figure:
"""
Generate a multi-panel plot of the elastix iteration info.
Parameters
----------
iteration_dict: Dict[str, np.ndarray]
dict containing keys of the data in the information: iteration, metric, time, etc.
plot_title: str
Main title of the plot
Returns
-------
fig: plt.Figure
Matplotlib figure object on the multi-panel plot
"""
fig, ((plt1, plt2), (plt3, plt4), (plt5, plt6)) = plt.subplots(
3, 2, sharex=True, figsize=(8, 6)
)
fig.suptitle(plot_title)
fig.subplots_adjust(hspace=0.3, wspace=0.3)
plt_pos = {
"metric": plt1,
"time[a]": plt2,
"step_size": plt3,
"gradient": plt4,
"iter_time": plt5,
}
for k, v in iteration_dict.items():
if k == "iteration":
continue
x_data = iteration_dict["iteration"]
y_data = v
x_name = "iteration"
y_name = k
plt_pos[y_name].plot(x_data, y_data)
plt_pos[y_name].set(xlabel=x_name, ylabel=y_name)
if k in ["step_size", "gradient"]:
plt_pos[y_name].set_yscale('log')
plt6.axis('off')
return fig
def write_iteration_plots(
all_iteration_data: Dict[int, Dict[int, Dict[str, np.ndarray]]],
data_key: str,
output_dir: Union[str, Path],
) -> None:
"""
Write all plots for an elastix output folder to a folder.
Parameters
----------
all_iteration_data: Dict[int, Dict[int, Dict[str, np.ndarray]]]
Data for each registration. Keys are 0, 1, 2 for first transform, second transform, etc.
sub-keys of each top level key are resolution 0, 1, 2, etc.
data_key: str
bit of text indicating which images are registered
output_dir: str or Path
Output path of the registration
Returns
-------
None
"""
for model_idx, model_data in all_iteration_data.items():
for res_idx, res_data in model_data.items():
plot_title = f"Registration info for {data_key}- transform idx {model_idx} - resolution {res_idx}"
output_filepath = (
Path(output_dir) / f"IterationPlot.{model_idx}.R{res_idx}.png"
)
out_fig = create_iteration_plot(res_data, plot_title)
out_fig.savefig(str(output_filepath))
plt.close(out_fig)
| 7,574 | 29.668016 | 117 | py |
wsireg | wsireg-master/wsireg/utils/tform_utils.py | import json
from pathlib import Path
from typing import Tuple, Union
import itk
import numpy as np
import SimpleITK as sitk
from wsireg.parameter_maps.transformations import (
BASE_AFF_TFORM,
BASE_RIG_TFORM,
)
from wsireg.reg_transforms.reg_transform import RegTransform
from wsireg.utils.itk_im_conversions import (
itk_image_to_sitk_image,
sitk_image_to_itk_image,
)
from wsireg.utils.reg_utils import json_to_pmap_dict
NUMERIC_ELX_PARAMETERS = {
"CenterOfRotationPoint": np.float64,
"DefaultPixelValue": np.float64,
"Direction": np.float64,
"FixedImageDimension": np.int64,
"Index": np.int64,
"MovingImageDimension": np.int64,
"NumberOfParameters": np.int64,
"Origin": np.float64,
"Size": np.int64,
"Spacing": np.float64,
"TransformParameters": np.float64,
}
ELX_LINEAR_TRANSFORMS = [
"AffineTransform",
"EulerTransform",
"SimilarityTransform",
]
ELX_TO_ITK_INTERPOLATORS = {
"FinalNearestNeighborInterpolator": sitk.sitkNearestNeighbor,
"FinalLinearInterpolator": sitk.sitkLinear,
"FinalBSplineInterpolator": sitk.sitkBSpline,
}
def prepare_tform_dict(tform_dict, shape_tform=False):
tform_dict_out = {}
for k, v in tform_dict.items():
if k == "initial":
tform_dict_out["initial"] = v
else:
tforms = []
for tform in v:
if "invert" in list(tform.keys()):
if shape_tform is False:
tforms.append(tform["image"])
else:
tforms.append(tform["invert"])
else:
tforms.append(tform)
tform_dict_out[k] = tforms
return tform_dict_out
def transform_2d_image_itkelx(
image, transformation_maps, writer="sitk", **zarr_kwargs
):
"""
Transform 2D images with multiple models and return the transformed image
or write the transformed image to disk as a .tif file.
Multichannel or multicomponent images (RGB) have to be transformed a single channel at a time
This function takes care of performing those transformations and reconstructing the image in the same
data type as the input
Parameters
----------
image : SimpleITK.Image
Image to be transformed
transformation_maps : list
list of SimpleElastix ParameterMaps to used for transformation
Returns
-------
Transformed SimpleITK.Image
"""
if transformation_maps is not None:
tfx = itk.TransformixFilter.New()
# TODO: add mask cropping here later
# print("mask cropping")
# tmap = sitk.ReadParameterFile(transformation_maps[0])
# x_min = int(float(tmap["MinimumX"][0]))
# x_max = int(float(tmap["MaximumX"][0]))
# y_min = int(float(tmap["MinimumY"][0]))
# y_max = int(float(tmap["MaximumY"][0]))
# image = image[x_min:x_max, y_min:y_max]
# origin = np.repeat(0, len(image.GetSize()))
# image.SetOrigin(tuple([int(i) for i in origin]))
# else:
transform_pobj = itk.ParameterObject.New()
for idx, tmap in enumerate(transformation_maps):
if isinstance(tmap, str):
tmap = sitk.ReadParameterFile(tmap)
if idx == 0:
tmap["InitialTransformParametersFileName"] = (
"NoInitialTransform",
)
transform_pobj.AddParameterMap(tmap)
else:
tmap["InitialTransformParametersFileName"] = (
"NoInitialTransform",
)
transform_pobj.AddParameterMap(tmap)
tfx.SetTransformParameterObject(transform_pobj)
tfx.LogToConsoleOn()
tfx.LogToFileOff()
else:
tfx = None
# if tfx is None:
# xy_final_size = np.array(image.GetSize(), dtype=np.uint32)
# else:
# xy_final_size = np.array(
# transformation_maps[-1]["Size"], dtype=np.uint32
# )
if writer == "sitk" or writer is None:
return transform_image_itkelx_to_sitk(image, tfx)
elif writer == "zarr":
return
else:
raise ValueError("writer type {} not recognized".format(writer))
def transform_image_to_sitk(image, tfx):
# manage transformation/casting if data is multichannel or RGB
# data is always returned in the same PixelIDType as it is entered
pixel_id = image.GetPixelID()
if tfx is not None:
if pixel_id in list(range(1, 13)) and image.GetDepth() == 0:
tfx.SetMovingImage(image)
image = tfx.Execute()
image = sitk.Cast(image, pixel_id)
elif pixel_id in list(range(1, 13)) and image.GetDepth() > 0:
images = []
for chan in range(image.GetDepth()):
tfx.SetMovingImage(image[:, :, chan])
images.append(sitk.Cast(tfx.Execute(), pixel_id))
image = sitk.JoinSeries(images)
image = sitk.Cast(image, pixel_id)
elif pixel_id > 12:
images = []
for idx in range(image.GetNumberOfComponentsPerPixel()):
im = sitk.VectorIndexSelectionCast(image, idx)
pixel_id_nonvec = im.GetPixelID()
tfx.SetMovingImage(im)
images.append(sitk.Cast(tfx.Execute(), pixel_id_nonvec))
del im
image = sitk.Compose(images)
image = sitk.Cast(image, pixel_id)
return image
def transform_image_itkelx_to_sitk(image, tfx):
# manage transformation/casting if data is multichannel or RGB
# data is always returned in the same PixelIDType as it is entered
pixel_id = image.GetPixelID()
if tfx is not None:
if pixel_id in list(range(1, 13)) and image.GetDepth() == 0:
image = sitk_image_to_itk_image(image, cast_to_float32=True)
tfx.SetMovingImage(image)
tfx.UpdateLargestPossibleRegion()
image = tfx.GetOutput()
image = itk_image_to_sitk_image(image)
image = sitk.Cast(image, pixel_id)
elif pixel_id in list(range(1, 13)) and image.GetDepth() > 0:
images = []
for chan in range(image.GetDepth()):
image = sitk_image_to_itk_image(
image[:, :, chan], cast_to_float32=True
)
tfx.SetMovingImage(image)
tfx.UpdateLargestPossibleRegion()
image = tfx.GetOutput()
image = itk_image_to_sitk_image(image)
image = sitk.Cast(image, pixel_id)
images.append(image)
image = sitk.JoinSeries(images)
image = sitk.Cast(image, pixel_id)
elif pixel_id > 12:
images = []
for idx in range(image.GetNumberOfComponentsPerPixel()):
im = sitk.VectorIndexSelectionCast(image, idx)
pixel_id_nonvec = im.GetPixelID()
im = sitk_image_to_itk_image(im, cast_to_float32=True)
tfx.SetMovingImage(im)
tfx.UpdateLargestPossibleRegion()
im = tfx.GetOutput()
im = itk_image_to_sitk_image(im)
im = sitk.Cast(im, pixel_id_nonvec)
images.append(im)
del im
image = sitk.Compose(images)
image = sitk.Cast(image, pixel_id)
return image
def apply_transform_dict_itkelx(
image_fp,
image_res,
tform_dict_in,
prepro_dict=None,
is_shape_mask=False,
writer="sitk",
**im_tform_kwargs,
):
"""
Apply a complex series of transformations in a python dictionary to an image
Parameters
----------
image_fp : str
file path to the image to be transformed, it will be read in it's entirety
image_res : float
pixel resolution of image to be transformed
tform_dict : dict of lists
dict of SimpleElastix transformations stored in lists, may contain an "initial" transforms (preprocessing transforms)
these will be applied first, then the key order of the dict will determine the rest of the transformations
is_shape_mask : bool
whether the image being transformed is a shape mask (determines import)
Returns
-------
image: itk.Image
image that has been transformed
"""
if is_shape_mask is False:
if isinstance(image_fp, sitk.Image):
image = image_fp
# else:
# image = RegImage(
# image_fp, image_res, prepro_dict=prepro_dict
# ).image
else:
image = sitk.GetImageFromArray(image_fp)
del image_fp
image.SetSpacing((image_res, image_res))
if tform_dict_in is None:
if writer == "zarr":
image = transform_2d_image_itkelx(
image,
None,
writer="zarr",
zarr_store_dir=im_tform_kwargs["zarr_store_dir"],
channel_names=im_tform_kwargs["channel_names"],
channel_colors=im_tform_kwargs["channel_colors"],
)
else:
image = transform_2d_image_itkelx(image, None)
else:
tform_dict = tform_dict_in.copy()
if tform_dict.get("registered") is None and tform_dict.get(0) is None:
tform_dict["registered"] = tform_dict["initial"]
tform_dict.pop("initial", None)
if isinstance(tform_dict.get("registered"), list) is False:
tform_dict["registered"] = [tform_dict["registered"]]
for idx in range(len(tform_dict["registered"])):
tform_dict[idx] = [tform_dict["registered"][idx]]
tform_dict.pop("registered", None)
else:
tform_dict = prepare_tform_dict(tform_dict, shape_tform=False)
if "initial" in tform_dict:
for initial_tform in tform_dict["initial"]:
if isinstance(initial_tform, list) is False:
initial_tform = [initial_tform]
for tform in initial_tform:
image = transform_2d_image_itkelx(image, [tform])
tform_dict.pop("initial", None)
for k, v in tform_dict.items():
if writer == "zarr" and k == list(tform_dict.keys())[-1]:
image = transform_2d_image_itkelx(
image,
v,
writer="zarr",
zarr_store_dir=im_tform_kwargs["zarr_store_dir"],
channel_names=im_tform_kwargs["channel_names"],
channel_colors=im_tform_kwargs["channel_colors"],
)
else:
image = transform_2d_image_itkelx(image, v)
return image
def compute_rot_bound(image, angle=30):
"""
compute the bounds of an image after by an angle
Parameters
----------
image : sitk.Image
SimpleITK image that will be rotated angle
angle : float
angle of rotation in degrees, rotates counter-clockwise if positive
Returns
-------
tuple of the rotated image's size in x and y
"""
w, h = image.GetSize()[0], image.GetSize()[1]
theta = np.radians(angle)
c, s = np.abs(np.cos(theta)), np.abs(np.sin(theta))
bound_w = (h * s) + (w * c)
bound_h = (h * c) + (w * s)
return bound_w, bound_h
def gen_rigid_tform_rot(image, spacing, angle):
"""
generate a SimpleElastix transformation parameter Map to rotate image by angle
Parameters
----------
image : sitk.Image
SimpleITK image that will be rotated
spacing : float
Physical spacing of the SimpleITK image
angle : float
angle of rotation in degrees, rotates counter-clockwise if positive
Returns
-------
SimpleITK.ParameterMap of rotation transformation (EulerTransform)
"""
tform = BASE_RIG_TFORM.copy()
image.SetSpacing((spacing, spacing))
bound_w, bound_h = compute_rot_bound(image, angle=angle)
rot_cent_pt = image.TransformContinuousIndexToPhysicalPoint(
((bound_w - 1) / 2, (bound_h - 1) / 2)
)
c_x, c_y = (image.GetSize()[0] - 1) / 2, (image.GetSize()[1] - 1) / 2
c_x_phy, c_y_phy = image.TransformContinuousIndexToPhysicalPoint(
(c_x, c_y)
)
t_x = rot_cent_pt[0] - c_x_phy
t_y = rot_cent_pt[1] - c_y_phy
tform["Spacing"] = [str(spacing), str(spacing)]
tform["Size"] = [str(int(np.ceil(bound_w))), str(int(np.ceil(bound_h)))]
tform["CenterOfRotationPoint"] = [str(rot_cent_pt[0]), str(rot_cent_pt[1])]
tform["TransformParameters"] = [
str(np.radians(angle)),
str(-1 * t_x),
str(-1 * t_y),
]
return tform
def gen_rigid_translation(
image, spacing, translation_x, translation_y, size_x, size_y
):
"""
generate a SimpleElastix transformation parameter Map to rotate image by angle
Parameters
----------
image : sitk.Image
SimpleITK image that will be rotated
spacing : float
Physical spacing of the SimpleITK image
Returns
-------
SimpleITK.ParameterMap of rotation transformation (EulerTransform)
"""
tform = BASE_RIG_TFORM.copy()
image.SetSpacing((spacing, spacing))
bound_w, bound_h = compute_rot_bound(image, angle=0)
rot_cent_pt = image.TransformContinuousIndexToPhysicalPoint(
((bound_w - 1) / 2, (bound_h - 1) / 2)
)
(
translation_x,
translation_y,
) = image.TransformContinuousIndexToPhysicalPoint(
(float(translation_x), float(translation_y))
)
# c_x, c_y = (image.GetSize()[0] - 1) / 2, (image.GetSize()[1] - 1) / 2
tform["Spacing"] = [str(spacing), str(spacing)]
tform["Size"] = [str(size_x), str(size_y)]
tform["CenterOfRotationPoint"] = [str(rot_cent_pt[0]), str(rot_cent_pt[1])]
tform["TransformParameters"] = [
str(0),
str(translation_x),
str(translation_y),
]
return tform
def gen_rig_to_original(original_size, crop_transform):
crop_transform["Size"] = [str(original_size[0]), str(original_size[1])]
tform_params = [float(t) for t in crop_transform["TransformParameters"]]
crop_transform["TransformParameters"] = [
str(0),
str(tform_params[1] * -1),
str(tform_params[2] * -1),
]
return crop_transform
def gen_aff_tform_flip(image, spacing, flip="h"):
"""
generate a SimpleElastix transformation parameter Map to horizontally or vertically flip image
Parameters
----------
image : sitk.Image
SimpleITK image that will be rotated
spacing : float
Physical spacing of the SimpleITK image
flip : str
"h" or "v" for horizontal or vertical flipping, respectively
Returns
-------
SimpleITK.ParameterMap of flipping transformation (AffineTransform)
"""
tform = BASE_AFF_TFORM.copy()
image.SetSpacing((spacing, spacing))
bound_w, bound_h = compute_rot_bound(image, angle=0)
rot_cent_pt = image.TransformContinuousIndexToPhysicalPoint(
((bound_w - 1) / 2, (bound_h - 1) / 2)
)
tform["Spacing"] = [str(spacing), str(spacing)]
tform["Size"] = [str(int(bound_w)), str(int(bound_h))]
tform["CenterOfRotationPoint"] = [str(rot_cent_pt[0]), str(rot_cent_pt[1])]
if flip == "h":
tform_params = ["-1", "0", "0", "1", "0", "0"]
elif flip == "v":
tform_params = ["1", "0", "0", "-1", "0", "0"]
tform["TransformParameters"] = tform_params
return tform
def make_composite_itk(itk_tforms):
itk_composite = sitk.CompositeTransform(2)
for t in itk_tforms:
itk_composite.AddTransform(t.itk_transform)
return itk_composite
def get_final_tform(parameter_data):
if (
isinstance(parameter_data, str)
and Path(parameter_data).suffix == ".json"
):
parameter_data = json.load(open(parameter_data, "r"))
final_key = list(parameter_data.keys())[-1]
final_tform = parameter_data[final_key][-1]
return final_tform
def collate_wsireg_transforms(parameter_data):
if (
isinstance(parameter_data, str)
and Path(parameter_data).suffix == ".json"
):
parameter_data = json.load(open(parameter_data, "r"))
parameter_data_list = []
for k, v in parameter_data.items():
if k == "initial":
if isinstance(v, dict):
parameter_data_list.append([v])
elif isinstance(v, list):
for init_tform in v:
parameter_data_list.append([init_tform])
else:
sub_tform = []
if isinstance(v, dict):
sub_tform.append(v)
elif isinstance(v, list):
sub_tform += v
sub_tform = sub_tform[::-1]
parameter_data_list.append(sub_tform)
flat_pmap_list = [
item for sublist in parameter_data_list for item in sublist
]
if all([isinstance(t, dict) for t in flat_pmap_list]):
flat_pmap_list = [RegTransform(t) for t in flat_pmap_list]
return flat_pmap_list
def wsireg_transforms_to_itk_composite(parameter_data):
reg_transforms = collate_wsireg_transforms(parameter_data)
composite_tform = make_composite_itk(reg_transforms)
return composite_tform, reg_transforms
def prepare_wsireg_transform_data(transform_data):
if isinstance(transform_data, str) is True:
transform_data = json_to_pmap_dict(transform_data)
if transform_data is not None:
(
composite_transform,
itk_transforms,
) = wsireg_transforms_to_itk_composite(transform_data)
final_tform = itk_transforms[-1]
return composite_transform, itk_transforms, final_tform
def wsireg_transforms_to_resampler(final_tform):
resampler = sitk.ResampleImageFilter()
resampler.SetOutputOrigin(final_tform.output_origin)
resampler.SetSize(final_tform.output_size)
resampler.SetOutputDirection(final_tform.output_direction)
resampler.SetOutputSpacing(final_tform.output_spacing)
interpolator = ELX_TO_ITK_INTERPOLATORS.get(
final_tform.resample_interpolator
)
resampler.SetInterpolator(interpolator)
return resampler
def sitk_transform_image(image, final_tform, composite_transform):
resampler = wsireg_transforms_to_resampler(final_tform)
resampler.SetTransform(composite_transform)
image = resampler.Execute(image)
return image
def identity_elx_transform(
image_size: Tuple[int, int],
image_spacing: Union[Tuple[int, int], Tuple[float, float]],
):
identity = BASE_RIG_TFORM
identity.update({"Size": [str(i) for i in image_size]})
identity.update({"Spacing": [str(i) for i in image_spacing]})
return identity
| 18,878 | 30.998305 | 125 | py |
wsireg | wsireg-master/wsireg/utils/itk_im_conversions.py | import itk
import SimpleITK as sitk
def itk_image_to_sitk_image(image):
origin = tuple(image.GetOrigin())
spacing = tuple(image.GetSpacing())
direction = itk.GetArrayFromMatrix(image.GetDirection()).flatten()
image = sitk.GetImageFromArray(
itk.GetArrayFromImage(image),
isVector=image.GetNumberOfComponentsPerPixel() > 1,
)
image.SetOrigin(origin)
image.SetSpacing(spacing)
image.SetDirection(direction)
return image
def sitk_image_to_itk_image(image, cast_to_float32=False):
origin = image.GetOrigin()
spacing = image.GetSpacing()
# direction = image.GetDirection()
is_vector = image.GetNumberOfComponentsPerPixel() > 1
if cast_to_float32 is True:
image = sitk.Cast(image, sitk.sitkFloat32)
image = sitk.GetArrayFromImage(image)
else:
image = sitk.GetArrayFromImage(image)
image = itk.GetImageFromArray(image, is_vector=is_vector)
image.SetOrigin(origin)
image.SetSpacing(spacing)
# image.SetDirection(direction)
return image
| 1,053 | 29.114286 | 70 | py |
wsireg | wsireg-master/wsireg/utils/shape_utils.py | import json
import zipfile
from copy import deepcopy
from pathlib import Path
import cv2
import geojson
import numpy as np
import SimpleITK as sitk
from wsireg.reg_transforms.reg_transform import RegTransform
from wsireg.utils.tform_utils import wsireg_transforms_to_itk_composite
GJ_SHAPE_TYPE = {
"polygon": geojson.Polygon,
"multipolygon": geojson.MultiPolygon,
"point": geojson.Point,
"multipoint": geojson.MultiPoint,
"multilinestring": geojson.MultiLineString,
"linestring": geojson.LineString,
}
GJ_SHAPE_TYPE_NAME = {
"polygon": "Polygon",
"multipolygon": "MultiPolygon",
"point": "Point",
"multipoint": "MultiPoint",
"multilinestring": "MultiLineString",
"linestring": "LineString",
}
def gj_to_np(gj: dict):
"""
Convert geojson representation to np.ndarray representation of shape
Parameters
----------
gj : dict
GeoJSON data stored as python dict
Returns
-------
dict
containing keys
"array": np.ndarray - x,y point data in array
"shape_type": str - indicates GeoJSON shape_type (Polygon, MultiPolygon, etc.)
"shape_name": str - name inherited from QuPath GeoJSON
"""
if gj.get("geometry").get("type") == "MultiPolygon":
pts = []
for geo in gj.get("geometry").get("coordinates"):
pts.append(np.squeeze(np.array(geo)))
pts = np.vstack(pts)
elif gj.get("geometry").get("type") == "Polygon":
pts = np.squeeze(np.asarray(gj.get("geometry").get("coordinates")))
elif gj.get("geometry").get("type") == "Point":
pts = np.expand_dims(
np.asarray(gj.get("geometry").get("coordinates")), 0
)
elif gj.get("geometry").get("type") == "MultiPoint":
pts = np.asarray(gj.get("geometry").get("coordinates"))
elif gj.get("geometry").get("type") == "LineString":
pts = np.asarray(gj.get("geometry").get("coordinates"))
if gj.get("properties").get("classification") is None:
shape_name = "unnamed"
else:
shape_name = gj.get("properties").get("classification").get("name")
if len(pts.shape) == 1:
return {
"array": np.asarray(pts[0]).astype(np.double),
"shape_type": gj.get("geometry").get("type"),
"shape_name": shape_name,
}
else:
return {
"array": pts.astype(np.double),
"shape_type": gj.get("geometry").get("type"),
"shape_name": shape_name,
}
def add_unamed(gj):
if gj.get("properties").get("classification") is None:
gj.get("properties").update({"classification": {"name": "unnamed"}})
return gj
def read_geojson(json_file: str):
"""Read GeoJSON files (and some QuPath metadata).
Parameters
----------
json_file : str
file path of QuPath exported GeoJSON
Returns
-------
gj_data : dict
dict of GeoJSON information
shapes_np : dict
dict of GeoJSON information stored in np.ndarray
"array": np.ndarray - x,y point data in array
"shape_type": str - indicates GeoJSON shape_type (Polygon, MultiPolygon, etc.)
"shape_name": str - name inherited from QuPath GeoJSON
"""
if Path(json_file).suffix != ".zip":
gj_data = json.load(open(json_file, "r"))
else:
with zipfile.ZipFile(json_file, "r") as z:
for filename in z.namelist():
with z.open(filename) as f:
data = f.read()
gj_data = json.loads(data.decode("utf-8"))
if isinstance(gj_data, dict):
gj_data = [gj_data]
shapes_np = [gj_to_np(s) for s in gj_data]
gj_data = [add_unamed(gj) for gj in gj_data]
return gj_data, shapes_np
def np_to_geojson(
np_array: np.ndarray, shape_type="polygon", shape_name="unnamed"
):
"""convert np.ndarray to GeoJSON dict
Parameters
----------
np_array: np.ndarray
coordinates of data
shape_type:str
GeoJSON shape type
shape_name:str
Name of the shape
Returns
-------
shape_gj : dict
dict of GeoJSON information
shape_np : dict
dict of GeoJSON information stored in np.ndarray
"array": np.ndarray - x,y point data in array
"shape_type": str - indicates GeoJSON shape_type (Polygon, MultiPolygon, etc.)
"shape_name": str - name inherited from QuPath GeoJSON
"""
sh_type = shape_type.lower()
gj_func = GJ_SHAPE_TYPE[sh_type]
if sh_type == "polygon":
np_array = np.vstack([np_array, np_array[0, :]])
geometry = gj_func([np_array.tolist()])
elif sh_type in ["multipoint", "linestring"]:
geometry = gj_func(np_array.transpose().tolist())
else:
geometry = gj_func(np_array.tolist())
shape_gj = {
"type": "Feature",
"id": "annotation",
"geometry": geometry,
"properties": {
"classification": {"name": shape_name, "colorRGB": -1},
"isLocked": False,
},
}
shape_np = {
"array": np_array,
"shape_type": shape_type.lower(),
"shape_name": shape_name,
}
return shape_gj, shape_np
def shape_reader(shape_data, **kwargs):
"""
Read shape data for transformation
Shape data is stored as numpy arrays for operations but also as GeoJSON
to contain metadata and interface with QuPath
Parameters
----------
shape_data: list of np.ndarray or str
if str, will read data as GeoJSON file, if np.ndarray with assume
it is coordinates
kwargs
keyword args passed to np_to_geojson convert
Returns
-------
shapes_gj: list of dicts
list of dicts of GeoJSON information
shapes_np: list of dicts
list of dicts of GeoJSON information stored in np.ndarray
"array": np.ndarray - x,y point data in array
"shape_type": str - indicates GeoJSON shape_type (Polygon, MultiPolygon, etc.)
"shape_name": str - name inherited from QuPath GeoJSON
"""
if isinstance(shape_data, list) is False:
shape_data = [shape_data]
shapes_gj = []
shapes_np = []
for sh in shape_data:
if isinstance(sh, dict):
out_shape_gj, out_shape_np = np_to_geojson(
sh["array"], sh["shape_type"], sh["shape_name"]
)
elif isinstance(sh, np.ndarray):
out_shape_gj, out_shape_np = np_to_geojson(sh, **kwargs)
else:
if Path(sh).is_file():
sh_fp = Path(sh)
if sh_fp.suffix in [".json", ".geojson", ".zip"]:
out_shape_gj, out_shape_np = read_geojson(str(sh_fp))
# elif sh_fp.suffix == ".cz":
# out_shape_gj = read_zen_shapes(str(sh_fp))
# out_shape_np = [gj_to_np(s) for s in out_shape_gj]
else:
raise ValueError(
"{} is not a geojson or numpy array".format(str(sh_fp))
)
else:
raise FileNotFoundError(
"{} file not found".format(str(Path(sh).as_posix()))
)
if isinstance(out_shape_gj, list):
shapes_gj.extend(out_shape_gj)
else:
shapes_gj.append(out_shape_gj)
if isinstance(out_shape_np, list):
shapes_np.extend(out_shape_np)
else:
shapes_np.append(out_shape_np)
return shapes_gj, shapes_np
def scale_shape_coordinates(poly: dict, scale_factor: float):
"""
Scale coordinates by a factor
Parameters
----------
poly: dict
dict of coordinate data contain np.ndarray in "array" key
scale_factor: float
isotropic scaling factor for the coordinates
Returns
-------
poly: dict
dict containing coordinates scaled by scale_factor
"""
poly_coords = poly["array"]
poly_coords = poly_coords * scale_factor
poly["array"] = poly_coords
return poly
def invert_nonrigid_transforms(itk_transforms: list):
"""
Check list of sequential ITK transforms for non-linear (i.e., bspline) transforms
Transformations need to be inverted to transform from moving to fixed space as transformations
are mapped from fixed space to moving.
This will first convert any non-linear transforms to a displacement field then invert the displacement field
using ITK methods. It usually works quite well but is not an exact solution.
Linear transforms can be inverted on the fly when transforming points
Parameters
----------
itk_transforms:list
list of itk.Transform
Returns
-------
itk_transforms:list
list of itk.Transform where any non-linear transforms are replaced with an inverted displacement field
"""
tform_linear = [t.is_linear for t in itk_transforms]
if all(tform_linear):
return itk_transforms
else:
nl_idxs = np.where(np.array(tform_linear) == 0)[0]
for nl_idx in nl_idxs:
if not itk_transforms[nl_idx].inverse_transform:
print(
f"transform at index {nl_idx} is non-linear and the inverse has not been computed\n"
"inverting displacement field(s)...\n"
"this can take some time"
)
itk_transforms[nl_idx].compute_inverse_nonlinear()
return itk_transforms
def prepare_pt_transformation_data(transformations, compute_inverse=True):
"""
Read and prepare wsireg transformation data for point set transformation
Parameters
----------
transformations
list of dict containing elastix transformation data or str to wsireg .json file containing
elastix transformation data
compute_inverse : bool
whether or not to compute the inverse transformation for moving to fixed point transformations
Returns
-------
itk_pt_transforms:list
list of transformation data ready to operate on points
target_res:
physical spacing of the final transformation in the transform sequence
This is needed to map coordinates defined as pixel indices to physical coordinates and then back
"""
if all([isinstance(t, RegTransform) for t in transformations]) is False:
_, transformations = wsireg_transforms_to_itk_composite(
transformations
)
if compute_inverse:
transformations = invert_nonrigid_transforms(transformations)
target_res = float(transformations[-1].output_spacing[0])
return transformations, target_res
def itk_transform_pts(
pt_data: np.ndarray,
itk_transforms: list,
px_idx=True,
source_res=1,
output_idx=True,
target_res=2,
):
"""
Transforms x,y points stored in np.ndarray using list of ITK transforms
All transforms are in physical coordinates, so all points must be converted to physical coordinates
before transformation, but this function allows converting back to pixel indices after transformation
Can intake points in physical coordinates is px_idx == False
Can output points in physical coordinates if output_idx == False
Parameters
----------
pt_data : np.ndarray
array where rows are points and columns are x,y
itk_transforms: list
list of ITK transforms, non-linear transforms should be inverted
px_idx: bool
whether points are specified in physical coordinates (i.e., microns) or
in pixel indices
source_res: float
resolution of the image on which annotations were made
output_idx: bool
whether transformed points should be output in physical coordinates (i.e., microns) or
in pixel indices
target_res: float
resolution of the final target image for conversion back to pixel indices
Returns
-------
tformed_pts:np.ndarray
transformed points array where rows are points and columns are x,y
"""
tformed_pts = []
for pt in pt_data:
if px_idx is True:
pt = pt * source_res
for idx, t in enumerate(itk_transforms):
if idx == 0:
t_pt = t.inverse_transform.TransformPoint(pt)
else:
t_pt = t.inverse_transform.TransformPoint(t_pt)
t_pt = np.array(t_pt)
if output_idx is True:
t_pt *= 1 / target_res
tformed_pts.append(t_pt)
return np.stack(tformed_pts)
def transform_shapes(
shape_data: list,
itk_transforms: list,
px_idx=True,
source_res=1,
output_idx=True,
target_res=2,
):
"""
Convenience function to apply itk_transform_pts to a list of shape data
Parameters
----------
shape_data:
list of arrays where rows are points and columns are x,y
itk_transforms: list
list of ITK transforms, non-linear transforms should be inverted
px_idx: bool
whether points are specified in physical coordinates (i.e., microns) or
in pixel indices
source_res: float
resolution of the image on which annotations were made
output_idx: bool
whether transformed points should be output in physical coordinates (i.e., microns) or
in pixel indices
target_res: float
resolution of the final target image for conversion back to pixel indices
Returns
-------
transformed_shape_data:list
list of transformed np.ndarray data where rows are points and columns are x,y
"""
transformed_shape_data = []
for sh in shape_data:
t_ptset = deepcopy(sh)
ptset = sh.get("array")
t_pts = itk_transform_pts(
ptset,
itk_transforms,
px_idx=px_idx,
source_res=source_res,
output_idx=output_idx,
target_res=target_res,
)
t_ptset["array"] = t_pts
transformed_shape_data.append(t_ptset)
return transformed_shape_data
def insert_transformed_pts_gj(gj_data: list, np_data: list):
"""
insert point data into a list of geojson data
Parameters
----------
shape_gj : dict
dict of GeoJSON information
shape_np : dict
transformed point data in wsireg shape dict
Returns
-------
shape_gj : dict
dict of GeoJSON information with updated coordinate information
"""
gj_data_t = deepcopy(gj_data)
for sh, gj in zip(np_data, gj_data_t):
shape_type = gj.get("geometry").get("type")
if shape_type == "Polygon":
gj.get("geometry").update({"coordinates": [sh["array"].tolist()]})
elif shape_type == "Point":
gj.get("geometry").update(
{"coordinates": np.squeeze(sh["array"]).tolist()}
)
elif shape_type == "MultiPoint":
gj.get("geometry").update({"coordinates": sh["array"].tolist()})
elif shape_type == "LineString":
gj.get("geometry").update({"coordinates": sh["array"].tolist()})
return gj_data_t
def get_int_dtype(value: int):
"""
Determine appropriate bit precision for indexed image
Parameters
----------
value:int
number of shapes
Returns
-------
dtype:np.dtype
apppropriate data type for index mask
"""
if value <= np.iinfo(np.uint8).max:
return np.uint8
if value <= np.iinfo(np.uint16).max:
return np.uint16
if value <= np.iinfo(np.uint32).max:
return np.int32
else:
raise ValueError("Too many shapes")
def get_all_shape_coords(shapes: list):
return np.vstack(
[np.squeeze(sh["geometry"]["coordinates"][0]) for sh in shapes]
)
# code below is for managing transforms as masks rather than point sets
# will probably not reimplement, if segmentation data can is expressed
# as a mask, it can be transformed as an image (using attachment_modality)
def approx_polygon_contour(mask: np.ndarray, percent_arc_length=0.01):
"""
Approximate binary mask contours to polygon vertices using cv2.
Parameters
----------
mask : numpy.ndarray
2-d numpy array of datatype np.uint8.
percent_arc_length : float
scaling of epsilon for polygon approximate vertices accuracy.
maximum distance of new vertices from original.
Returns
-------
numpy.ndarray
returns an 2d array of vertices, rows: points, columns: y,x
"""
contours, hierarchy = cv2.findContours(
mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
)
if len(contours) > 1:
contours = [contours[np.argmax([cnt.shape[0] for cnt in contours])]]
epsilon = percent_arc_length * cv2.arcLength(contours[0], True)
approx = cv2.approxPolyDP(contours[0], epsilon, True)
return np.squeeze(approx).astype(np.uint32)
def index_mask_to_shapes(index_mask, shape_name, tf_shapes):
"""
Find the polygons of a transformed shape mask, conveting binary mask
to list of polygon verteces and sorting by numerical index
Parameters
----------
index_mask:np.ndarray
mask where each shape is defined by it's index
shape_name:str
name of the shape
tf_shapes:list
original list of shape GeoJSON data to be updated
Returns
-------
updated_shapes:list
dict of GeoJSON information with updated coordinate information
"""
labstats = sitk.LabelShapeStatisticsImageFilter()
labstats.SetBackgroundValue(0)
labstats.Execute(index_mask)
index_mask = sitk.GetArrayFromImage(index_mask)
updated_shapes = deepcopy(tf_shapes)
for idx, shape in enumerate(tf_shapes):
if shape["properties"]["classification"]["name"] == shape_name:
label_bb = labstats.GetBoundingBox(idx + 1)
x_min = label_bb[0]
x_len = label_bb[2]
y_min = label_bb[1]
y_len = label_bb[3]
sub_mask = index_mask[y_min : y_min + y_len, x_min : x_min + x_len]
sub_mask[sub_mask == idx + 1] = 255
yx_coords = approx_polygon_contour(sub_mask, 0.00001)
xy_coords = yx_coords
xy_coords = np.append(xy_coords, xy_coords[:1, :], axis=0)
xy_coords = xy_coords + [x_min, y_min]
updated_shapes[idx]["geometry"]["coordinates"] = [
xy_coords.tolist()
]
return updated_shapes
# don't intend to maintain
# def read_zen_shapes(zen_fp):
# """Read Zeiss Zen Blue .cz ROIs files to wsimap shapely format.
#
# Parameters
# ----------
# zen_fp : str
# file path of Zen .cz.
#
# Returns
# -------
# list
# list of wsimap shapely rois
#
# """
#
# root = etree.parse(zen_fp)
#
# rois = root.xpath("//Elements")[0]
# shapes_out = []
# for shape in rois:
# try:
# ptset_name = shape.find("Attributes/Name").text
# except AttributeError:
# ptset_name = "unnamed"
#
# if shape.tag == "Polygon":
# ptset_cz = shape.find("Geometry/Points")
# # ptset_type = "Polygon"
#
# poly_str = ptset_cz.text
# poly_str = poly_str.split(" ")
# poly_str = [poly.split(",") for poly in poly_str]
# poly_str = [[float(pt[0]), float(pt[1])] for pt in poly_str]
#
# poly = {
# "geometry": geojson.Polygon(poly_str),
# "properties": {"classification": {"name": ptset_name}},
# }
#
# shapes_out.append(poly)
#
# if shape.tag == "Rectangle":
# rect_pts = shape.find("Geometry")
#
# x = float(rect_pts.findtext("Left"))
# y = float(rect_pts.findtext("Top"))
# width = float(rect_pts.findtext("Width"))
# height = float(rect_pts.findtext("Height"))
#
# rect = geojson.Polygon(
# [
# [x, y],
# [x + width, y],
# [x + width, y + height],
# [x, y + height],
# [x, y],
# ]
# )
#
# rect = {
# "geometry": rect,
# "properties": {"classification": {"name": ptset_name}},
# }
#
# shapes_out.append(rect)
#
# return shapes_out
| 20,475 | 30.213415 | 112 | py |
wsireg | wsireg-master/wsireg/utils/config_utils.py | import yaml
def parse_check_reg_config(yaml_filepath):
with open(yaml_filepath, "r") as file:
reg_config = yaml.full_load(file)
def check_for_key(top_key, check_dict, check_key):
if check_dict.get(check_key) is None:
raise ValueError(f"{top_key} does not contain an {check_key}")
if reg_config.get("project_name") is None:
raise ValueError(
f"{yaml_filepath} does not contain a project_name key"
)
if reg_config.get("output_dir") is None:
raise ValueError(f"{yaml_filepath} does not contain a output_dir key")
if reg_config.get("cache_images") is None:
reg_config.update({"cache_images": True})
if reg_config.get("modalities"):
for key, val in reg_config["modalities"].items():
[
check_for_key(key, val, ck)
for ck in ["image_filepath", "image_res"]
]
if reg_config.get("reg_paths"):
for key, val in reg_config["reg_paths"].items():
[
check_for_key(key, val, ck)
for ck in [
"src_modality_name",
"tgt_modality_name",
"reg_params",
]
]
if isinstance(val.get("reg_params"), str):
val.update({"reg_params": [val.get("reg_params")]})
if reg_config.get("attachment_images"):
for key, val in reg_config["attachment_images"].items():
[
check_for_key(key, val, ck)
for ck in [
"attachment_modality",
"image_filepath",
"image_res",
]
]
if reg_config.get("attachment_shapes"):
for key, val in reg_config["attachment_shapes"].items():
[
check_for_key(key, val, ck)
for ck in ["attachment_modality", "shape_files"]
]
return reg_config
| 1,985 | 29.553846 | 78 | py |
wsireg | wsireg-master/wsireg/utils/tform_conversion.py | from copy import deepcopy
import SimpleITK as sitk
def euler_elx_to_itk2d(tform, is_translation=False):
euler2d = sitk.Euler2DTransform()
if is_translation:
elx_parameters = [0]
elx_parameters_trans = [float(p) for p in tform['TransformParameters']]
elx_parameters.extend(elx_parameters_trans)
else:
center = [float(p) for p in tform['CenterOfRotationPoint']]
euler2d.SetFixedParameters(center)
elx_parameters = [float(p) for p in tform['TransformParameters']]
euler2d.SetParameters(elx_parameters)
return euler2d
def similarity_elx_to_itk2d(tform):
similarity2d = sitk.Similarity2DTransform()
center = [float(p) for p in tform['CenterOfRotationPoint']]
similarity2d.SetFixedParameters(center)
elx_parameters = [float(p) for p in tform['TransformParameters']]
similarity2d.SetParameters(elx_parameters)
return similarity2d
def affine_elx_to_itk2d(tform):
im_dimension = len(tform["Size"])
affine2d = sitk.AffineTransform(im_dimension)
center = [float(p) for p in tform['CenterOfRotationPoint']]
affine2d.SetFixedParameters(center)
elx_parameters = [float(p) for p in tform['TransformParameters']]
affine2d.SetParameters(elx_parameters)
return affine2d
def bspline_elx_to_itk2d(tform):
im_dimension = len(tform["Size"])
bspline2d = sitk.BSplineTransform(im_dimension, 3)
bspline2d.SetTransformDomainOrigin(
[float(p) for p in tform['Origin']]
) # from fixed image
bspline2d.SetTransformDomainPhysicalDimensions(
[int(p) for p in tform['Size']]
) # from fixed image
bspline2d.SetTransformDomainDirection(
[float(p) for p in tform['Direction']]
) # from fixed image
fixedParams = [int(p) for p in tform['GridSize']]
fixedParams += [float(p) for p in tform['GridOrigin']]
fixedParams += [float(p) for p in tform['GridSpacing']]
fixedParams += [float(p) for p in tform['GridDirection']]
bspline2d.SetFixedParameters(fixedParams)
bspline2d.SetParameters([float(p) for p in tform['TransformParameters']])
return bspline2d
def convert_to_itk(tform):
if tform["Transform"][0] == "AffineTransform":
itk_tform = affine_elx_to_itk2d(tform)
elif tform["Transform"][0] == "SimilarityTransform":
itk_tform = similarity_elx_to_itk2d(tform)
elif tform["Transform"][0] == "TranslationTransform":
itk_tform = euler_elx_to_itk2d(tform, is_translation=True)
elif tform["Transform"][0] == "EulerTransform":
itk_tform = euler_elx_to_itk2d(tform)
elif tform["Transform"][0] == "BSplineTransform":
itk_tform = bspline_elx_to_itk2d(tform)
itk_tform.OutputSpacing = [float(p) for p in tform["Spacing"]]
itk_tform.OutputDirection = [float(p) for p in tform["Direction"]]
itk_tform.OutputOrigin = [float(p) for p in tform["Origin"]]
itk_tform.OutputSize = [int(p) for p in tform["Size"]]
itk_tform.ResampleInterpolator = tform["ResampleInterpolator"][0]
return itk_tform
def get_elastix_transforms(transformations):
elastix_transforms = deepcopy(transformations)
for k, v in elastix_transforms.items():
elastix_transforms.update({k: [t.elastix_transform for t in v]})
return elastix_transforms
| 3,303 | 32.714286 | 79 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.