repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/__init__.py | from .builder import build_hook
from .byol_hook import BYOLHook
from .deepcluster_hook import DeepClusterHook
from .odc_hook import ODCHook
from .optimizer_hook import DistOptimizerHook
from .extractor import Extractor
from .validate_hook import ValidateHook
from .registry import HOOKS
| 287 | 31 | 45 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/builder.py | from openselfsup.utils import build_from_cfg
from .registry import HOOKS
def build_hook(cfg, default_args=None):
return build_from_cfg(cfg, HOOKS, default_args)
| 168 | 20.125 | 51 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/hooks/deepcluster_hook.py | import numpy as np
from mmcv.runner import Hook
import torch
import torch.distributed as dist
from openselfsup.third_party import clustering as _clustering
from openselfsup.utils import print_log
from .registry import HOOKS
from .extractor import Extractor
@HOOKS.register_module
class DeepClusterHook(Hook):
"""Hook for DeepCluster.
Args:
extractor (dict): Config dict for feature extraction.
clustering (dict): Config dict that specifies the clustering algorithm.
unif_sampling (bool): Whether to apply uniform sampling.
reweight (bool): Whether to apply loss re-weighting.
reweight_pow (float): The power of re-weighting.
init_memory (bool): Whether to initialize memory banks for ODC.
Default: False.
initial (bool): Whether to call the hook initially. Default: True.
interval (int): Frequency of epochs to call the hook. Default: 1.
dist_mode (bool): Use distributed training or not. Default: True.
data_loaders (DataLoader): A PyTorch dataloader. Default: None.
"""
def __init__(
self,
extractor,
clustering,
unif_sampling,
reweight,
reweight_pow,
init_memory=False, # for ODC
initial=True,
interval=1,
dist_mode=True,
data_loaders=None):
self.extractor = Extractor(dist_mode=dist_mode, **extractor)
self.clustering_type = clustering.pop('type')
self.clustering_cfg = clustering
self.unif_sampling = unif_sampling
self.reweight = reweight
self.reweight_pow = reweight_pow
self.init_memory = init_memory
self.initial = initial
self.interval = interval
self.dist_mode = dist_mode
self.data_loaders = data_loaders
def before_run(self, runner):
if self.initial:
self.deepcluster(runner)
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
self.deepcluster(runner)
def deepcluster(self, runner):
# step 1: get features
runner.model.eval()
features = self.extractor(runner)
runner.model.train()
# step 2: get labels
if not self.dist_mode or (self.dist_mode and runner.rank == 0):
clustering_algo = _clustering.__dict__[self.clustering_type](
**self.clustering_cfg)
# Features are normalized during clustering
clustering_algo.cluster(features, verbose=True)
assert isinstance(clustering_algo.labels, np.ndarray)
new_labels = clustering_algo.labels.astype(np.int64)
np.save(
"{}/cluster_epoch_{}.npy".format(runner.work_dir,
runner.epoch), new_labels)
self.evaluate(runner, new_labels)
else:
new_labels = np.zeros((len(self.data_loaders[0].dataset), ),
dtype=np.int64)
if self.dist_mode:
new_labels_tensor = torch.from_numpy(new_labels).cuda()
dist.broadcast(new_labels_tensor, 0)
new_labels = new_labels_tensor.cpu().numpy()
new_labels_list = list(new_labels)
# step 3: assign new labels
self.data_loaders[0].dataset.assign_labels(new_labels_list)
# step 4 (a): set uniform sampler
if self.unif_sampling:
self.data_loaders[0].sampler.set_uniform_indices(
new_labels_list, self.clustering_cfg.k)
# step 4 (b): set loss reweight
if self.reweight:
runner.model.module.set_reweight(new_labels, self.reweight_pow)
# step 5: randomize classifier
runner.model.module.head.init_weights(init_linear='normal')
if self.dist_mode:
for p in runner.model.module.head.state_dict().values():
dist.broadcast(p, 0)
# step 6: init memory for ODC
if self.init_memory:
runner.model.module.memory_bank.init_memory(features, new_labels)
def evaluate(self, runner, new_labels):
hist = np.bincount(new_labels, minlength=self.clustering_cfg.k)
empty_cls = (hist == 0).sum()
minimal_cls_size, maximal_cls_size = hist.min(), hist.max()
if runner.rank == 0:
print_log(
"empty_num: {}\tmin_cluster: {}\tmax_cluster:{}".format(
empty_cls.item(), minimal_cls_size.item(),
maximal_cls_size.item()),
logger='root')
| 4,637 | 36.104 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/contextmanagers.py | # coding: utf-8
import asyncio
import contextlib
import logging
import os
import time
from typing import List
import torch
logger = logging.getLogger(__name__)
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
@contextlib.asynccontextmanager
async def completed(trace_name='',
name='',
sleep_interval=0.05,
streams: List[torch.cuda.Stream] = None):
"""Async context manager that waits for work to complete on
given CUDA streams."""
if not torch.cuda.is_available():
yield
return
stream_before_context_switch = torch.cuda.current_stream()
if not streams:
streams = [stream_before_context_switch]
else:
streams = [s if s else stream_before_context_switch for s in streams]
end_events = [
torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
]
if DEBUG_COMPLETED_TIME:
start = torch.cuda.Event(enable_timing=True)
stream_before_context_switch.record_event(start)
cpu_start = time.monotonic()
logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
grad_enabled_before = torch.is_grad_enabled()
try:
yield
finally:
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_end = time.monotonic()
for i, stream in enumerate(streams):
event = end_events[i]
stream.record_event(event)
grad_enabled_after = torch.is_grad_enabled()
# observed change of torch.is_grad_enabled() during concurrent run of
# async_test_bboxes code
assert (grad_enabled_before == grad_enabled_after
), 'Unexpected is_grad_enabled() value change'
are_done = [e.query() for e in end_events]
logger.debug('%s %s completed: %s streams: %s', trace_name, name,
are_done, streams)
with torch.cuda.stream(stream_before_context_switch):
while not all(are_done):
await asyncio.sleep(sleep_interval)
are_done = [e.query() for e in end_events]
logger.debug(
'%s %s completed: %s streams: %s',
trace_name,
name,
are_done,
streams,
)
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_time = (cpu_end - cpu_start) * 1000
stream_times_ms = ''
for i, stream in enumerate(streams):
elapsed_time = start.elapsed_time(end_events[i])
stream_times_ms += ' {} {:.2f} ms'.format(stream, elapsed_time)
logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
stream_times_ms)
@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
trace_name='concurrent',
name='stream'):
"""Run code concurrently in different streams.
:param streamqueue: asyncio.Queue instance.
Queue tasks define the pool of streams used for concurrent execution.
"""
if not torch.cuda.is_available():
yield
return
initial_stream = torch.cuda.current_stream()
with torch.cuda.stream(initial_stream):
stream = await streamqueue.get()
assert isinstance(stream, torch.cuda.Stream)
try:
with torch.cuda.stream(stream):
logger.debug('%s %s is starting, stream: %s', trace_name, name,
stream)
yield
current = torch.cuda.current_stream()
assert current == stream
logger.debug('%s %s has finished, stream: %s', trace_name,
name, stream)
finally:
streamqueue.task_done()
streamqueue.put_nowait(stream)
| 4,103 | 32.365854 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/registry.py | import inspect
from functools import partial
import mmcv
class Registry(object):
def __init__(self, name):
self._name = name
self._module_dict = dict()
def __repr__(self):
format_str = self.__class__.__name__ + '(name={}, items={})'.format(
self._name, list(self._module_dict.keys()))
return format_str
@property
def name(self):
return self._name
@property
def module_dict(self):
return self._module_dict
def get(self, key):
return self._module_dict.get(key, None)
def _register_module(self, module_class, force=False):
"""Register a module.
Args:
module (:obj:`nn.Module`): Module to be registered.
"""
if not inspect.isclass(module_class):
raise TypeError('module must be a class, but got {}'.format(
type(module_class)))
module_name = module_class.__name__
if not force and module_name in self._module_dict:
raise KeyError('{} is already registered in {}'.format(
module_name, self.name))
self._module_dict[module_name] = module_class
def register_module(self, cls=None, force=False):
if cls is None:
return partial(self.register_module, force=force)
self._register_module(cls, force=force)
return cls
def build_from_cfg(cfg, registry, default_args=None):
"""Build a module from config dict.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
registry (:obj:`Registry`): The registry to search the type from.
default_args (dict, optional): Default initialization arguments.
Returns:
obj: The constructed object.
"""
assert isinstance(cfg, dict) and 'type' in cfg
assert isinstance(default_args, dict) or default_args is None
args = cfg.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
obj_cls = registry.get(obj_type)
if obj_cls is None:
raise KeyError('{} is not in the {} registry'.format(
obj_type, registry.name))
elif inspect.isclass(obj_type):
obj_cls = obj_type
else:
raise TypeError('type must be a str or valid type, but got {}'.format(
type(obj_type)))
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
return obj_cls(**args)
| 2,478 | 29.9875 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/optimizers.py | import torch
from torch.optim.optimizer import Optimizer, required
from torch.optim import *
class LARS(Optimizer):
r"""Implements layer-wise adaptive rate scaling for SGD.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): base learning rate (\gamma_0)
momentum (float, optional): momentum factor (default: 0) ("m")
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
("\beta")
dampening (float, optional): dampening for momentum (default: 0)
eta (float, optional): LARS coefficient
nesterov (bool, optional): enables Nesterov momentum (default: False)
Based on Algorithm 1 of the following paper by You, Gitman, and Ginsburg.
Large Batch Training of Convolutional Networks:
https://arxiv.org/abs/1708.03888
Example:
>>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9,
>>> weight_decay=1e-4, eta=1e-3)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
"""
def __init__(self,
params,
lr=required,
momentum=0,
dampening=0,
weight_decay=0,
eta=0.001,
nesterov=False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay))
if eta < 0.0:
raise ValueError("Invalid LARS coefficient value: {}".format(eta))
defaults = dict(
lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov, eta=eta)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(LARS, self).__init__(params, defaults)
def __setstate__(self, state):
super(LARS, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
eta = group['eta']
nesterov = group['nesterov']
lr = group['lr']
lars_exclude = group.get('lars_exclude', False)
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad
if lars_exclude:
local_lr = 1.
else:
weight_norm = torch.norm(p).item()
grad_norm = torch.norm(d_p).item()
# Compute local learning rate for this layer
local_lr = eta * weight_norm / \
(grad_norm + weight_decay * weight_norm)
actual_lr = local_lr * lr
d_p = d_p.add(p, alpha=weight_decay).mul(actual_lr)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = \
torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(-d_p)
return loss
| 4,327 | 35.991453 | 88 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/profiling.py | import contextlib
import sys
import time
import torch
if sys.version_info >= (3, 7):
@contextlib.contextmanager
def profile_time(trace_name,
name,
enabled=True,
stream=None,
end_stream=None):
"""Print time spent by CPU and GPU.
Useful as a temporary context manager to find sweet spots of
code suitable for async implementation.
"""
if (not enabled) or not torch.cuda.is_available():
yield
return
stream = stream if stream else torch.cuda.current_stream()
end_stream = end_stream if end_stream else stream
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
stream.record_event(start)
try:
cpu_start = time.monotonic()
yield
finally:
cpu_end = time.monotonic()
end_stream.record_event(end)
end.synchronize()
cpu_time = (cpu_end - cpu_start) * 1000
gpu_time = start.elapsed_time(end)
msg = "{} {} cpu_time {:.2f} ms ".format(trace_name, name,
cpu_time)
msg += "gpu_time {:.2f} ms stream {}".format(gpu_time, stream)
print(msg, end_stream)
| 1,363 | 32.268293 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/collect.py | import numpy as np
import mmcv
import torch
from .gather import gather_tensors_batch
def nondist_forward_collect(func, data_loader, length):
"""Forward and collect network outputs.
This function performs forward propagation and collects outputs.
It can be used to collect results, features, losses, etc.
Args:
func (function): The function to process data. The output must be
a dictionary of CPU tensors.
length (int): Expected length of output arrays.
Returns:
results_all (dict(np.ndarray)): The concatenated outputs.
"""
results = []
prog_bar = mmcv.ProgressBar(len(data_loader))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = func(**data)
results.append(result)
prog_bar.update()
results_all = {}
for k in results[0].keys():
results_all[k] = np.concatenate(
[batch[k].numpy() for batch in results], axis=0)
assert results_all[k].shape[0] == length
return results_all
def dist_forward_collect(func, data_loader, rank, length, ret_rank=-1):
"""Forward and collect network outputs in a distributed manner.
This function performs forward propagation and collects outputs.
It can be used to collect results, features, losses, etc.
Args:
func (function): The function to process data. The output must be
a dictionary of CPU tensors.
rank (int): This process id.
length (int): Expected length of output arrays.
ret_rank (int): The process that returns.
Other processes will return None.
Returns:
results_all (dict(np.ndarray)): The concatenated outputs.
"""
results = []
if rank == 0:
prog_bar = mmcv.ProgressBar(len(data_loader))
for idx, data in enumerate(data_loader):
with torch.no_grad():
result = func(**data) # dict{key: tensor}
results.append(result)
if rank == 0:
prog_bar.update()
results_all = {}
for k in results[0].keys():
results_cat = np.concatenate([batch[k].numpy() for batch in results],
axis=0)
if ret_rank == -1:
results_gathered = gather_tensors_batch(results_cat, part_size=20)
results_strip = np.concatenate(results_gathered, axis=0)[:length]
else:
results_gathered = gather_tensors_batch(
results_cat, part_size=20, ret_rank=ret_rank)
if rank == ret_rank:
results_strip = np.concatenate(
results_gathered, axis=0)[:length]
else:
results_strip = None
results_all[k] = results_strip
return results_all
| 2,773 | 32.02381 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/misc.py | from functools import partial
import mmcv
import numpy as np
from six.moves import map, zip
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
num_imgs = tensor.size(0)
mean = np.array(mean, dtype=np.float32)
std = np.array(std, dtype=np.float32)
imgs = []
for img_id in range(num_imgs):
img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
img = mmcv.imdenormalize(
img, mean, std, to_bgr=to_rgb).astype(np.uint8)
imgs.append(np.ascontiguousarray(img))
return imgs
def multi_apply(func, *args, **kwargs):
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of
size count)."""
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds, :] = data
return ret
| 1,107 | 28.157895 | 74 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/logger.py | import logging
from mmcv.runner import get_dist_info
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "openselfsup".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = logging.getLogger(__name__.split('.')[0]) # i.e., openselfsup
# if the logger has been initialized, just return it
if logger.hasHandlers():
return logger
format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=format_str, level=log_level)
rank, _ = get_dist_info()
if rank != 0:
logger.setLevel('ERROR')
elif log_file is not None:
file_handler = logging.FileHandler(log_file, 'w')
file_handler.setFormatter(logging.Formatter(format_str))
file_handler.setLevel(log_level)
logger.addHandler(file_handler)
return logger
def print_log(msg, logger=None, level=logging.INFO):
"""Print a log message.
Args:
msg (str): The message to be logged.
logger (logging.Logger | str | None): The logger to be used. Some
special loggers are:
- "root": the root logger obtained with `get_root_logger()`.
- "silent": no message will be printed.
- None: The `print()` method will be used to print log messages.
level (int): Logging level. Only available when `logger` is a Logger
object or "root".
"""
if logger is None:
print(msg)
elif logger == 'root':
_logger = get_root_logger()
_logger.log(level, msg)
elif isinstance(logger, logging.Logger):
logger.log(level, msg)
elif logger != 'silent':
raise TypeError(
'logger should be either a logging.Logger object, "root", '
'"silent" or None, but got {}'.format(logger))
| 2,424 | 35.19403 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/alias_multinomial.py | import torch
import numpy as np
class AliasMethod(object):
"""The alias method for sampling.
From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
Args:
probs (Tensor): Sampling probabilities.
"""
def __init__(self, probs):
if probs.sum() > 1:
probs.div_(probs.sum())
K = len(probs)
self.prob = torch.zeros(K)
self.alias = torch.LongTensor([0] * K)
# Sort the data into the outcomes with probabilities
# that are larger and smaller than 1/K.
smaller = []
larger = []
for kk, prob in enumerate(probs):
self.prob[kk] = K * prob
if self.prob[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
# Loop though and create little binary mixtures that
# appropriately allocate the larger outcomes over the
# overall uniform mixture.
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
self.alias[small] = large
self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]
if self.prob[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
for last_one in smaller + larger:
self.prob[last_one] = 1
def cuda(self):
self.prob = self.prob.cuda()
self.alias = self.alias.cuda()
def draw(self, N):
"""Draw N samples from multinomial.
Args:
N (int): Number of samples.
Returns:
Tensor: Samples.
"""
K = self.alias.size(0)
kk = torch.zeros(
N, dtype=torch.long, device=self.prob.device).random_(0, K)
prob = self.prob.index_select(0, kk)
alias = self.alias.index_select(0, kk)
# b is whether a random number is greater than q
b = torch.bernoulli(prob)
oq = kk.mul(b.long())
oj = alias.mul((1 - b).long())
return oq + oj
| 2,132 | 27.065789 | 120 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/gather.py | import numpy as np
import torch
import torch.distributed as dist
def gather_tensors(input_array):
world_size = dist.get_world_size()
## gather shapes first
myshape = input_array.shape
mycount = input_array.size
shape_tensor = torch.Tensor(np.array(myshape)).cuda()
all_shape = [
torch.Tensor(np.array(myshape)).cuda() for i in range(world_size)
]
dist.all_gather(all_shape, shape_tensor)
## compute largest shapes
all_shape = [x.cpu().numpy() for x in all_shape]
all_count = [int(x.prod()) for x in all_shape]
all_shape = [list(map(int, x)) for x in all_shape]
max_count = max(all_count)
## padding tensors and gather them
output_tensors = [
torch.Tensor(max_count).cuda() for i in range(world_size)
]
padded_input_array = np.zeros(max_count)
padded_input_array[:mycount] = input_array.reshape(-1)
input_tensor = torch.Tensor(padded_input_array).cuda()
dist.all_gather(output_tensors, input_tensor)
## unpadding gathered tensors
padded_output = [x.cpu().numpy() for x in output_tensors]
output = [
x[:all_count[i]].reshape(all_shape[i])
for i, x in enumerate(padded_output)
]
return output
def gather_tensors_batch(input_array, part_size=100, ret_rank=-1):
# batch-wize gathering to avoid CUDA out of memory
rank = dist.get_rank()
all_features = []
part_num = input_array.shape[0] // part_size + 1 if input_array.shape[
0] % part_size != 0 else input_array.shape[0] // part_size
for i in range(part_num):
part_feat = input_array[i *
part_size:min((i + 1) *
part_size, input_array.shape[0]),
...]
assert part_feat.shape[
0] > 0, "rank: {}, length of part features should > 0".format(rank)
#print("rank: {}, gather part: {}/{}, length: {}".format(rank, i, part_num, len(part_feat)))
gather_part_feat = gather_tensors(part_feat)
all_features.append(gather_part_feat)
if ret_rank == -1:
all_features = [
np.concatenate([all_features[i][j] for i in range(part_num)],
axis=0) for j in range(len(all_features[0]))
]
return all_features
else:
if rank == ret_rank:
all_features = [
np.concatenate([all_features[i][j] for i in range(part_num)],
axis=0) for j in range(len(all_features[0]))
]
return all_features
else:
return None
| 2,629 | 36.571429 | 100 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/collect_env.py | import os.path as osp
import subprocess
import sys
from collections import defaultdict
import cv2
import mmcv
import torch
import torchvision
import openselfsup
def collect_env():
"""Collect the information of the running environments."""
env_info = {}
env_info['sys.platform'] = sys.platform
env_info['Python'] = sys.version.replace('\n', '')
cuda_available = torch.cuda.is_available()
env_info['CUDA available'] = cuda_available
if cuda_available:
from torch.utils.cpp_extension import CUDA_HOME
env_info['CUDA_HOME'] = CUDA_HOME
if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
try:
nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
nvcc = subprocess.check_output(
'"{}" -V | tail -n1'.format(nvcc), shell=True)
nvcc = nvcc.decode('utf-8').strip()
except subprocess.SubprocessError:
nvcc = 'Not Available'
env_info['NVCC'] = nvcc
devices = defaultdict(list)
for k in range(torch.cuda.device_count()):
devices[torch.cuda.get_device_name(k)].append(str(k))
for name, devids in devices.items():
env_info['GPU ' + ','.join(devids)] = name
gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
gcc = gcc.decode('utf-8').strip()
env_info['GCC'] = gcc
env_info['PyTorch'] = torch.__version__
env_info['PyTorch compiling details'] = torch.__config__.show()
env_info['TorchVision'] = torchvision.__version__
env_info['OpenCV'] = cv2.__version__
env_info['MMCV'] = mmcv.__version__
env_info['OpenSelfSup'] = openselfsup.__version__
#from openselfsup.ops import get_compiler_version, get_compiling_cuda_version
#env_info['OpenSelfSup Compiler'] = get_compiler_version()
#env_info['OpenSelfSup CUDA Compiler'] = get_compiling_cuda_version()
return env_info
if __name__ == "__main__":
for name, val in collect_env().items():
print('{}: {}'.format(name, val))
| 2,055 | 30.630769 | 81 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/flops_counter.py | # Modified from flops-counter.pytorch by Vladislav Sovrasov
# original repo: https://github.com/sovrasov/flops-counter.pytorch
# MIT License
# Copyright (c) 2018 Vladislav Sovrasov
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import numpy as np
import torch
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _MaxPoolNd)
def get_model_complexity_info(model,
input_res,
print_per_layer_stat=True,
as_strings=True,
input_constructor=None,
ost=sys.stdout):
assert type(input_res) is tuple
assert len(input_res) >= 2
flops_model = add_flops_counting_methods(model)
flops_model.eval().start_flops_count()
if input_constructor:
input = input_constructor(input_res)
_ = flops_model(**input)
else:
batch = torch.ones(()).new_empty(
(1, *input_res),
dtype=next(flops_model.parameters()).dtype,
device=next(flops_model.parameters()).device)
flops_model(batch)
if print_per_layer_stat:
print_model_with_flops(flops_model, ost=ost)
flops_count = flops_model.compute_average_flops_cost()
params_count = get_model_parameters_number(flops_model)
flops_model.stop_flops_count()
if as_strings:
return flops_to_string(flops_count), params_to_string(params_count)
return flops_count, params_count
def flops_to_string(flops, units='GMac', precision=2):
if units is None:
if flops // 10**9 > 0:
return str(round(flops / 10.**9, precision)) + ' GMac'
elif flops // 10**6 > 0:
return str(round(flops / 10.**6, precision)) + ' MMac'
elif flops // 10**3 > 0:
return str(round(flops / 10.**3, precision)) + ' KMac'
else:
return str(flops) + ' Mac'
else:
if units == 'GMac':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MMac':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KMac':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' Mac'
def params_to_string(params_num):
"""converting number to string
:param float params_num: number
:returns str: number
>>> params_to_string(1e9)
'1000.0 M'
>>> params_to_string(2e5)
'200.0 k'
>>> params_to_string(3e-9)
'3e-09'
"""
if params_num // 10**6 > 0:
return str(round(params_num / 10**6, 2)) + ' M'
elif params_num // 10**3:
return str(round(params_num / 10**3, 2)) + ' k'
else:
return str(params_num)
def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout):
total_flops = model.compute_average_flops_cost()
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
accumulated_flops_cost = self.accumulate_flops()
return ', '.join([
flops_to_string(
accumulated_flops_cost, units=units, precision=precision),
'{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
self.original_extra_repr()
])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if m.extra_repr != flops_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
model.apply(add_extra_repr)
print(model, file=ost)
model.apply(del_extra_repr)
def get_model_parameters_number(model):
params_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params_num
def add_flops_counting_methods(net_main_module):
# adding additional methods to the existing module object,
# this is done this way so that each function has access to self object
net_main_module.start_flops_count = start_flops_count.__get__(
net_main_module)
net_main_module.stop_flops_count = stop_flops_count.__get__(
net_main_module)
net_main_module.reset_flops_count = reset_flops_count.__get__(
net_main_module)
net_main_module.compute_average_flops_cost = \
compute_average_flops_cost.__get__(net_main_module)
net_main_module.reset_flops_count()
# Adding variables necessary for masked flops computation
net_main_module.apply(add_flops_mask_variable_or_reset)
return net_main_module
def compute_average_flops_cost(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Returns current mean flops consumption per image.
"""
batches_count = self.__batch_counter__
flops_sum = 0
for module in self.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
return flops_sum / batches_count
def start_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Activates the computation of mean flops consumption per image.
Call it before you run the network.
"""
add_batch_counter_hook_function(self)
self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
remove_batch_counter_hook_function(self)
self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is
    called on a desired net object.

    Resets statistics computed so far (batch counter and per-module flops).
    """
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
def add_flops_mask(module, mask):
    """Attach a spatial flops *mask* to every ``nn.Conv2d`` inside *module*."""

    def _assign(submodule):
        # Only plain Conv2d layers support masked flops counting.
        if isinstance(submodule, torch.nn.Conv2d):
            submodule.__mask__ = mask

    module.apply(_assign)
def remove_flops_mask(module):
    """Detach flops masks by re-initialising them (back to None) everywhere."""
    module.apply(add_flops_mask_variable_or_reset)
def is_supported_instance(module):
    """Return True if a flops-counting hook is registered for *module*'s type."""
    return any(issubclass(type(module), base) for base in hook_mapping)
def empty_flops_counter_hook(module, input, output):
    """Hook for modules whose flops contribution is treated as zero."""
    module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
    """Count one flop per produced output element (batch included)."""
    out = output[0]
    # Product over all dimensions == batch_size * per-sample element count.
    module.__flops__ += int(np.prod(out.shape))
def relu_flops_counter_hook(module, input, output):
    """Count one flop per activated output element."""
    module.__flops__ += int(output.numel())
def linear_flops_counter_hook(module, input, output):
    """Count batch_size * in_features * out_features multiply-adds."""
    x = input[0]
    n_batch = x.shape[0]
    module.__flops__ += int(n_batch * x.shape[1] * output.shape[1])
def pool_flops_counter_hook(module, input, output):
    """Count one flop per input element (covers avg/max/adaptive pooling)."""
    module.__flops__ += int(np.prod(input[0].shape))
def bn_flops_counter_hook(module, input, output):
    """Normalisation costs one op per element; an affine scale+shift doubles it."""
    elems = np.prod(input[0].shape)
    factor = 2 if module.affine else 1
    module.__flops__ += int(elems * factor)
def gn_flops_counter_hook(module, input, output):
    """Rough GroupNorm cost estimate.

    There is no precise FLOPs estimation of computing mean and variance;
    2 * elems is assumed (half muladds for the means, half for the vars),
    plus elems for the normalisation itself, and elems more if an affine
    transform is applied.
    """
    elems = np.prod(input[0].shape)
    total = 3 * elems
    if module.affine:
        total = total + elems
    module.__flops__ += int(total)
def deconv_flops_counter_hook(conv_module, input, output):
    """Count flops for a transposed convolution.

    Each *input* position contributes one full kernel application (hence
    the input spatial dims below); the bias, when present, is added once
    per *output* element.
    """
    # Can have multiple inputs, getting the first one
    input = input[0]
    batch_size = input.shape[0]
    input_height, input_width = input.shape[2:]
    kernel_height, kernel_width = conv_module.kernel_size
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups
    filters_per_channel = out_channels // groups
    conv_per_position_flops = (
        kernel_height * kernel_width * in_channels * filters_per_channel)
    active_elements_count = batch_size * input_height * input_width
    overall_conv_flops = conv_per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        output_height, output_width = output.shape[2:]
        # Bug fix: bias is added per output element, i.e. height * width.
        # The original multiplied output_height by itself, which is wrong
        # for non-square outputs.
        bias_flops = out_channels * batch_size * output_height * output_width
    overall_flops = overall_conv_flops + bias_flops
    conv_module.__flops__ += int(overall_flops)
def conv_flops_counter_hook(conv_module, input, output):
    """Count flops for a (grouped) convolution.

    Honours an optional spatial mask stored in ``conv_module.__mask__``:
    when present, only the masked output positions are counted.
    """
    # Can have multiple inputs; only the first one is used.
    x = input[0]
    batch_size = x.shape[0]
    output_dims = list(output.shape[2:])
    kernel_dims = list(conv_module.kernel_size)
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    filters_per_channel = out_channels // conv_module.groups
    per_position_flops = np.prod(
        kernel_dims) * in_channels * filters_per_channel
    active_elements_count = batch_size * np.prod(output_dims)
    if conv_module.__mask__ is not None:
        # The (b, 1, h, w) mask selects which output positions are counted.
        output_height, output_width = output.shape[2:]
        flops_mask = conv_module.__mask__.expand(
            batch_size, 1, output_height, output_width)
        active_elements_count = flops_mask.sum()
    conv_flops = per_position_flops * active_elements_count
    bias_flops = 0
    if conv_module.bias is not None:
        bias_flops = out_channels * active_elements_count
    conv_module.__flops__ += int(conv_flops + bias_flops)
# Maps supported module base classes to their flops-counting hooks.
# Matching is by issubclass (see is_supported_instance and
# add_flops_counter_hook_function), and the *first* matching entry wins.
# NOTE(review): ConvTranspose layers typically subclass both _ConvNd and
# _ConvTransposeMixin; with _ConvNd listed first they would be matched by
# conv_flops_counter_hook rather than deconv_flops_counter_hook — verify
# this ordering is intended.
hook_mapping = {
    # conv
    _ConvNd: conv_flops_counter_hook,
    # deconv
    _ConvTransposeMixin: deconv_flops_counter_hook,
    # fc
    nn.Linear: linear_flops_counter_hook,
    # pooling
    _AvgPoolNd: pool_flops_counter_hook,
    _MaxPoolNd: pool_flops_counter_hook,
    _AdaptiveAvgPoolNd: pool_flops_counter_hook,
    _AdaptiveMaxPoolNd: pool_flops_counter_hook,
    # activation
    nn.ReLU: relu_flops_counter_hook,
    nn.PReLU: relu_flops_counter_hook,
    nn.ELU: relu_flops_counter_hook,
    nn.LeakyReLU: relu_flops_counter_hook,
    nn.ReLU6: relu_flops_counter_hook,
    # normalization
    _BatchNorm: bn_flops_counter_hook,
    nn.GroupNorm: gn_flops_counter_hook,
    # upsample
    nn.Upsample: upsample_flops_counter_hook,
}
def batch_counter_hook(module, input, output):
    """Forward hook accumulating the number of processed samples."""
    if len(input) > 0:
        # Multiple inputs possible; the batch size is read off the first one.
        batch_size = len(input[0])
    else:
        batch_size = 1
        print('Warning! No positional inputs found for a module, '
              'assuming batch size is 1.')
    module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
    """Initialise (or reset) the per-module processed-sample counter."""
    module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
    """Register the batch-counting forward hook on *module* (at most once)."""
    if not hasattr(module, '__batch_counter_handle__'):
        # Keep the handle so the hook can be removed later.
        module.__batch_counter_handle__ = module.register_forward_hook(
            batch_counter_hook)
def remove_batch_counter_hook_function(module):
    """Undo add_batch_counter_hook_function: detach the hook, drop the handle."""
    try:
        handle = module.__batch_counter_handle__
    except AttributeError:
        return  # hook was never installed
    handle.remove()
    del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
    """Initialise (or reset) the flops accumulator on supported modules."""
    if is_supported_instance(module):
        module.__flops__ = 0
def add_flops_counter_hook_function(module):
    """Register the type-specific flops hook on *module* (at most once)."""
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        return
    # First matching entry of hook_mapping wins.
    for mod_type, counter_hook in hook_mapping.items():
        if issubclass(type(module), mod_type):
            module.__flops_handle__ = module.register_forward_hook(
                counter_hook)
            break
def remove_flops_counter_hook_function(module):
    """Detach a previously registered flops hook from *module*, if any."""
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
# --- Masked flops counting
# Also being run in the initialization
def add_flops_mask_variable_or_reset(module):
    """Initialise (or clear) the spatial flops mask on supported modules."""
    if is_supported_instance(module):
        module.__mask__ = None
| 14,304 | 31.146067 | 79 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/__init__.py | from .alias_multinomial import AliasMethod
from .collect import nondist_forward_collect, dist_forward_collect
from .collect_env import collect_env
from .config_tools import traverse_replace
from .flops_counter import get_model_complexity_info
from .logger import get_root_logger, print_log
from .registry import Registry, build_from_cfg
from . import optimizers
| 362 | 39.333333 | 66 | py |
Few-shot-WSI | Few-shot-WSI-master/openselfsup/utils/config_tools.py | from mmcv import Config
def traverse_replace(d, key, value):
    """Recursively set every occurrence of *key* in *d* to *value*.

    Dict-like containers (including mmcv ``Config``) are modified in
    place; lists, tuples and sets are only traversed.
    """
    if isinstance(d, (dict, Config)):
        for child_key, child_val in d.items():
            if child_key == key:
                d[child_key] = value
            else:
                traverse_replace(child_val, key, value)
    elif isinstance(d, (list, tuple, set)):
        for item in d:
            traverse_replace(item, key, value)
| 359 | 26.692308 | 47 | py |
rivuletpy | rivuletpy-master/build.py | """
Build C extensions.
Adapted from: https://github.com/zoj613/htnorm/blob/main/build.py
"""
import os
from distutils.core import Extension
import numpy as np
# C sources of the `msfm` (multistencil fast marching) extension.
source_files = [
    "rivuletpy/msfm/msfmmodule.c",
    "rivuletpy/msfm/_msfm.c",
]
# get environmental variables to determine the flow of the build process
BUILD_WHEELS = os.getenv("BUILD_WHEELS", None)
LIBS_DIR = os.getenv("LIBS_DIR", "/usr/lib")
# libm is always linked; BLAS/LAPACK libraries are added below.
libraries = ["m"]
# when building manylinux2014 wheels for pypi use different directories as
# required by CentOS, else allow the user to specify them when building from
# source distribution
if BUILD_WHEELS:
    library_dirs = ["/usr/lib64"]
    libraries.append("openblas")
else:
    library_dirs = [LIBS_DIR]
    libraries.extend(["blas", "lapack"])
extensions = [
    Extension(
        "msfm",
        source_files,
        include_dirs=[np.get_include(), "rivuletpy/msfm"],
        library_dirs=library_dirs,
        libraries=libraries,
        define_macros=[("NPY_NO_DEPRECATED_API", 0)],
        extra_compile_args=["-std=c99"],
    )
]
def build(setup_kwargs):
    """Build extension modules.

    Injects the compiled C extensions into the (poetry-generated) setup
    keyword arguments, in place.
    """
    setup_kwargs.update(ext_modules=extensions, zip_safe=False)
| 1,216 | 24.354167 | 76 | py |
rivuletpy | rivuletpy-master/tests/test_riveal.py | from filtering.riveal import riveal
from rivuletpy.utils.io import *
from filtering.thresholding import rescale
# Enhance a test image with riveal guided by a reference SWC tracing,
# threshold the result with Otsu, save it and open it in Vaa3D.
img = loadimg('tests/data/test.tif')
dtype = img.dtype
swc = loadswc('tests/data/test.swc')
img = riveal(img, swc, nsample=5e4, epoch=30)
img = rescale(img)
# skimage renamed `filter` to `filters`; support both versions.
try:
    from skimage import filters
except ImportError:
    from skimage import filter as filters
threshold = filters.threshold_otsu(img)
img[img<=threshold] = 0
writetiff3d('dt.tif', img.astype(dtype))
# NOTE(review): `os` is not imported explicitly here — presumably it is
# re-exported by the star import above; verify.
my_env = os.environ.copy()
v3dcmd = "%s/vaa3d" % my_env['V3DPATH']
v3dcmd += ' -v -i dt.tif'
os.system(v3dcmd) | 585 | 26.904762 | 45 | py |
rivuletpy | rivuletpy-master/tests/testbgrsp.py | from filtering.anisotropic import *
from rivuletpy.utils.io import *
import matplotlib.pyplot as plt
from scipy import io as sio
# skimage renamed `filter` to `filters`; support both versions.
try:
    from skimage import filters
except ImportError:
    from skimage import filter as filters
from scipy.ndimage.filters import gaussian_filter
# Compare the bi-gaussian ('bg') response against the MATLAB OOF result
# stored in the .mat fixture, on maximum-intensity projections.
mat = sio.loadmat('tests/data/very-small-oof.mat', )
img = mat['img']
ostu_img = 0.
radii = np.arange(0.2, 2, 0.5)
rho = 0.5
oof_matlab = mat['oof']
ostu_matlaboof = filters.threshold_otsu(oof_matlab)
rps, _ = response(img.astype('float'), rsptype='bg', radii=radii, rho=rho)
thr = 1
from scipy import ndimage as ndi
from skimage import feature
# NOTE(review): rps is 3-D here; confirm feature.canny accepts it (the
# classic skimage canny is 2-D only).
canny = feature.canny(rps, sigma=3)
smoothed_rps = gaussian_filter(rps, 0.5)
# ostu_smooth = filters.threshold_otsu(smoothed_rps)
ostu_smooth = 1
# 4x4 grid of projections: bg response, smoothed response, canny, original.
plotidx = 1
plt.subplot(4, 4, plotidx)
plt.imshow(rps.max(axis=0))
plt.title('OOF Python MEM_SAVE YZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(rps.max(axis=1))
plt.title('OOF Python MEM_SAVE XZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(rps.max(axis=2))
plt.title('OOF Python MEM_SAVE XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow((rps > thr).max(axis=2))
plt.title('OOF Python MEM_SAVE Otsu XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(smoothed_rps.max(axis=0))
plt.title('Smooth YZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(smoothed_rps.max(axis=1))
plt.title('Smooth XZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(smoothed_rps.max(axis=2))
plt.title('Smooth XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow((smoothed_rps > ostu_smooth).max(axis=2))
plt.title('Smooth XY')
plotidx +=1
plt.subplot(4, 4, plotidx)
plt.imshow(canny.max(axis=0))
plt.title('OOF Matlab YZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(canny.max(axis=1))
plt.title('OOF Matlab XZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(canny.max(axis=2))
plt.title('OOF Matlab XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow((canny > ostu_matlaboof).max(axis=2))
plt.title('OOF Matlab Otsu XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(img.max(axis=0))
plt.title('Original YZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(img.max(axis=1))
plt.title('Original XZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(img.max(axis=2))
plt.title('Original XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow((img > ostu_img).max(axis=2))
plt.title('Original Otsu XY')
plt.show() | 2,448 | 20.866071 | 74 | py |
rivuletpy | rivuletpy-master/tests/testmsfm.py | import msfm
from rivuletpy.utils.io import *
import skfmm
import os
from matplotlib import pyplot as plt
# Smoke test for the compiled msfm (multistencil fast marching) extension:
# compute a boundary distance transform, take its maximum as the soma
# position, run MSFM from there and show a minimum-intensity projection.
dir_path = os.path.dirname(os.path.realpath(__file__))
img = loadimg(os.path.join(dir_path, 'data/test.tif'))
dt = skfmm.distance(img > 0, dx=1) # Boundary DT
somaradius = dt.max()
# NOTE(review): `np` is not imported explicitly — presumably re-exported
# by the star import above; verify.
somapos = np.asarray(np.unravel_index(dt.argmax(), dt.shape))
print('somapos in python', somapos, somapos.dtype)
print('dt[soma pos] in python: %f' % dt[somapos[0], somapos[1], somapos[2]])
print('Running MSFM...')
T = msfm.run(dt, somapos, False, True)
plt.imshow(np.squeeze(T.min(axis=-1)))
plt.show()
| 592 | 30.210526 | 76 | py |
rivuletpy | rivuletpy-master/tests/test_fuzzy_threshold.py | from os import path
from rivuletpy.utils.io import *
from filtering.thresholding import fuzzy
import matplotlib.pyplot as plt
# Smoke test: run the fuzzy thresholding filter on the test volume with
# rendering enabled.
img = loadimg(path.join('tests', 'data', 'test.tif'))
thr = fuzzy(img, render=True)
| 213 | 22.777778 | 53 | py |
rivuletpy | rivuletpy-master/tests/testssm.py | from filtering.morphology import ssm
from rivuletpy.utils.io import *
import matplotlib.pyplot as plt
import skfmm
# Smoke test for the SSM (skeleton strength map) filter: compute a
# boundary distance transform of the binarised image, run SSM on it and
# show maximum-intensity projections of each stage.
ITER = 30
img = loadimg('/home/siqi/ncidata/rivuletpy/tests/data/test-crop.tif')
bimg = (img > 0).astype('int')
dt = skfmm.distance(bimg, dx=1)
sdt = ssm(dt, anisotropic=True, iterations=ITER)
# skimage renamed `filter` to `filters`; support both versions.
try:
    from skimage import filters
except ImportError:
    from skimage import filter as filters
# Bug fix: the original thresholded an undefined name `s` (NameError at
# runtime); the SSM output `sdt` is what should be segmented here.
s_seg = sdt > filters.threshold_otsu(sdt)
plt.figure()
plt.title('DT')
plt.imshow(dt.max(-1))
plt.figure()
plt.title('img > 0')
plt.imshow((img > 0).max(-1))
plt.figure()
plt.title('SSM-DT')
plt.imshow(sdt.max(-1))
| 624 | 20.551724 | 70 | py |
rivuletpy | rivuletpy-master/tests/testmetrics.py | from rivuletpy.utils.metrics import *
from rivuletpy.utils.io import *
from os.path import join
# Exercise the SWC comparison metrics (precision/recall, gaussian
# distance, connectivity distance) on a traced vs. expected pair, then
# save copies with the mismatched nodes re-coloured for inspection.
datapath = 'tests/data'
swc1 = loadswc(join(datapath, 'test-output.swc'))
swc2 = loadswc(join(datapath, 'test-expected.swc'))
prf, swc_compare = precision_recall(swc1, swc2)
print('Precision: %.2f\tRecall: %.2f\tF1: %.2f\t' % prf)
M1, M2 = gaussian_distance(swc1, swc2, 3.0)
print('M1 MEAN: %.2f\tM2 MEAN: %.2f' % (M1.mean(), M2.mean()))
midx1, midx2 = connectivity_distance(swc1, swc2)
# Mark mis-connected nodes (columns 1=type, 5=radius) for visualisation.
for i in midx1:
    swc1[i, 1] = 2
    swc1[i, 5] = 4
saveswc(join(datapath, 'test.connect1.swc'), swc1)
for i in midx2:
    swc2[i, 1] = 2
    swc2[i, 5] = 4
saveswc(join(datapath, 'test.connect2.swc'), swc2)
| 700 | 27.04 | 62 | py |
rivuletpy | rivuletpy-master/tests/testoof.py | from filtering.anisotropic import *
from rivuletpy.utils.io import *
import matplotlib.pyplot as plt
from scipy import io as sio
# skimage renamed `filter` to `filters`; support both versions.
try:
    from skimage import filters
except ImportError:
    from skimage import filter as filters
# Compare the Python OOF responses (memory-saving vs. high-memory code
# paths) against the MATLAB OOF stored in the .mat fixture.
mat = sio.loadmat('tests/data/very-small-oof.mat', )
img = mat['img']
ostu_img = filters.threshold_otsu(img)
radii = np.arange(1, 1.5, 0.1)
oof_matlab = mat['oof']
ostu_matlaboof = filters.threshold_otsu(oof_matlab)
oofrps_memsave, _ = response(img.astype('float'), rsptype='oof', radii=np.asarray(radii), memory_save=True)
otsu_memsave = filters.threshold_otsu(oofrps_memsave)
oofrps_highmem, _ = response(img.astype('float'), rsptype='oof', radii=np.asarray(radii), memory_save=False)
otsu_highmem = filters.threshold_otsu(oofrps_highmem)
# 4x4 grid of maximum-intensity projections for visual comparison.
plotidx = 1
plt.subplot(4, 4, plotidx)
plt.imshow(oofrps_memsave.max(axis=0))
plt.title('OOF Python MEM_SAVE YZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(oofrps_memsave.max(axis=1))
plt.title('OOF Python MEM_SAVE XZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(oofrps_memsave.max(axis=2))
plt.title('OOF Python MEM_SAVE XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow((oofrps_memsave > otsu_memsave).max(axis=2))
plt.title('OOF Python MEM_SAVE Otsu XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(oofrps_highmem.max(axis=0))
plt.title('OOF Python HIGHMEM YZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(oofrps_highmem.max(axis=1))
plt.title('OOF Python HIGHMEM XZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(oofrps_highmem.max(axis=2))
plt.title('OOF Python HIGHMEM XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow((oofrps_highmem > otsu_highmem).max(axis=2))
plt.title('OOF Python HIGHMEM Otsu XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(oof_matlab.max(axis=0))
plt.title('OOF Matlab YZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(oof_matlab.max(axis=1))
plt.title('OOF Matlab XZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(oof_matlab.max(axis=2))
plt.title('OOF Matlab XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow((oof_matlab > ostu_matlaboof).max(axis=2))
plt.title('OOF Matlab Otsu XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(img.max(axis=0))
plt.title('Original YZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(img.max(axis=1))
plt.title('Original XZ')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow(img.max(axis=2))
plt.title('Original XY')
plotidx += 1
plt.subplot(4, 4, plotidx)
plt.imshow((img > ostu_img).max(axis=2))
plt.title('Original Otsu XY')
plt.show()
| 2,584 | 23.619048 | 108 | py |
rivuletpy | rivuletpy-master/tests/test_node_push.py | # Load swc
import SimpleITK as sitk
from rivuletpy.utils.io import loadswc, loadimg
from rivuletpy.swc import SWC
from rivuletpy.utils.io import swc2world, swc2vtk
# Load an SWC reconstruction, rescale it to the image voxel grid, push
# its nodes onto the binary vessel mask, then export to world-space VTK.
swc_mat = loadswc(
    '/home/z003s24h/Desktop/zhoubing_vessel_example/mask/Anonymous EJRH_16.r2.swc')
s = SWC()
s._data = swc_mat
# Load image and binarize
img = loadimg(
    '/home/z003s24h/Desktop/zhoubing_vessel_example/mask/Anonymous EJRH_16.mhd', 1)
imgdtype = img.dtype
imgshape = img.shape
bimg = img > 0
# Hard-coded voxel spacings for this dataset (z, then x/y radii columns).
s._data[:, 2] *= .7 / 1.
s._data[:, 3] *= 0.363281 / 1.
s._data[:, 4] *= 0.363281 / 1.
s.push_nodes_with_binary(bimg)
# s.view()
print('Converting to world space...')
mhd = sitk.ReadImage(
    '/home/z003s24h/Desktop/zhoubing_vessel_example/mask/Anonymous EJRH_16.mhd')
swc = swc2world(s.get_array(),
                mhd.GetOrigin(),
                [1.] * 3)
print('Saving to VTK format...')
swc2vtk(swc, '/home/z003s24h/Desktop/zhoubing_vessel_example/mask/Anonymous EJRH_16.r2.pushed.vtk')
| 974 | 28.545455 | 99 | py |
rivuletpy | rivuletpy-master/tests/testbg.py | from filtering.anisotropic import *
from rivuletpy.utils.io import *
import matplotlib.pyplot as plt
from scipy import io as sio
# skimage renamed `filter` to `filters`; support both versions.
try:
    from skimage import filters
except ImportError:
    from skimage import filter as filters
# plot the gaussian kernel
nsig = 5
nmu = 5
kerlen = 101
kr = (kerlen - 1) / 2
X, Y, Z = np.meshgrid(np.arange(-kr, kr+1),
                      np.arange(-kr, kr+1),
                      np.arange(-kr, kr+1))
indstack = np.stack((X, Y, Z))
dist = np.linalg.norm(indstack, axis=0)
plt.title('Gaussian')
# Sweep mu and sigma on a nsig x nmu grid of central slices.
for i in range(nsig):
    for j in range(nmu):
        mu = float(j*10)
        sigma = float(i+4)
        k = gkern3(dist, mu, sigma)
        # NOTE(review): subplot index uses nsig as row stride; this only
        # works because nsig == nmu here.
        ax = plt.subplot(nsig, nmu, (i) * nsig + (j + 1))
        ax.set_title('mu=%.2f, sigma=%.2f' % (mu, sigma))
        imgplot = plt.imshow(k[:, :, int((kerlen-1)/2)])
        plt.colorbar()
plt.figure(2)
plt.title('Bi-Gaussian')
nsig = 5
nrho = 5
kerlen = 101
# Sweep sigma and rho for the bi-gaussian kernel on a second figure.
for i in range(nsig):
    for j in range(nrho):
        sigma = float(i * 10)+1
        rho = (j+1) * 0.1
        k = bgkern3(kerlen, 0, sigma, rho)
        ax = plt.subplot(nsig, nrho, (i) * nsig + (j + 1))
        ax.set_title('sigma=%.2f, rho=%.2f' % (sigma, rho))
        imgplot = plt.imshow(k[:, :, int((kerlen-1)/2) ])
        plt.colorbar()
plt.show()
| 1,287 | 24.76 | 59 | py |
rivuletpy | rivuletpy-master/tests/test_viewer.py | from rivuletpy.utils.io import loadswc
from rivuletpy.swc import SWC
# Smoke test for the SWC viewer: load a reconstruction and display it.
swc_mat = loadswc('test_data/test.tif.r2.swc')
s = SWC()
s._data = swc_mat
s.view()
input("Press any key to continue...") | 191 | 23 | 46 | py |
rivuletpy | rivuletpy-master/rivuletpy/soma.py | # -*- coding: utf-8 -*-
"""
somasnakes
===========
Original package is adjusted for soma detection by donghaozhang and siqiliu.
This soma submodule can be used for soma detection only, but this submodule is
currently embedded in rivuletpy. The soma mask can be generate by setting
its corresponding argument. Soma detection requires an initial soma centroid,
estimated somatic radius and grayscale neuron image. Soma growth is based on
the Morphological Active Contours without Edges algorithm. The original paper
is named as A morphological approach to curvature-based
evolution of curves and surfaces.The following papers are Rivulet papers.
The soma growth algorithm can converge by applying the sliding window.
Journal Rivulet Paper : Rivulet: 3D Neuron Morphology Tracing
with Iterative Back-Tracking Conference Rivulet Paper : Reconstruction
of 3D neuron morphology using Rivulet back-tracking
soma is a submodule of rivuletpy
"""
__author__ = "Donghao Zhang <zdhpeter1991@gmail.com>, Siqi Liu <lsqshr@gmail.com>"
from itertools import cycle
import math
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.ndimage import gaussian_filter, gaussian_gradient_magnitude
from scipy.ndimage.measurements import center_of_mass
from scipy.ndimage.morphology import generate_binary_structure
from rivuletpy.utils.io import writetiff3d
import skfmm
class Soma(object):
    """Soma (cell body) model: centroid, estimated radius and binary mask."""

    def __init__(self):
        # Centroid voxel coordinates (z/x/y order as used by the image arrays).
        self.centroid = None
        # Approximate soma radius estimated from the distance transform.
        self.radius = 0
        # Binary soma volume, same shape as the input image once detected.
        self.mask = None
    def simple_mask(self, bimg):
        '''
        Make soma binary mask with the original
        binary image and its radius and position
        '''
        # Make a ball like mask with 2 X somaradius
        ballvolume = np.zeros(bimg.shape)
        ballvolume[self.centroid[0], self.centroid[1], self.centroid[2]] = 1
        stt = generate_binary_structure(3, 1)
        for i in range(math.ceil(self.radius * 2.5)):
            ballvolume = binary_dilation(ballvolume, structure=stt)
        # Make the soma mask with the intersection
        # between the ball area and the original binary
        self.mask = np.logical_and(ballvolume, bimg)
    # Shift the centroid according to the cropped region
    def crop_centroid(self, crop_region):
        """Shift the centroid into the coordinate frame of a cropped image."""
        self.centroid[0] = self.centroid[0] - crop_region[0, 0]
        self.centroid[1] = self.centroid[1] - crop_region[1, 0]
        self.centroid[2] = self.centroid[2] - crop_region[2, 0]
    def detect(self, bimg, simple=False, silent=False):
        """
        Automatic detection of soma volume unless the iterations are given.

        Parameters
        ----------
        bimg : ndarray
            Binary (or binarisable) 3-D neuron image.
        simple : bool
            When True, skip the level-set growth and build the mask from a
            dilated ball intersected with ``bimg`` (see simple_mask).
        silent : bool
            Suppress progress printing when True.
        """
        # Smooth iterations
        smoothing = 1
        # A float number controls the weight of internal energy
        lambda1 = 1
        # A float number controls the weight of external energy
        lambda2 = 1.5
        # Manually set the number of iterations required for the soma
        # The type of iterations is int; -1 requests automatic convergence.
        iterations = -1
        bimg = bimg.astype('int') # Segment
        dt = skfmm.distance(bimg, dx=1.1) # Boundary DT
        # somaradius : the approximate value of
        # soma radius estimated from distance transform
        # the type of somaradius is float64
        # somaradius is just a float number
        somaradius = dt.max()
        # somapos : the coordinate of estimated soma centroid
        # the type of somapos is int64
        # the shape of somapos is (3,)
        # somapos is array-like
        somapos = np.asarray(np.unravel_index(dt.argmax(), dt.shape))
        # Soma detection is required
        if not simple:
            if not silent:
                print('Reconstructing Soma with SRET')
            ratioxz = bimg.shape[0] / bimg.shape[2]
            ratioyz = bimg.shape[1] / bimg.shape[2]
            # Half-size of the initial level-set sphere, clamped to a
            # sensible range derived from the estimated radius.
            sqrval = (somaradius**0.5 * max(ratioxz, ratioyz))
            sqrval = np.floor(min(max(sqrval, 3), (somaradius**0.5) * 6))
            startpt = somapos - 3 * sqrval
            endpt = somapos + 3 * sqrval
            # # To constrain the soma growth region inside the cubic region
            # # Python index start from 0
            startpt[0] = min(max(0, startpt[0]), bimg.shape[0] - 1)
            startpt[1] = min(max(0, startpt[1]), bimg.shape[1] - 1)
            startpt[2] = min(max(0, startpt[2]), bimg.shape[2] - 1)
            endpt[0] = min(max(0, endpt[0]), bimg.shape[0] - 1)
            endpt[1] = min(max(0, endpt[1]), bimg.shape[1] - 1)
            endpt[2] = min(max(0, endpt[2]), bimg.shape[2] - 1)
            startpt = startpt.astype(int) # Convert type to int for indexing
            endpt = endpt.astype(int)
            # # Extract soma region for fast soma detection
            somaimg = bimg[startpt[0]:endpt[0], startpt[1]:endpt[1], startpt[2]:
                           endpt[2]]
            centerpt = np.zeros(3)
            centerpt[0] = somaimg.shape[0] / 2
            centerpt[1] = somaimg.shape[1] / 2
            centerpt[2] = somaimg.shape[2] / 2
            centerpt = np.floor(centerpt)
            # Morphological ACWE. Initialization of the level-set.
            # NOTE(review): check that these positional arguments line up
            # with MorphACWE.__init__'s parameter order.
            macwe = MorphACWE(somaimg, startpt, endpt,
                              smoothing, lambda1, lambda2)
            macwe.levelset = circle_levelset(somaimg.shape,
                                             np.floor(centerpt), sqrval)
            # -1 means the automatic detection
            # Positive integers means the number of iterations
            if iterations == -1:
                macwe.autoconvg() # automatic soma detection
            else:
                # Input the iteration number manually
                for i in range(iterations):
                    macwe.step()
            # The following achieves the automatic somtic box extension
            # The maximum somatic region extension iteration
            # It is set to 10 avoid infinite loops
            for i in range(1, 11):
                # if not silent:
                # print('The somatic region extension iteration is', i)
                if macwe.enlrspt is None:
                    break
                # Copy the values to new variables for the safe purpose
                startpt = macwe.enlrspt.copy()
                endpt = macwe.enlrept.copy()
                startpt[0] = min(max(0, startpt[0]), bimg.shape[0])
                startpt[1] = min(max(0, startpt[1]), bimg.shape[1])
                startpt[2] = min(max(0, startpt[2]), bimg.shape[2])
                endpt[0] = min(max(0, endpt[0]), bimg.shape[0])
                endpt[1] = min(max(0, endpt[1]), bimg.shape[1])
                endpt[2] = min(max(0, endpt[2]), bimg.shape[2])
                somaimg = bimg[startpt[0]:endpt[0], startpt[1]:endpt[1], startpt[2]:
                               endpt[2]]
                full_soma_mask = np.zeros(
                    (bimg.shape[0], bimg.shape[1], bimg.shape[2]))
                # Put the detected somas into the whole image
                # It is either true or false
                full_soma_mask[macwe.startpoint[0]:macwe.endpoint[
                    0], macwe.startpoint[1]:macwe.endpoint[1], macwe.startpoint[2]:
                               macwe.endpoint[2]] = macwe._u
                # The newlevelset is the initial soma volume from previous iteration
                #(the automatic converge operation)
                newlevelset = full_soma_mask[startpt[0]:endpt[0], startpt[1]:endpt[1],
                                             startpt[2]:endpt[2]]
                # The previous macwe class is released
                # To avoid the conflicts with the new initialisation of the
                # macwe class
                del macwe
                # Initialisation for the new class
                macwe = MorphACWE(somaimg, startpt, endpt, smoothing, lambda1,
                                  lambda2)
                del somaimg, full_soma_mask, startpt, endpt
                # Reuse the soma volume from previous iteration
                macwe.set_levelset(newlevelset)
                # Release memory to avoid conflicts with previous newlevelset
                del newlevelset
                macwe.autoconvg()
            # The automatic smoothing operation to remove the interferes with
            # dendrites
            macwe.autosmooth()
            # Initialise soma mask image
            full_soma_mask = np.zeros(
                (bimg.shape[0], bimg.shape[1], bimg.shape[2]))
            # There are two possible scenarios
            # The first scenrio is that the automatic box extension is not
            # necessary
            if macwe.enlrspt is None:
                startpt = macwe.startpoint.copy()
                endpt = macwe.endpoint.copy()
            # The second scenrio is that the automatic box extension operations
            # has been performed
            else:
                startpt = macwe.enlrspt.copy()
                endpt = macwe.enlrept.copy()
            startpt[0] = min(max(0, startpt[0]), bimg.shape[0])
            startpt[1] = min(max(0, startpt[1]), bimg.shape[1])
            startpt[2] = min(max(0, startpt[2]), bimg.shape[2])
            endpt[0] = min(max(0, endpt[0]), bimg.shape[0])
            endpt[1] = min(max(0, endpt[1]), bimg.shape[1])
            endpt[2] = min(max(0, endpt[2]), bimg.shape[2])
            # The soma mask image contains only two possible values
            # Each element is either 0 or 40
            # Value 40 is assigned for the visualisation purpose.
            full_soma_mask[startpt[0]:endpt[0], startpt[1]:endpt[1], startpt[2]:endpt[
                2]] = macwe._u > 0
            # Calculate the new centroid using the soma volume
            newsomapos = center_of_mass(full_soma_mask)
            # Round the float coordinates into integers
            newsomapos = [math.floor(p) for p in newsomapos]
            self.centroid = newsomapos
            self.radius = somaradius
            self.mask = full_soma_mask
        else:
            if not silent:
                print('Reconstructing Soma with Simple Mask')
            self.centroid = somapos
            self.radius = somaradius
            self.simple_mask(bimg)
    def pad(self, crop_region, original_shape):
        """Zero-pad the mask back to *original_shape* after a crop."""
        xmin = crop_region[0, 0]
        ymin = crop_region[1, 0]
        zmin = crop_region[2, 0]
        xmax = crop_region[0, 1]
        ymax = crop_region[1, 1]
        zmax = crop_region[2, 1]
        self.mask = np.pad(self.mask, ((xmin, original_shape[0] - xmax),
                                       (ymin, original_shape[1] - ymax),
                                       (zmin, original_shape[2] - zmax)),
                           mode='constant',
                           constant_values=0)
    def save(self, fname):
        """Write the binary soma mask to a 3-D TIFF (scaled to 0/255)."""
        writetiff3d(fname, self.mask * 255)
class Fcycle(object):
    """Round-robin function dispatcher.

    Each call invokes the next function from the iterable (cycling back
    to the first after the last) with the given arguments.
    """

    def __init__(self, iterable):
        """Call functions from the iterable each time it is called."""
        self.funcs = cycle(iterable)

    def __call__(self, *args, **kwargs):
        current = next(self.funcs)
        return current(*args, **kwargs)
# SI and IS operators for 2D and 3D.
# Structuring-element stacks for the SI/IS morphological operators.
# _P2: four 3x3 line segments (diagonal, vertical, anti-diagonal, horizontal).
_P2 = [
    np.eye(3), np.array([[0, 1, 0]] * 3), np.flipud(np.eye(3)),
    np.rot90([[0, 1, 0]] * 3)
]
# _P3: nine 3x3x3 planes through the centre voxel
# (three axis-aligned planes plus six diagonal ones).
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
# Shared scratch buffer for SI/IS; reallocated whenever the input shape changes.
_aux = np.zeros((0))
def SI(u):
    """SI operator: pointwise max over erosions along each structuring line."""
    # print('SI operator has been called')
    # _aux is a module-level scratch buffer reused across calls.
    global _aux
    if np.ndim(u) == 2:
        P = _P2
    elif np.ndim(u) == 3:
        P = _P3
    else:
        raise ValueError(
            "u has an invalid number of dimensions (should be 2 or 3)")
    if u.shape != _aux.shape[1:]:
        # Reallocate the scratch buffer only when the input shape changes.
        _aux = np.zeros((len(P), ) + u.shape)
    for i in range(len(P)):
        _aux[i] = binary_erosion(u, P[i])
    return _aux.max(0)
def circle_levelset(shape, center, sqradius, scalerow=1.0):
    """Build a binary function with a circle as the 0.5-levelset.

    Returns a float array of *shape* that is 1 where the Euclidean
    distance to *center* is strictly smaller than *sqradius* and 0
    elsewhere.  ``scalerow`` is accepted for interface compatibility but
    unused.
    """
    coords = np.indices(shape)
    offsets = np.reshape(center, (-1,) + (1,) * len(shape))
    distance = np.sqrt(((coords - offsets) ** 2).sum(0))
    return np.float_(sqradius - distance > 0)
def IS(u):
    """IS operator: pointwise min over dilations along each structuring line."""
    # _aux is a module-level scratch buffer reused across calls (shared with SI).
    global _aux
    if np.ndim(u) == 2:
        P = _P2
    elif np.ndim(u) == 3:
        P = _P3
    else:
        raise ValueError(
            "u has an invalid number of dimensions (should be 2 or 3)")
    if u.shape != _aux.shape[1:]:
        # Reallocate the scratch buffer only when the input shape changes.
        _aux = np.zeros((len(P), ) + u.shape)
    for i in range(len(P)):
        _aux[i] = binary_dilation(u, P[i])
    return _aux.min(0)
# SIoIS operator: composition of the SI and IS morphological operators.
SIoIS = lambda u: SI(IS(u))
ISoSI = lambda u: IS(SI(u))
# curvop alternates SIoIS and ISoSI on successive calls, approximating a
# curvature-based smoothing step.
curvop = Fcycle([SIoIS, ISoSI])
# Stopping factors (function g(I) in the paper).
def gborders(img, alpha=1.0, sigma=1.0):
    """Stopping criterion for image borders.

    Returns g(I) = 1 / sqrt(1 + alpha * |grad(G_sigma * I)|), which is
    close to 0 at strong edges and close to 1 in flat regions.
    """
    grad_mag = gaussian_gradient_magnitude(img, sigma, mode='constant')
    return 1.0 / np.sqrt(1.0 + alpha * grad_mag)
def glines(img, sigma=1.0):
    """Stopping criterion for image black lines (Gaussian-smoothed image)."""
    return gaussian_filter(img, sigma)
class MorphACWE(object):
"""Morphological ACWE based on the Chan-Vese energy functional."""
def __init__(self,
data,
startpoint,
endpoint,
imgshape,
smoothing=1,
lambda1=1,
lambda2=1.5):
"""Create a Morphological ACWE solver.
Parameters
----------
data : ndarray
The image data.
smoothing : scalar
The number of repetitions of the smoothing step (the
curv operator) in each iteration. In other terms,
this is the strength of the smoothing. This is the
parameter µ.
lambda1, lambda2 : scalars
Relative importance of the inside pixels (lambda1)
against the outside pixels (lambda2).
startpt, endpt : numpy int array
startpt is the initial starting point of the somatic region
endpt is the initial ending point of the somatic region
"""
self._u = None
self.smoothing = smoothing
self.lambda1 = lambda1
self.lambda2 = lambda2
self.imgshape = imgshape
self.data = data
self.startpoint = startpoint
self.endpoint = endpoint
self.enlrspt = None
self.enlrept = None
def set_levelset(self, u):
self._u = np.double(u)
self._u[u > 0] = 1
self._u[u <= 0] = 0
levelset = property(
lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
    def step(self):
        """Perform a single step of the morphological Chan-Vese evolution."""
        # Assign attributes to local variables for convenience.
        u = self._u
        if u is None:
            raise ValueError(
                "the levelset function is not set (use set_levelset)")
        data = self.data
        # Determine c0 and c1: mean intensity outside/inside the contour.
        inside = u > 0
        outside = u <= 0
        c0 = data[outside].sum() / float(outside.sum())
        c1 = data[inside].sum() / float(inside.sum())
        # Image attachment: move the front according to the weighted
        # Chan-Vese energy difference, restricted to the contour
        # neighbourhood via the gradient magnitude of u.
        dres = np.array(np.gradient(u))
        abs_dres = np.abs(dres).sum(0)
        #aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
        aux = abs_dres * (self.lambda1 * (data - c1)**2 - self.lambda2 *
                          (data - c0)**2)
        res = np.copy(u)
        res[aux < 0] = 1
        res[aux > 0] = 0
        res = IS(res)
        # Smoothing: apply the curvature operator `smoothing` times.
        for i in range(self.smoothing):
            res = curvop(res)
        self._u = res
    def step_sm(self):
        """A smoothing step of the morphological Chan-Vese evolution.

        Applies only the curvature smoothing operator, without the image
        attachment term of :meth:`step`.
        """
        # Assign attributes to local variables for convenience.
        u = self._u
        if u is None:
            raise ValueError(
                "the levelset function is not set (use set_levelset)")
        res = np.copy(u)
        # Smoothing.
        res = curvop(res)
        self._u = res
def run(self, iterations):
"""Run several iterations of the morphological Chan-Vese method."""
for i in range(iterations):
self.step()
    def autoconvg(self):
        """Soma detection converges by itself.

        Evolves the contour until the foreground volume stabilizes (tracked
        with a sliding window of first-order differences), then checks the
        six faces of the volume: if the soma region touches a wall strongly,
        an enlarged bounding box (enlrspt/enlrept) is proposed in that
        direction; otherwise both are reset to None.
        """
        # Autoconvg is the abbreviation of automatic convergence
        iterations = 200
        # The following vector is the number of foreground voxels
        foreground_num = np.zeros(iterations)
        # The following vector is initialised for storing forward difference
        forward_diff_store = np.zeros(iterations)
        # This is the initilization of automatic converge
        for i in range(iterations):
            self.step()
            u = self._u
            volu = sum(u[u > 0])
            foreground_num[i] = volu
            if i > 0:
                # The variable diff_step is the current first order difference
                diff_step = foreground_num[i] - foreground_num[i - 1]
                forward_diff_store[i - 1] = diff_step
            if i > 6:
                # The variable cur_slider_diff is the sum of sliding window
                # The size of sliding window is 6
                cur_slider_diff = np.sum(forward_diff_store[i - 6:i - 1])
                volu_thres = 0.05 * foreground_num[i]
                # Converged when the windowed volume change is small either
                # in absolute terms (< 20 voxels) or relative terms (< 5%).
                convg_one = np.absolute(cur_slider_diff) < 20
                convg_two = np.absolute(cur_slider_diff) < volu_thres
                convg_criteria = np.logical_or(convg_one, convg_two)
                if convg_criteria:
                    break
        # Binary soma mask; count foreground voxels on each of the 6 faces.
        A = self._u > 0.5
        slicevalarray = np.zeros(6)
        # Front face along dimension 1
        somaslice = A[0, :, :]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[0] = sliceval
        # Back face along dimension 1
        somaslice = A[A.shape[0] - 1, :, :]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[1] = sliceval
        # Front face along dimension 2
        somaslice = A[:, 0, :]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[2] = sliceval
        # Back face along dimension 2
        somaslice = A[:, A.shape[1] - 1, :]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[3] = sliceval
        # Front face along dimension 3
        somaslice = A[:, :, 0]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[4] = sliceval
        # Back face along dimension 3
        somaslice = A[:, :, A.shape[2] - 1]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[5] = sliceval
        # The maxval is used to compare the threshold(100 mentioned later)
        maxval = slicevalarray.max()
        # The maxind is the index of slicevalarray.
        # In addition, it determines which wall will be extended
        maxind = slicevalarray.argmax()
        # The size of binary data image
        sz1 = self.data.shape[0]
        # sz2 = self.data.shape[1]
        # sz3 = self.data.shape[2]
        # extend = enlrspt have value, not extend = (enlrspt=None)
        # 100 : A threshold of the total number of somatic voxels on each wall
        if (maxval > 100):
            self.enlrspt = self.startpoint.copy()
            self.enlrept = self.endpoint.copy()
            # The following code determines the most possible wall(face)
            # which requires the extension
            # NOTE(review): every direction is extended by sz1/4, i.e. a
            # quarter of dimension 0 even for dims 1 and 2 — confirm whether
            # sz2/sz3 (commented out above) were intended instead.
            if (maxind == 0):
                self.enlrspt[0] = self.enlrspt[0] - (sz1 / 4)
            elif (maxind == 1):
                self.enlrept[0] = self.enlrept[0] + (sz1 / 4)
            elif (maxind == 2):
                self.enlrspt[1] = self.enlrspt[1] - (sz1 / 4)
            elif (maxind == 3):
                self.enlrept[1] = self.enlrept[1] + (sz1 / 4)
            elif (maxind == 4):
                self.enlrspt[2] = self.enlrspt[2] - (sz1 / 4)
            elif (maxind == 5):
                self.enlrept[2] = self.enlrept[2] + (sz1 / 4)
        # To constrain new bounding box inside the image size
        else:
            self.enlrspt = None
            self.enlrept = None
    def autosmooth(self):
        """The automatic smoothing of soma volume to remove dendrites.

        Applies up to 20 pure smoothing steps, stopping early when the
        foreground volume changes dramatically (shrinks below 75% or grows
        above 115% of the initial volume).
        """
        # The autosmooth is the abbreviation of automatic smoothing
        iterations = 20
        # Calculate the initial volume
        u = self._u
        ini_vol = sum(u[u > 0])
        # The smooth operation make
        for i in range(iterations):
            self.step_sm()
            u = self._u
            volu = sum(u[u > 0])
            vol_pct = volu / ini_vol
            # The criteria of the termination of soma growth
            # The somatic volume underwent dramatic change
            judge_one = vol_pct < 0.75
            judge_two = vol_pct > 1.15
            judge_criteria = np.logical_or(judge_one, judge_two)
            if judge_criteria:
                break
def evolve_visual(msnake, levelset=None, num_iters=20, background=None):
    """
    Visual evolution of a morphological snake.
    Parameters
    ----------
    msnake : MorphGAC or MorphACWE instance
        The morphological snake solver.
    levelset : array-like, optional
        If given, the levelset of the solver is initialized to this. If not
        given, the evolution will use the levelset already set in msnake.
    num_iters : int, optional
        The number of iterations.
    background : array-like, optional
        If given, background will be shown behind the contours instead of
        msnake.data.
    Returns
    -------
    The level set after the last iteration.
    """
    # Local import keeps matplotlib an optional dependency of this module.
    from matplotlib import pyplot as ppl
    if levelset is not None:
        msnake.levelset = levelset
    # Prepare the visual environment: left panel shows the contour over the
    # image, right panel shows the raw level-set embedding.
    fig = ppl.gcf()
    fig.clf()
    ax1 = fig.add_subplot(1, 2, 1)
    if background is None:
        ax1.imshow(msnake.data, cmap=ppl.cm.gray)
    else:
        ax1.imshow(background, cmap=ppl.cm.gray)
    ax1.contour(msnake.levelset, [0.5], colors='r')
    ax2 = fig.add_subplot(1, 2, 2)
    ax_u = ax2.imshow(msnake.levelset)
    ppl.pause(0.001)
    # Iterate.
    for i in range(num_iters):
        # Evolve.
        msnake.step()
        # Update figure: drop the previous contour artist before redrawing.
        del ax1.collections[0]
        ax1.contour(msnake.levelset, [0.5], colors='r')
        ax_u.set_data(msnake.levelset)
        fig.canvas.draw()
        # ppl.pause(0.001)
    # Return the last levelset.
    return msnake.levelset
def evolve_visual3d(msnake, levelset=None, num_iters=20):
    """
    Visual evolution of a three-dimensional morphological snake.
    Parameters
    ----------
    msnake : MorphGAC or MorphACWE instance
        The morphological snake solver.
    levelset : array-like, optional
        If given, the levelset of the solver is initialized to this. If not
        given, the evolution will use the levelset already set in msnake.
    num_iters : int, optional
        The number of iterations.
    Returns
    -------
    The level set after mlab.show() returns.
    """
    # Local import keeps mayavi an optional dependency of this module.
    from mayavi import mlab
    # import matplotlib.pyplot as ppl
    if levelset is not None:
        msnake.levelset = levelset
    fig = mlab.gcf()
    mlab.clf()
    src = mlab.pipeline.scalar_field(msnake.data)
    mlab.pipeline.image_plane_widget(
        src, plane_orientation='x_axes', colormap='gray')
    cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
    # Animate the evolution: each frame performs one solver step and
    # refreshes the isosurface.
    @mlab.animate(ui=True)
    def anim():
        for i in range(num_iters):
            msnake.step()
            cnt.mlab_source.scalars = msnake.levelset
            yield
    anim()
    mlab.show()
    # Return the last levelset.
    return msnake.levelset
| 24,107 | 34.040698 | 86 | py |
rivuletpy | rivuletpy-master/rivuletpy/__init__.py | 0 | 0 | 0 | py | |
rivuletpy | rivuletpy-master/rivuletpy/trace.py | import math
from tqdm import tqdm
import numpy as np
import skfmm
import msfm
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage.morphology import binary_dilation
from skimage.morphology import skeletonize_3d
from .soma import Soma
from .swc import SWC
class Tracer(object):
    """Abstract base class for neuron tracers.

    Concrete tracers are expected to override :meth:`reset` and
    :meth:`trace`; the base implementations are no-ops.
    """

    def __init__(self):
        pass

    def reset(self):
        """Reset internal state; no-op in the base class."""
        pass

    def trace(self):
        """Trace the input; no-op in the base class."""
        pass
class R2Tracer(Tracer):
    """Rivulet2 tracer.

    Segments the input image, detects the soma, fast-marches a geodesic
    time map from the soma centroid, then repeatedly back-tracks branches
    from the geodesically furthest uncovered voxel, erasing each traced
    branch, until ~98% of the foreground is covered.
    """
    def __init__(self, quality=False, silent=False, speed=False,
                 clean=False, non_stop=False, skeletonize=False):
        # quality: use MSFM (slower, more accurate) instead of plain FM.
        self._quality = quality
        self._bimg = None
        self._dilated_bimg = None
        self._bsum = 0  # For counting the covered foreground
        self._bb = None  # For making the erasing contour
        self._t = None  # Original timemap
        self._tt = None  # The copy of the timemap
        self._grad = None
        self._coverage = 0.
        self._soma = None  # soma
        self._silent = silent  # Disable all console outputs
        # Tracing stops when 98% of the foreground has been covered
        self._target_coverage = 0.98
        self._cover_ctr_old = 0.
        self._cover_ctr_new = 0.
        # The type of speed image to use. Options are ['dt', 'ssm']
        self._speed = speed
        self._erase_ratio = 1.5
        # Whether the unconnected branches will be discarded
        self._clean = clean
        # Whether to ignore the gap and online confidence stopping criteria
        self._non_stop = non_stop
        self.skeletonize = skeletonize
        self._eps = 1e-5
    def trace(self, img, threshold):
        '''
        The main entry for Rivulet2.

        img: 3-D intensity image; threshold: scalar used to segment it.
        Returns (swc, soma).
        '''
        self.img = img
        self._bimg = (img > threshold).astype('int')  # Segment image
        if not self._silent:
            print('(1) -- Detecting Soma...', end='')
        self._soma = Soma()
        self._soma.detect(self._bimg, not self._quality, self._silent)
        self._prep()
        # Iterative Back Tracking with Erasing
        if not self._silent:
            print('(5) --Start Backtracking with {} ...'.format(
                'non stop' if self._non_stop else 'standard stopping criteria'))
        swc = self._iterative_backtrack()
        if self._clean:
            swc.prune()
        return swc, self._soma
    def _prep(self):
        """Build all the maps tracing needs: DT, time map, gradients."""
        if self.skeletonize:
            print('Skeletonize the binary image...')
            self._bimg = skeletonize_3d(self._bimg)
            self._bimg = binary_dilation(self._bimg, iterations=1)
        self._nforeground = self._bimg.sum()
        # Dilate bimg to make it less strict for the big gap criteria
        # It is needed since sometimes the tracing goes along the
        # boundary of the thin fibre in the binary img
        self._dilated_bimg = binary_dilation(self._bimg)
        if not self._silent:
            print('(2) --Boundary DT...')
        self._make_dt()
        if not self._silent:
            print('(3) --Fast Marching with %s quality...' %
                  ('high' if self._quality else 'low'))
        self._fast_marching()
        if not self._silent:
            print('(4) --Compute Gradients...')
        self._make_grad()
        # Make copy of the timemap
        self._tt = self._t.copy()
        self._tt[self._bimg <= 0] = -2
        # Label all voxels of soma with -3
        self._tt[self._soma.mask > 0] = -3
        # For making a large tube to contain the last traced branch
        self._bb = np.zeros(shape=self._tt.shape)
    def _update_coverage(self):
        """Recompute the fraction of foreground already erased (covered)."""
        self._cover_ctr_new = np.logical_and(
            self._tt < 0, self._bimg > 0).sum()
        self._coverage = self._cover_ctr_new / self._nforeground
        if not self._silent:
            self._pbar.update(self._cover_ctr_new - self._cover_ctr_old)
        self._cover_ctr_old = self._cover_ctr_new
    def _make_grad(self):
        """Build trilinear interpolators for the time-map gradient field."""
        # Get the gradient of the Time-crossing map
        dx, dy, dz = self._dist_gradient()
        standard_grid = (np.arange(self._t.shape[0]), np.arange(self._t.shape[1]),
                         np.arange(self._t.shape[2]))
        self._grad = (RegularGridInterpolator(standard_grid, dx),
                      RegularGridInterpolator(standard_grid, dy),
                      RegularGridInterpolator(standard_grid, dz))
    def _make_dt(self):
        '''
        Make the distance transform according to the speed type
        '''
        if self._speed:
            self._dt = self.img.astype(float)  # The input image
            self._dt /= self._dt.max()
        else:
            self._dt = skfmm.distance(self._bimg, dx=5e-2)  # Boundary DT
    def _fast_marching(self):
        """Compute the geodesic time map from the soma centroid."""
        speed = self._make_speed()
        # # Fast Marching
        if self._quality:
            # if not self._silent: print('--MSFM...')
            self._t = msfm.run(speed, self._bimg.copy().astype(
                'int64'), self._soma.centroid, True, True)
        else:
            # if not self._silent: print('--FM...')
            marchmap = np.ones(self._bimg.shape)
            marchmap[self._soma.centroid[0],
                     self._soma.centroid[1], self._soma.centroid[2]] = -1
            self._t = skfmm.travel_time(marchmap, speed, dx=5e-3)
    def _make_speed(self):
        """Speed image for fast marching: DT**4 (clamped positive)."""
        F = self._dt ** 4
        F[F <= 0] = 1e-10
        return F
    def _dist_gradient(self):
        """Return the (negated) descent direction field of the time map.

        For each voxel, finds the 26-neighbour with the smallest time value
        and records the unit vector towards it. Note: self._t is lowered in
        place to the neighbourhood minimum during this sweep.
        """
        fx = np.zeros(shape=self._t.shape)
        fy = np.zeros(shape=self._t.shape)
        fz = np.zeros(shape=self._t.shape)
        J = np.zeros(shape=[s + 2 for s in self._t.shape])  # Padded Image
        J[:, :, :] = self._t.max()
        J[1:-1, 1:-1, 1:-1] = self._t
        Ne = [[-1, -1, -1], [-1, -1, 0], [-1, -1, 1], [-1, 0, -1], [-1, 0, 0],
              [-1, 0, 1], [-1, 1, -1], [-1, 1, 0], [-1, 1, 1], [0, -1, -1],
              [0, -1, 0], [0, -1, 1], [0, 0, -1], [0, 0, 1], [0, 1, -1],
              [0, 1, 0], [0, 1, 1], [1, -1, -1], [1, -1, 0], [1, -1, 1],
              [1, 0, -1], [1, 0, 0], [1, 0, 1], [1, 1, -1], [1, 1, 0], [1, 1, 1]]
        for n in Ne:
            In = J[1 + n[0]:J.shape[0] - 1 + n[0],
                   1 + n[1]:J.shape[1] - 1 + n[1],
                   1 + n[2]:J.shape[2] - 1 + n[2]]
            check = In < self._t
            self._t[check] = In[check]
            D = np.divide(n, np.linalg.norm(n))
            fx[check] = D[0]
            fy[check] = D[1]
            fz[check] = D[2]
        return -fx, -fy, -fz
    def _step(self, branch):
        """Advance *branch* by one RK4 step down the time-map gradient."""
        # RK4 Walk for one step
        p = rk4(branch.pts[-1], self._grad, self._t, 1)
        branch.update(p, self._bimg, self._dilated_bimg)
    def _erase(self, branch):
        """Mark the tube around *branch* as covered in the working time map."""
        # Erase it from the timemap
        for i in range(len(branch.pts)):
            n = [math.floor(n) for n in branch.pts[i]]
            r = 1 if branch.radius[i] < 1 else branch.radius[i]
            # To make sure all the foreground voxels are included in bb
            r = math.ceil(r * self._erase_ratio)
            X, Y, Z = np.meshgrid(
                constrain_range(n[0] - r, n[0] + r + 1, 0, self._tt.shape[0]),
                constrain_range(n[1] - r, n[1] + r + 1, 0, self._tt.shape[1]),
                constrain_range(n[2] - r, n[2] + r + 1, 0, self._tt.shape[2]))
            self._bb[X, Y, Z] = 1
        startidx, endidx = [math.floor(p) for p in branch.pts[0]], [
            math.floor(p) for p in branch.pts[-1]]
        # Restrict erasing to the time band swept by this branch when the
        # branch clearly descended the time map; otherwise erase the tube.
        if len(branch.pts) > 5 and self._t[endidx[0], endidx[1], endidx[2]] < self._t[
                startidx[0], startidx[1], startidx[2]]:
            erase_region = np.logical_and(
                self._t[endidx[0], endidx[1], endidx[2]] <= self._t,
                self._t <= self._t[startidx[0], startidx[1], startidx[2]])
            erase_region = np.logical_and(self._bb, erase_region)
        else:
            erase_region = self._bb.astype('bool')
        if np.count_nonzero(erase_region) > 0:
            # -2 marks low-confidence (noise) coverage, -1 traced coverage.
            self._tt[erase_region] = -2 if branch.low_conf else -1
        self._bb.fill(0)
    def _iterative_backtrack(self):
        """Back-track branches until the target coverage is reached."""
        # Initialise swc with the soma centroid
        swc = SWC(self._soma)
        soma_node = np.asarray([0, 1, self._soma.centroid[0],
                                self._soma.centroid[1],
                                self._soma.centroid[2],
                                self._soma.radius, -1, 1.])
        swc.add(np.reshape(soma_node, (1, 8)))
        if not self._silent:
            self._pbar = tqdm(total=math.floor(self._nforeground * self._target_coverage))
        # Loop for all branches
        while self._coverage < self._target_coverage:
            self._update_coverage()
            # Find the geodesic furthest point on foreground time-crossing-map
            srcpt = np.asarray(np.unravel_index(
                self._tt.argmax(), self._tt.shape)).astype('float64')
            branch = R2Branch()
            branch.add(srcpt, 1., 1.)
            # Erase the source point just in case
            self._tt[math.floor(srcpt[0]), math.floor(
                srcpt[1]), math.floor(srcpt[2])] = -2
            keep = True
            # Loop for 1 back-tracking iteration
            while True:
                self._step(branch)
                head = branch.pts[-1]
                tt_head = self._tt[math.floor(head[0]), math.floor(
                    head[1]), math.floor(head[2])]
                # 1. Check out of bound
                if not inbound(head, self._bimg.shape):
                    branch.slice(0, -1)
                    break
                # 2. Check for the large gap criterion
                if branch.gap > np.asarray(branch.radius).mean() * 8 and not self._non_stop:
                    break
                else:
                    branch.reset_gap()
                # 3. Check if Soma has been reached
                if tt_head == -3:
                    keep = True if branch.branchlen > self._soma.radius * 3 else False
                    branch.reached_soma = True
                    break
                # 4. Check if not moved for 15 iterations
                if branch.is_stucked():
                    break
                # 5. Check for low online confidence
                if branch.low_conf and not self._non_stop:
                    keep = False
                    break
                # 6. Check for branch merge
                # Consider reaches previous explored area traced with branch
                # Note: when the area was traced due to noise points
                # (erased with -2), not considered as 'reached'
                if tt_head == -1:
                    branch.touched = True
                    if swc.size() == 1:
                        break
                    matched, matched_idx = swc.match(head, branch.radius[-1])
                    if matched > 0:
                        branch.touch_idx = matched_idx
                        break
                    if branch.steps_after_reach > 200:
                        break
            self._erase(branch)
            # Add to SWC if it was decided to be kept
            if keep:
                pidx = None
                if branch.reached_soma:
                    pidx = 0
                elif branch.touch_idx >= 0:
                    pidx = branch.touch_idx
                swc.add_branch(branch, pidx)
        return swc
class Branch(object):
    """Minimal container for a traced branch.

    ``pts`` and ``radius`` are parallel lists: pts[i] is a 3-D centreline
    coordinate and radius[i] the estimated radius at that point.
    """

    def __init__(self):
        self.pts = []
        self.radius = []
class R2Branch(Branch):
def __init__(self):
self.pts = []
self.conf = []
self.radius = []
self.steps_after_reach = 0
self.low_conf = False
self.touch_idx = -2
self.reached_soma = False
self.branchlen = 0
self.gap = 0
self.online_voxsum = 0
self.stepsz = 0
self.touched = False
self.ma_short = -1
self.ma_long = -1
self.ma_short_window = 4
self.ma_long_window = 10
self.in_valley = False
def add(self, pt, conf, radius):
self.pts.append(pt)
self.conf.append(conf)
self.radius.append(radius)
def is_stucked(self):
if self.stepsz == 0:
return True
if len(self.pts) > 15:
if np.linalg.norm(np.asarray(self.pts[-1]) - np.asarray(self.pts[-15])) < 1:
return True
else:
return False
else:
return False
def reset_gap(self):
self.gap = 0
def update(self, pt, bimg, dilated_bimg):
eps = 1e-5
head = self.pts[-1]
velocity = np.asarray(pt) - np.asarray(head)
self.stepsz = np.linalg.norm(velocity)
self.branchlen += self.stepsz
b = dilated_bimg[math.floor(pt[0]), math.floor(
pt[1]), math.floor(pt[2])]
if b > 0:
self.gap += self.stepsz
self.online_voxsum += b
oc = self.online_voxsum / (len(self.pts) + 1)
self.update_ma(oc)
# We are stepping in a valley
if (self.ma_short < self.ma_long - eps and
oc < 0.5 and not self.in_valley):
self.in_valley = True
# Cut at the valley
if self.in_valley and self.ma_short > self.ma_long:
valleyidx = np.asarray(self.conf).argmin()
# Only cut if the valley confidence is below 0.5
if self.conf[valleyidx] < 0.5:
self.slice(0, valleyidx)
self.low_conf = True
else:
in_valley = False
if oc <= 0.2:
self.low_conf = True
if self.touched:
self.steps_after_reach += 1
r = estimate_radius(pt, bimg)
self.add(pt, oc, r)
def update_ma(self, oc):
if len(self.pts) > self.ma_long_window:
if self.ma_short == -1:
self.ma_short = oc
else:
self.ma_short = exponential_moving_average(
oc, self.ma_short, self.ma_short_window
if len(self.pts) >= self.ma_short_window else len(self.pts))
if self.ma_long == -1:
self.ma_long = oc
else:
self.ma_long = exponential_moving_average(
oc, self.ma_long, self.ma_long_window
if len(self.pts) >= self.ma_long_window else len(self.pts))
def slice(self, start, end):
self.pts = self.pts[start: end]
self.radius = self.radius[start: end]
self.conf = self.conf[start: end]
def estimate_radius(pt, bimg):
    """Estimate the node radius at *pt* in the binary image *bimg*.

    Grows a cube centred at the voxel containing *pt* until the foreground
    occupancy of the (unclipped) cube volume drops below 60%.
    """
    cx = math.floor(pt[0])
    cy = math.floor(pt[1])
    cz = math.floor(pt[2])
    r = 0
    while True:
        r += 1
        try:
            window = bimg[max(cx - r, 0): min(cx + r + 1, bimg.shape[0]),
                          max(cy - r, 0): min(cy + r + 1, bimg.shape[1]),
                          max(cz - r, 0): min(cz + r + 1, bimg.shape[2])]
            if window.sum() / (2 * r + 1) ** 3 < .6:
                break
        except IndexError:
            break
    return r
def exponential_moving_average(p, ema, n):
    '''
    The exponential moving average (EMA) traditionally
    used in analysing stock market.
    EMA_{i+1} = (p * \\alpha) + (EMA_{i} * (1 - \\alpha))
    where p is the new value; EMA_{i} is the last ema value;
    n is the time period; \\alpha=2/(1+n) is the smoothing factor.
    ---------------------------------------------
    Parameters:
    p: The new value in the sequence
    ema: the last EMA value
    n: The period window size
    '''
    smoothing = 2 / (1 + n)
    # Same weighted-sum form as the classic definition above.
    return p * smoothing + ema * (1 - smoothing)
def rk4(srcpt, ginterp, t, stepsize):
    """One fourth-order Runge-Kutta step down the interpolated gradient.

    ginterp is a triple of per-axis gradient interpolators; t supplies the
    volume shape for bounds checking. Returns srcpt unchanged whenever an
    intermediate evaluation point would leave the volume.
    """
    def _scaled_gradient(pt):
        # Gradient at pt, scaled to at most `stepsize` length.
        vec = np.asarray([g(pt)[0] for g in ginterp])
        return vec * (stepsize / max(np.linalg.norm(vec), 1.))

    k1 = _scaled_gradient(srcpt)
    probe = srcpt - 0.5 * k1  # Position of temporary point
    if not inbound(probe, t.shape):
        return srcpt
    k2 = _scaled_gradient(probe)
    probe = srcpt - 0.5 * k2
    if not inbound(probe, t.shape):
        return srcpt
    k3 = _scaled_gradient(probe)
    probe = srcpt - k3
    if not inbound(probe, t.shape):
        return srcpt
    k4 = _scaled_gradient(probe)
    # Weighted combination of the four slopes gives the final point.
    return srcpt - (k1 + k2 * 2 + k3 * 2 + k4) / 6.0
def inbound(pt, shape):
    """Return True when every coordinate of *pt* lies within [0, dim-1]."""
    return all(0 <= coord <= dim - 1 for coord, dim in zip(pt, shape))
def constrain_range(min, max, minlimit, maxlimit):
    """Return list(range(lo, hi)) with the endpoints clipped to the limits.

    Note: the parameter names shadow the builtins; they are kept for
    backward compatibility with keyword callers.
    """
    lo = min if min > minlimit else minlimit
    hi = max if max < maxlimit else maxlimit
    return list(range(lo, hi))
| 16,754 | 33.054878 | 96 | py |
rivuletpy | rivuletpy-master/rivuletpy/swc.py | import math
import numpy as np
from .utils.io import saveswc
from collections import Counter
from random import gauss
from random import random
from random import randrange
from scipy.spatial.distance import cdist
class SWC(object):
    """In-memory SWC neuron reconstruction.

    ``self._data`` is an N x 8 array; the first seven columns are the
    standard SWC fields (id, type, x, y, z, radius, parent-id) and the
    extra 8-th column holds the online confidence of each node.
    """

    def __init__(self, soma=None):
        # Row 0 is a zero placeholder, or the soma node when *soma* is given.
        self._data = np.zeros((1, 8))
        if soma:
            self._data[0, :] = np.asarray(
                [
                    0,
                    1,
                    soma.centroid[0],
                    soma.centroid[1],
                    soma.centroid[2],
                    soma.radius,
                    -1,
                    1,
                ]
            )

    def add(self, swc_nodes):
        """Append *swc_nodes* (an M x 8 array) to the reconstruction."""
        # BUG FIX: np.vstack returns a new array; previously the result was
        # discarded, so the added nodes were silently lost.
        self._data = np.vstack((self._data, swc_nodes))

    def add_branch(self, branch, pidx=None, random_color=True):
        """
        Add a branch to swc.
        Note: This swc is special with N X 8 shape. The 8-th column is the online confidence
        """
        if random_color:
            rand_node_type = randrange(6, 257)
        new_branch = np.zeros((len(branch.pts), 8))
        id_start = 1 if self._data.shape[0] == 1 else self._data[:, 0].max() + 1
        for i in range(len(branch.pts)):
            p, r, c = branch.pts[i], branch.radius[i], branch.conf[i]
            node_id = id_start + i
            # 3 for basal dendrite; 4 for apical dendrite;
            # However now we cannot differentiate them automatically
            nodetype = 3
            if i == len(branch.pts) - 1:  # The end of this branch
                pid = self._data[pidx, 0] if pidx is not None else -2
                if pid != -2 and pid != 0 and self._data.shape[0] != 1:
                    # Its connected node is fork point
                    self._data[self._data[:, 0] == pid, 1] = 5
            else:
                pid = id_start + i + 1
                if i == 0:
                    nodetype = 6  # Endpoint
            assert pid != node_id
            new_branch[i] = np.asarray(
                [
                    node_id,
                    rand_node_type if random_color else nodetype,
                    p[0],
                    p[1],
                    p[2],
                    r,
                    pid,
                    c,
                ]
            )
        # Check if any tail should be connected to its tail
        tail = new_branch[0]
        matched, minidx = self.match(tail[2:5], tail[5])
        if matched and self._data[minidx, 6] == -2:
            self._data[minidx, 6] = tail[0]
        self._data = np.vstack((self._data, new_branch))

    def _prune_leaves(self):
        """Drop leaf branches shorter than 4x the mean radius."""
        # Find all the leaves
        childctr = Counter(self._data[:, 6])
        leafidlist = [nid for nid in self._data[:, 0] if nid not in self._data[:, 6]]
        id2dump = []
        rmean = self._data[:, 5].mean()  # Mean radius
        for leafid in leafidlist:  # Iterate each leaf node
            nodeid = leafid
            branch = []
            while True:  # Get the leaf branch out
                node = self._data[self._data[:, 0] == nodeid, :].flatten()
                if node.size == 0:
                    break
                branch.append(node)
                parentid = node[6]
                if childctr[parentid] != 1:
                    break  # merged / unconnected
                nodeid = parentid
            # Get the length of the leaf
            leaflen = sum(
                [
                    np.linalg.norm(branch[i][2:5] - branch[i - 1][2:5])
                    for i in range(1, len(branch))
                ]
            )
            # Prune if the leave is too short or
            # the confidence of the leave branch is too low
            if leaflen <= 4 * rmean:
                id2dump.extend([node[0] for node in branch])
        # Only keep the swc nodes not in the dump id list
        cutted = []
        for nodeidx in range(self._data.shape[0]):
            if self._data[nodeidx, 0] not in id2dump:
                cutted.append(self._data[nodeidx, :])
        cutted = np.squeeze(np.dstack(cutted)).T
        self._data = cutted

    def _prune_unreached(self):
        """
        Only keep the largest connected component
        """
        swcdict = {}
        for n in self._data:  # Hash all the swc nodes
            swcdict[n[0]] = Node(n[0])
        # Try to join all the unconnected branches at first
        for i, n in enumerate(self._data):
            if n[6] not in swcdict:
                # Try to match it
                matched, midx = self.match(n[2:5], n[5])
                if matched:
                    self._data[i, 6] = self._data[midx, 0]
        # Add mutual links for all nodes
        for n in self._data:
            nid = n[0]
            pid = n[6]
            if pid >= 0:
                swcdict[nid].add_link(swcdict[pid])
        groups = connected_components(set(swcdict.values()))
        lenlist = [len(g) for g in groups]
        maxidx = lenlist.index(max(lenlist))
        set2keep = groups[maxidx]
        id2keep = [n.id for n in set2keep]
        self._data = self._data[np.in1d(self._data[:, 0], np.asarray(id2keep)), :]

    def prune(self):
        """Prune unconnected components, then short leaves."""
        self._prune_unreached()
        self._prune_leaves()

    def reset(self, crop_region, zoom_factor):
        """
        Pad and rescale swc back to the original space
        """
        tswc = self._data.copy()
        if zoom_factor != 1.0:  # Pad the swc back to original space
            tswc[:, 2:5] *= 1.0 / zoom_factor
        # Pad the swc back
        tswc[:, 2] += crop_region[0, 0]
        tswc[:, 3] += crop_region[1, 0]
        tswc[:, 4] += crop_region[2, 0]
        self._data = tswc

    def get_id(self, idx):
        """Return the SWC id stored at row *idx*."""
        return self._data[idx, 0]

    def match(self, pos, radius):
        """
        Find the closest ground truth node.

        Returns (matched, minidx): matched is True when either node covers
        the other with a ball of its own radius; minidx is the row of the
        closest node (-2 when the table is empty).
        """
        nodes = self._data[:, 2:5]
        distlist = np.squeeze(cdist(pos.reshape(1, 3), nodes))
        if distlist.size == 0:
            return False, -2
        minidx = distlist.argmin()
        minnode = self._data[minidx, 2:5]
        # See if either of them can cover each other with a ball of their own
        # radius
        mindist = np.linalg.norm(pos - minnode)
        return radius > mindist or self._data[minidx, 5] > mindist, minidx

    def size(self):
        """Number of nodes (rows) in the reconstruction."""
        return self._data.shape[0]

    def save(self, fname):
        """Write the reconstruction to *fname* in SWC format."""
        saveswc(fname, self._data)

    def get_array(self):
        """Return the standard 7-column SWC view (confidence dropped)."""
        return self._data[:, :7]

    def view(self):
        """Interactively render the reconstruction with the OpenGL viewer."""
        from rivuletpy.utils.rendering3 import Viewer3, Line3

        # Compute the center of mass
        center = self._data[:, 2:5].mean(axis=0)
        translated = self._data[:, 2:5] - np.tile(center, (self._data.shape[0], 1))
        # Init viewer
        viewer = Viewer3(800, 800, 800)
        viewer.set_bounds(
            self._data[:, 2].min(),
            self._data[:, 2].max(),
            self._data[:, 3].min(),
            self._data[:, 3].max(),
            self._data[:, 4].min(),
            self._data[:, 4].max(),
        )
        lid = self._data[:, 0]
        line_color = [random(), random(), random()]
        for i in range(self._data.shape[0]):
            # Change color if its a bifurcation
            if (self._data[i, 0] == self._data[:, -1]).sum() > 1:
                line_color = [random(), random(), random()]
            # Draw a line between this node and its parent
            if (
                i < self._data.shape[0] - 1
                and self._data[i, 0] == self._data[i + 1, -1]
            ):
                l = Line3(translated[i, :], translated[i + 1, :])
                l.set_color(*line_color)
                viewer.add_geom(l)
            else:
                pid = self._data[i, -1]
                pidx = np.argwhere(pid == lid).flatten()
                if len(pidx) == 1:
                    l = Line3(translated[i, :], translated[pidx, :].flatten())
                    l.set_color(*line_color)
                    viewer.add_geom(l)
        while True:
            try:
                viewer.render(return_rgb_array=False)
            except KeyboardInterrupt:
                break

    def push_nodes_with_binary(self, b, step_ratio=0.1, niter=0):
        """
        Push the nodes towards the center with the binary image boundaries
        """
        lid = list(self._data[:, 0])
        lpid = list(self._data[:, -2])
        t_data = self._data.copy()
        # NOTE(review): this maps every parent id to the SAME list — rows
        # whose parent id equals their OWN id — which looks unintended; the
        # children of pid are presumably `[i for i, p in enumerate(lpid)
        # if p == pid]`. Left as-is pending confirmation against the
        # reference implementation.
        children_idx = {
            pid: [i for i, p in enumerate(lpid) if p == t_data[i, 0]] for pid in lpid
        }
        for _ in range(niter):
            for i in range(t_data.shape[0]):
                pid, radius, (x, y, z) = (
                    int(t_data[i, -2]),
                    t_data[i, -3],
                    t_data[i, 2:5],
                )
                cidx = children_idx[pid]
                if pid != i and pid in lid and len(cidx) <= 1:
                    px, py, pz = t_data[t_data[:, 0] == pid, 2:5][0]
                    vnorm = norm_vec(np.asarray([x - px, y - py, z - pz]))
                    if len(cidx) == 1:
                        cx, cy, cz = t_data[cidx[0], 2:5]
                        vnorm = (
                            vnorm + norm_vec(np.asarray([cx - x, cy - y, cz - z]))
                        ) / 2
                    if all([v == 0 for v in vnorm]):
                        continue
                    pt = np.asarray([x, y, z])
                    p_vectors = get_perpendicular_vectors(pt, vnorm)
                    p_distances = [
                        get_distance_to_boundary(pt, pvec, b) for pvec in p_vectors
                    ]
                    dx, dy, dz = np.sum(
                        [pv * pd for pv, pd in zip(p_vectors, p_distances)], 0
                    )
                    # Constrain the displacement by the node radii
                    tx = x + dx * step_ratio
                    ty = y + dy * step_ratio
                    tz = z + dz * step_ratio
                    dist = (
                        (tx - self._data[i, 2]) ** 2
                        + (ty - self._data[i, 3]) ** 2
                        + (tz - self._data[i, 4]) ** 2
                    ) ** 0.5
                    if dist <= radius / 2:
                        t_data[i, 2] = tx
                        t_data[i, 3] = ty
                        t_data[i, 4] = tz
                    else:
                        pass
        self._data = t_data
def get_distance_to_boundary(pt, vec, b):
    """March from *pt* along *vec* until leaving the foreground of *b*.

    Returns the Euclidean distance from *pt* to the last in-bounds
    foreground position before the step that hits background.
    """
    current = pt.copy()
    while True:
        candidate = current + vec
        ix = math.floor(candidate[0])
        iy = math.floor(candidate[1])
        iz = math.floor(candidate[2])
        if b[ix, iy, iz] <= 0:
            return ((current - pt) ** 2).sum() ** 0.5
        current = candidate
def norm_vec(vec):
    """Return *vec* scaled to unit Euclidean length."""
    magnitude = (vec**2).sum() ** 0.5
    return vec / magnitude
def get_perpendicular_vectors(pt, vec):
    """Return four vectors perpendicular to *vec*: two opposite pairs.

    ``pt`` is accepted for interface compatibility but is not used.
    """
    first = perpendicular_vector(vec)
    third = perpendicular_vector(vec, first)
    return first, -first, third, -third
def make_rand_vector3d():
    """Return a uniformly random 3-D unit vector (Gaussian sampling)."""
    components = [gauss(0, 1) for _ in range(3)]
    length = sum(c**2 for c in components) ** 0.5
    return [c / length for c in components]
def perpendicular_vector(v, vr=None):
    """Cross *v* with *vr* (or with a random unit vector when *vr* is None)."""
    other = make_rand_vector3d() if vr is None else vr
    return np.cross(v, other)
def get_subtree_nodeids(swc, node):
    """Recursively collect the ids of the subtree rooted at *node*.

    A leaf returns its own id (scalar); internal nodes return an array that
    repeats the node id once per child subtree.
    """
    child_rows = np.argwhere(node[0] == swc[:, 6])
    # Recursion stops at a leaf with no children.
    if child_rows.size == 0:
        return node[0]
    collected = np.array([])
    for row in child_rows:
        child_ids = get_subtree_nodeids(swc, swc[row, :].squeeze())
        collected = np.hstack((collected, child_ids, node[0]))
    return collected
class Node(object):
    """Graph node with an id and an undirected set of linked neighbours."""

    def __init__(self, id):
        self.__id = id
        self.__links = set()

    @property
    def id(self):
        """The node identifier."""
        return self.__id

    @property
    def links(self):
        """A copy of the set of neighbouring nodes."""
        return set(self.__links)

    def add_link(self, other):
        """Create a mutual (undirected) link between this node and *other*."""
        self.__links.add(other)
        other.__links.add(self)
def connected_components(nodes):
    """Partition *nodes* into connected components via breadth-first search.

    Each node must expose a ``links`` attribute/property yielding its
    neighbours. Component order is arbitrary.
    Reference: https://breakingcode.wordpress.com/2013/04/08/finding-connected-components-in-a-graph/
    """
    components = []
    # Copy so we can consume the set while iterating.
    remaining = set(nodes)
    while remaining:
        # Seed a new component with an arbitrary unvisited node.
        seed = remaining.pop()
        component = {seed}
        frontier = [seed]
        # Standard BFS over the seed's connected component.
        while frontier:
            current = frontier.pop(0)
            unvisited = current.links
            unvisited.difference_update(component)
            remaining.difference_update(unvisited)
            component.update(unvisited)
            frontier.extend(unvisited)
        components.append(component)
    return components
| 13,630 | 30.407834 | 101 | py |
rivuletpy | rivuletpy-master/rivuletpy/utils/rendering3.py | import os
import numpy as np
import math
# from gym.envs.classic_control.rendering import *
from .rendering import *
from PIL import Image # PIL library is required
import pyglet
from pyglet.gl import glu
from .io import *
# colors
# NOTE(review): black is a 4-component RGBA tuple while gray and red are
# 3-component RGB — confirm every consumer accepts both lengths.
black = (0, 0, 0, 1)
gray = (0.5, 0.5, 0.5)
red = (1, 0, 0)
def _add_attrs(geom, attrs):
if "color" in attrs:
geom.set_color(*attrs["color"])
if "linewidth" in attrs:
geom.set_linewidth(attrs["linewidth"])
class Cylinder3(Geom):
    """A GLU cone/cylinder drawn at *centre*, intended to face *face*.

    The rotation-alignment code is a work in progress (see the commented
    attempts in render1); currently a fixed orientation matrix built from
    hard-coded T/Y axes is applied.
    """
    def __init__(self, centre=(0.0, 0.0, 0.0), radius=2, face=(1,0,0)):
        Geom.__init__(self)
        self._centre = centre
        self._radius = radius
        # Store the facing direction normalized to unit length.
        self._face = face/np.linalg.norm(face)
    def render1(self):
        # http://stackoverflow.com/questions/6992541/opengl-rotation-in-given-direction
        glPushMatrix()
        glTranslatef(*self._centre)
        # glRotatef(-90, 0, 1, 0) # Rotate to face (1,0,0)
        # print('== Rotate to face', self._face)
        # glRotatef(-np.arcsin(self._face[2]) * 180 / np.pi, 0, 1, 0)
        # glRotatef(np.arctan2(self._face[1], self._face[0]) * 180 / np.pi, 0, 0, 1)
        T = np.array((0., 0., 1.))  # TODO: Need to be face vector
        Y = np.array((0., 1., 0.))  # TODO: need to be up vector
        # Build an orthonormal basis (L, U, T) and pack it, together with
        # the translation, into a column-major 4x4 matrix for OpenGL.
        U = (T - np.dot(T, Y))
        U /= np.linalg.norm(U)
        L = np.cross(U, T)
        M = np.zeros(shape=(4, 4))
        M[0:3, 0] = L
        M[0:3, 1] = U
        M[0:3, 2] = T
        M[0:3, 3] = np.array(self._centre)
        M[-1, -1] = 1
        # BUG FIX: removed stray debug `print('M:', M)` that spammed stdout
        # on every rendered frame.
        M = M.flatten('F')
        M = (GLfloat*len(M))(*M)
        # M = glGetFloatv(GL_MODELVIEW_MATRIX, M)
        glMultMatrixf(M)
        gluCylinder(gluNewQuadric(), self._radius, 0, 4*self._radius, 100, 100)
        glPopMatrix()
class Ball3(Geom):
    """A solid GLU sphere rendered at a fixed centre with a fixed radius."""
    def __init__(self, centre=(0.0, 0.0, 0.0), radius=2):
        Geom.__init__(self)
        self._centre = centre
        self._radius = radius
    def render1(self):
        # Translate the modelview matrix to the centre, draw, then restore.
        glPushMatrix()
        glTranslatef(*self._centre)  # translate to the GL local point
        gluSphere(gluNewQuadric(), self._radius, 100, 100)
        glPopMatrix()
class Line3(Geom):
    """A 3-D line segment between *start* and *end* with adjustable width."""
    def __init__(self, start=(0.0, 0.0, 0.0), end=(0.0, 0.0, 0.0)):
        Geom.__init__(self)
        self.start = start
        self.end = end
        # LineWidth attribute is applied by the Geom render machinery.
        self.linewidth = LineWidth(1)
        self.add_attr(self.linewidth)
    def render1(self):
        # pyglet.graphics.draw(2, gl.GL_LINES, ('v3f', self.start + self.end))
        glBegin(GL_LINES)
        glVertex3f(*self.start)
        glVertex3f(*self.end)
        glEnd()
    def set_line_width(self, x):
        """Set the stroke width used when the line is rendered."""
        self.linewidth.stroke = x
class Transform3(Transform):
    """3-D version of Transform: translation, per-axis rotation and scale.

    enable() pushes the matrix stack and applies translate -> rotate(x,y,z)
    -> scale; Transform.disable() (inherited) pops it.
    """
    def __init__(self, translation=(0.0, 0.0, 0.0), rotation=(0.0, 0.0, 0.0), scale=(1, 1, 1)):
        self.set_translation(*translation)
        self.set_rotation(*rotation)
        self.set_scale(*scale)
    def enable(self):
        glPushMatrix()
        glLoadIdentity()
        glTranslatef(*self.translation)  # translate to GL loc ppint
        glRotatef(self.rotation[0], 1, 0, 0)
        glRotatef(self.rotation[1], 0, 1, 0)
        glRotatef(self.rotation[2], 0, 0, 1)
        glScalef(*self.scale)
    def set_translation(self, newx, newy, newz):
        """Set the translation component (floats)."""
        self.translation = (float(newx), float(newy), float(newz))
    def set_rotation(self, newx, newy, newz):
        """Set per-axis rotation angles in degrees (glRotatef convention)."""
        self.rotation = (float(newx), float(newy), float(newz))
    def set_scale(self, newx, newy, newz):
        """Set the per-axis scale factors."""
        self.scale = (float(newx), float(newy), float(newz))
class Viewer3(Viewer):
    """3D extension of Viewer: perspective projection plus mouse-driven
    zoom (wheel), rotation (left drag) and pan (left+right drag)."""

    def __init__(self, width, height, depth, display=None):
        super(Viewer3, self).__init__(width, height, display)
        self.depth = depth  # scene depth, used for the far clipping plane
        self.transform = Transform3()

        @self.window.event
        def on_resize(width, height):
            # sets the viewport
            gl.glViewport(0, 0, width, height)
            # sets the projection
            gl.glMatrixMode(gl.GL_PROJECTION)
            gl.glLoadIdentity()
            glu.gluPerspective(90, width / float(height), 0.1, 2*self.depth)
            # sets the model view
            gl.glMatrixMode(gl.GL_MODELVIEW)
            gl.glLoadIdentity()
            return pyglet.event.EVENT_HANDLED

        @self.window.event
        def on_mouse_scroll(x, y, scroll_x, scroll_y):
            # scroll the MOUSE WHEEL to zoom (move the camera along z)
            self.transform.set_translation(self.transform.translation[0],
                                           self.transform.translation[1],
                                           self.transform.translation[2] - scroll_y * 20)

        @self.window.event
        def on_mouse_drag(x, y, dx, dy, button, modifiers):
            # press the LEFT MOUSE BUTTON to rotate
            if button == pyglet.window.mouse.LEFT:
                self.transform.set_rotation(self.transform.rotation[0] - dy / 5.0,
                                            self.transform.rotation[1] + dx / 5.0,
                                            self.transform.rotation[2])
            # press the LEFT and RIGHT MOUSE BUTTON simultaneously to pan
            if button == pyglet.window.mouse.LEFT | pyglet.window.mouse.RIGHT:
                self.transform.set_translation(self.transform.translation[0] + dx/2,
                                               self.transform.translation[1] + dy/2,
                                               self.transform.translation[2])

    def set_bounds(self, left, right, bottom, top, towards, away):
        # Backs the camera off along -z so the whole scene is in view.
        assert right > left and top > bottom and away > towards
        scalex = self.width / (right-left)    # NOTE(review): computed but unused
        scaley = self.height / (top-bottom)   # NOTE(review): computed but unused
        scalez = self.depth / (away-towards)  # NOTE(review): computed but unused
        self.transform = Transform3(
            translation=(0, 0, -1.5*away))

    def draw_line(self, start, end, **attrs):
        # One-off 3D line; it is cleared after the next render().
        geom = Line3(start, end)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def render(self, return_rgb_array=False):
        glClear(GL_COLOR_BUFFER_BIT);
        self.window.clear()
        self.window.switch_to()
        self.window.dispatch_events()
        # glLoadIdentity()
        self.transform.enable()
        for geom in self.geoms:
            geom.render()
        for geom in self.onetime_geoms:
            geom.render()
        self.transform.disable()
        arr = None
        if return_rgb_array:
            # Read the colour buffer back as an HxWx3 RGB array, flipped
            # vertically because GL rows start at the bottom.
            image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
            arr = np.fromstring(image_data.data, dtype=np.uint8, sep='')
            arr = arr.reshape(self.height, self.width, 4)
            arr = arr[::-1, :, 0:3]
        self.window.flip()
        self.onetime_geoms = []
        return arr
        # super(Viewer3, self).render(return_rgb_array)
| 6,644 | 33.252577 | 95 | py |
rivuletpy | rivuletpy-master/rivuletpy/utils/metrics.py | from collections import deque
import numpy as np
from scipy.spatial.distance import cdist
def precision_recall(swc1, swc2, dist1=4, dist2=4):
    '''
    Calculate the precision, recall and F1 score between swc1 and swc2
    (swc2 is the ground truth). Also builds a combined swc whose node types
    encode the agreement between the two inputs:
        1/TPCOLOUR - node present in both (agree)
        FPCOLOUR   - node in swc1 only (over-traced)
        FNCOLOUR   - node in swc2 only (under-traced)
    dist1 / dist2: distance tolerances for precision / recall.
    Note: swc1 is recoloured in place.
    Returns ((precision, recall, f1), (SD, SSD, pSSD), swc_compare).
    '''
    # SWC node types used for visual comparison in V3D.
    TPCOLOUR, FPCOLOUR, FNCOLOUR = 3, 2, 180
    pairwise = cdist(swc1[:, 2:5], swc2[:, 2:5])
    nearest_gt = pairwise.min(axis=1)  # per traced node: distance to closest gt node
    nearest_tr = pairwise.min(axis=0)  # per gt node: distance to closest traced node
    tp = (nearest_gt < dist1).sum()
    fp = swc1.shape[0] - tp
    fn = (nearest_tr > dist2).sum()
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1 = 2 * precision * recall / (precision + recall)
    # Recolour the traced swc and append the missed ground-truth nodes.
    swc1[nearest_gt <= dist1, 1] = TPCOLOUR
    swc1[nearest_gt > dist1, 1] = FPCOLOUR
    missed = swc2[nearest_tr > dist2, :]
    missed[:, 0] = missed[:, 0] + 100000   # keep ids disjoint from swc1
    missed[:, -1] = missed[:, -1] + 100000
    missed[:, 1] = FNCOLOUR
    swc_compare = np.vstack((swc1, missed))
    swc_compare[:, -2] = 1
    # Spatial distance metrics SD, SSD, SSD% defined in Peng et al. 2010.
    SD = (np.mean(nearest_gt) + np.mean(nearest_tr)) / 2
    far1, far2 = nearest_gt[nearest_gt > dist1], nearest_tr[nearest_tr > dist2]
    SSD = (np.mean(far1) + np.mean(far2)) / 2
    pSSD = (len(far1) / len(nearest_gt) + len(far2) / len(nearest_tr)) / 2
    return (precision, recall, f1), (SD, SSD, pSSD), swc_compare
def upsample_swc(swc):
    """Insert a midpoint node on every parent-child edge longer than 1 voxel.

    swc: N x 7 SWC array (id, type, x, y, z, radius, parent_id).
    Returns a new array with the inserted nodes appended; the input array
    is not modified.

    Bug fix: the reparenting used to run *after* ``newid`` was incremented,
    so each split node was attached to the id of the *next* inserted node
    instead of its own midpoint. Also guards against ``np.vstack`` on an
    empty list when no edge needs splitting.
    """
    tswc = swc.copy()
    # Hash table: node id -> row index.
    id_idx = {tswc[i, 0]: i for i in range(tswc.shape[0])}
    newid = tswc[:, 0].max() + 1
    newnodes = []
    for nodeidx in range(tswc.shape[0]):
        pid = tswc[nodeidx, -1]  # parent id
        if pid not in id_idx:
            # Root or dangling parent reference: nothing to interpolate.
            continue
        nodepos = tswc[nodeidx, 2:5]
        parentpos = tswc[id_idx[pid], 2:5]
        if np.linalg.norm(nodepos - parentpos) > 1.:
            # Add a node halfway between this node and its parent.
            mid_pos = nodepos + 0.5 * (parentpos - nodepos)
            newnodes.append(
                np.asarray([newid, 2, mid_pos[0], mid_pos[1], mid_pos[2], 1, pid]))
            # Reparent the current node to the node just inserted.
            tswc[nodeidx, -1] = newid
            newid += 1
    # Stack the new nodes to the end of the swc array, if any were created.
    if newnodes:
        tswc = np.vstack((tswc, np.vstack(newnodes)))
    return tswc
def gaussian_distance(swc1, swc2, sigma=2.):
    '''
    The geometric metric of NetMets: Gaussian-weighted distances between the
    closest neighbours of two upsampled swc trees.
    returns : (M1, M2) where M1 holds the measures from the nodes in swc1 to
    their closest neighbour in swc2; vice versa for M2.
    D. Mayerich, C. Bjornsson, J. Taylor, and B. Roysam,
    "NetMets: software for quantifying and visualizing errors in biological
    network segmentation.," BMC Bioinformatics, vol. 13 Suppl 8, p. S7, 2012.

    Bug fix: the Gaussian exponent was missing its minus sign, so the
    measure diverged negatively with distance instead of saturating at 1.
    With the fix, a zero distance gives 0 and a large distance approaches 1.
    '''
    swc1 = upsample_swc(swc1)
    swc2 = upsample_swc(swc2)
    d = cdist(swc1[:, 2:5], swc2[:, 2:5])  # pairwise distances between the 2 swc files
    mindist1 = d.min(axis=1)
    M1 = 1 - np.exp(-(mindist1 ** 2) / (2 * sigma ** 2))
    mindist2 = d.min(axis=0)
    M2 = 1 - np.exp(-(mindist2 ** 2) / (2 * sigma ** 2))
    return M1, M2
def connectivity_distance(swc1, swc2, sigma=2., ignore_leaf=True):
    '''
    The connectivity metrics of NetMets.
    Returns (midx1, midx2): the indices of nodes in each swc that have connection errors
    D. Mayerich, C. Bjornsson, J. Taylor, and B. Roysam,
    "NetMets: software for quantifying and visualizing errors in biological network segmentation.,"
    BMC Bioinformatics, vol. 13 Suppl 8, no. Suppl 8, p. S7, 2012.

    NOTE(review): despite the docstring, the final return value is the
    *fraction* of mis-connected coloured nodes per swc, not the indices.
    '''
    # graph Initialisation
    d = cdist(swc1[:, 2:5], swc2[:, 2:5])  # Pairwise distances between 2 swc files
    mindist1, mindist2 = d.min(axis=1), d.min(axis=0)
    minidx1, minidx2 = d.argmin(axis=1), d.argmin(axis=0)
    # Colour nodes - nodes closer than sigma are matched and share a colour index
    cnodes1, cnodes2 = {}, {}  # Coloured Nodes <id, colour>
    for i in range(swc1.shape[0]):
        if mindist1[i] < sigma:
            cnodes1[swc1[i, 0]] = i
            cnodes2[swc2[minidx1[i], 0]] = i
    # Build Initial undirected graphs, Edge: <id_i, id_j>: 1
    g1 = build_graph_from_swc(swc1)
    g2 = build_graph_from_swc(swc2)
    # BFS to build the core graph for both swc; returns the edges NOT used
    # to build the core graph (candidate connection errors)
    dg1 = build_core_graph(g1, cnodes1)
    dg2 = build_core_graph(g2, cnodes2)
    # Find the diff edges with coloured nodes involved
    mid1 = set()
    for id in dg1:
        for nid in g1[id]:
            if nid in cnodes1: mid1.add(nid)
    mid2 = set()
    for id in dg2:
        for nid in g2[id]:
            if nid in cnodes2: mid2.add(nid)
    # Map node ids back to row indices.
    id_idx_hash1 = {}
    for i in range(swc1.shape[0]): id_idx_hash1[swc1[i, 0]] = i
    id_idx_hash2 = {}
    for i in range(swc2.shape[0]): id_idx_hash2[swc2[i, 0]] = i
    midx1 = [ int(id_idx_hash1[id]) for id in mid1 ]  # Mistake coloured nodes in edges of dg1
    midx2 = [ int(id_idx_hash2[id]) for id in mid2 ]  # Mistake coloured nodes in edges of dg2
    # Filter out the midx of nodes on leaf segments
    if ignore_leaf:
        leafidx1 = find_leaf_idx(swc1)
        midx1 = set(midx1) - set(leafidx1)
        leafidx2 = find_leaf_idx(swc2)
        midx2 = set(midx2) - set(leafidx2)
    return len(midx1) / len(mid1), len(midx2) / len(mid2)
def find_leaf_idx(swc):
    """Collect the row indices of all nodes lying on leaf segments, i.e.
    the unbranched chains running from each leaf node towards the first
    branch point (or the root)."""
    ids = swc[:, 0]
    parents = swc[:, -1]
    # degree = number of children + 1 (the +1 applies to the root as well).
    degree = np.asarray([(parents == nid).sum() for nid in ids]) + 1
    leaf_segment_idx = []
    # A node is a leaf if it is parent to no other node (degree == 1).
    for idx in np.where(degree == 1)[0]:
        nodeidx = idx
        # Climb towards the root while the chain stays unbranched (< 2 children).
        while degree[nodeidx] < 3:
            leaf_segment_idx.append(int(nodeidx))
            if swc[nodeidx, -1] < 0:  # reached the root
                break
            nodeidx = np.where(swc[:, 0] == swc[nodeidx, -1])[0]
    return leaf_segment_idx
def build_graph_from_swc(swc):
    """Build an undirected adjacency map {node_id: set(neighbour_ids)} from
    the parent links of an N x 7 SWC array."""
    adjacency = {}

    def _link(a, b):
        # Append b to a's neighbour list, creating the list on first use.
        adjacency.setdefault(a, []).append(b)

    for row in range(swc.shape[0]):
        nid, pid = swc[row, 0], swc[row, -1]
        _link(nid, pid)
        _link(pid, nid)
    # Deduplicate the neighbour lists into sets.
    return {nid: set(neigh) for nid, neigh in adjacency.items()}
def build_core_graph(g, cnodes):
    '''
    Returns the edges not used in building the core graph (topologically matched between two graphs)
    '''
    cnodes = cnodes.copy()  # Coloured node list to mark which have not been discovered
    # NOTE(review): shallow copy - the neighbour sets are shared with g until
    # reassigned by the difference() loop below.
    dg = g.copy()
    while cnodes:
        root = next(iter(cnodes))
        core_neighbours = find_core_neighbours_bfs(dg, root, cnodes)  # BFS to discover the neighbour
        nodes_on_path = set()
        if core_neighbours:
            # Collect every node on the shortest path from root to each of
            # its coloured neighbours; these belong to the core graph.
            for id in core_neighbours:
                nodes_on_path = nodes_on_path.union(track_path_nodes_dijstra(dg, id, root))
        else:
            nodes_on_path.add(root)
        cnodes.pop(root)  # Remove the discovered coloured nodes
        # Remove the core-path nodes (and all edges to them) from the
        # working graph; what remains after the loop is the error set.
        for n in nodes_on_path:
            dg.pop(n, None)
        for n in dg:
            dg[n] = dg[n].difference(nodes_on_path)
    return dg
def find_core_neighbours_bfs(g, root, cnodes):
    '''
    Breadth-first search from ``root`` that stops expanding at coloured
    nodes; returns the coloured nodes reached (excluding ``root`` itself).
    '''
    seen = {root: True}
    frontier = deque([root])
    coloured_found = []
    while frontier:
        current = frontier.popleft()
        if current in cnodes and current != root:
            # A coloured node stops the search on this branch.
            coloured_found.append(current)
        else:
            # Expand all unvisited neighbours of the current node.
            for neighbour in g[current]:
                if neighbour not in seen:
                    seen[neighbour] = True
                    frontier.append(neighbour)
    return coloured_found
def track_path_nodes_dijstra(g, target, source):
    """Dijkstra with unit edge weights over adjacency map ``g``.

    Returns the set of nodes on the shortest path from ``source`` to
    ``target`` — ``source`` included, ``target`` excluded.
    """
    predecessor = {}
    dist = {source: 0}
    unvisited = g.copy()
    while unvisited:
        # Pick the unvisited node with the smallest known distance.
        reachable = [nd for nd in unvisited if nd in dist]
        if not reachable:
            break  # remaining nodes are unreachable from source
        closest = min(reachable, key=dist.__getitem__)
        unvisited.pop(closest)
        base = dist[closest]
        # Relax all edges out of the chosen node (unit weight each).
        for neighbour in g[closest]:
            candidate = base + 1
            if neighbour not in dist or candidate < dist[neighbour]:
                dist[neighbour] = candidate
                predecessor[neighbour] = closest
        if closest == target:
            break
    # Walk the predecessor chain back from target to source.
    on_path, cursor = set(), target
    while cursor != source:
        cursor = predecessor[cursor]
        on_path.add(cursor)
    return on_path
| 9,515 | 31.367347 | 116 | py |
rivuletpy | rivuletpy-master/rivuletpy/utils/__init__.py | 0 | 0 | 0 | py | |
rivuletpy | rivuletpy-master/rivuletpy/utils/io.py | import os
import numpy as np
from scipy import io as sio
import SimpleITK as sitk
def loadimg(file, target_resolution):
    """Load a 3D image volume from .mat, .tif, .mhd or .nii(.gz).

    file: path to the image; the loader is chosen from the extension.
    target_resolution: isotropic voxel size used when resampling .mhd input.
    Returns a 3D numpy array.
    Raises IOError for unsupported extensions.
    """
    if file.endswith('.mat'):
        filecont = sio.loadmat(file)
        img = filecont['img']
        for z in range(img.shape[-1]):  # Flip the image upside down
            img[:, :, z] = np.flipud(img[:, :, z])
        img = np.swapaxes(img, 0, 1)
    elif file.endswith('.tif'):
        img = loadtiff3d(file)
    elif file.endswith('.mhd'):
        from scipy.ndimage.interpolation import zoom
        mhd = sitk.ReadImage(file)
        img = sitk.GetArrayFromImage(mhd)
        # Resample the image to isotropic resolution
        print('Resample Image to isotropic resolution 1mmx1mmx1mm')
        sx, sy, sz = mhd.GetSpacing()
        img = zoom(img, (sz / target_resolution,
                         sy / target_resolution,
                         sx / target_resolution), order=0)
        # SimpleITK arrays come back z, y, x; reorder to x, y, z.
        img = np.transpose(img, (2, 1, 0))
    elif file.endswith('.nii') or file.endswith('.nii.gz'):
        import nibabel as nib
        img = nib.load(file)
        # NOTE(review): get_data() is deprecated in recent nibabel releases;
        # get_fdata() is the replacement — confirm before upgrading nibabel.
        img = img.get_data()
    else:
        raise IOError("The extension of " + file +
                      'is not supported. File extension supported are: *.tif, *.mat, *.nii')
    return img
def loadtiff3d(filepath):
    """Load a tiff file into 3D numpy array"""
    from libtiff import TIFF
    tiff = TIFF.open(filepath, mode='r')
    stack = []
    # Each 2D page is flipped/rotated so the axes match the rest of the pipeline.
    for sample in tiff.iter_images():
        stack.append(np.rot90(np.fliplr(np.flipud(sample))))
    out = np.dstack(stack)  # stack the pages along the 3rd (z) axis
    tiff.close()
    return out
def writetiff3d(filepath, block):
    """Write a 3D numpy array to a multi-page tiff, applying the inverse of
    the axis shuffling done by loadtiff3d."""
    from libtiff import TIFF
    try:
        os.remove(filepath)  # overwrite any existing file
    except OSError:
        pass
    tiff = TIFF.open(filepath, mode='w')
    block = np.swapaxes(block, 0, 1)
    for z in range(block.shape[2]):
        tiff.write_image(np.flipud(block[:, :, z]), compression=None)
    tiff.close()
def loadswc(filepath):
    '''
    Load an SWC file as an N x 7 numpy array.
    Comment lines (starting with '#') and lines without exactly 7
    space-separated fields are ignored.
    '''
    rows = []
    with open(filepath) as f:
        for line in f.read().split("\n"):
            if line.startswith('#'):
                continue  # skip comment lines
            fields = line.split(' ')
            if len(fields) == 7:
                # cells[2:5] = [c-1 for c in cells[2:5]]
                rows.append([float(c) for c in fields])
    return np.array(rows)
def saveswc(filepath, swc):
    """Write an SWC matrix to disk, one 'id type x y z radius pid' row
    per line; any columns beyond the standard 7 are dropped."""
    if swc.shape[1] > 7:
        swc = swc[:, :7]
    with open(filepath, 'w') as f:
        for row in swc:
            f.write('%d %d %.3f %.3f %.3f %.3f %d\n' % tuple(row.tolist()))
def crop(img, thr):
    """Crop a 3D block down to the bounding box of voxels with value > thr,
    padded by 10 voxels per side and clamped to the image bounds.

    Returns (cropped_img, bounds) where bounds is a 3x2 array of
    [min, max) indices per axis. Raises ValueError if no voxel exceeds thr.

    Bug fix: the lower clamps for the y and z axes were 1 and 2 (a
    copy-paste of the axis index) instead of 0, shifting the crop origin
    away from the volume boundary.
    """
    ind = np.argwhere(img > thr)
    x, y, z = ind[:, 0], ind[:, 1], ind[:, 2]
    xmin = max(x.min() - 10, 0)
    xmax = min(x.max() + 10, img.shape[0])
    ymin = max(y.min() - 10, 0)
    ymax = min(y.max() + 10, img.shape[1])
    zmin = max(z.min() - 10, 0)
    zmax = min(z.max() + 10, img.shape[2])
    return img[xmin:xmax, ymin:ymax, zmin:zmax], np.array(
        [[xmin, xmax], [ymin, ymax], [zmin, zmax]])
def world2ras(voxpos):
    '''Convert an LPS world coordinate to RAS by flipping the x and y axes.
    Inspired by get_vox2ras_tkr in
    https://discourse.slicer.org/t/building-the-ijk-to-ras-transform-from-a-nrrd-file/1513
    '''
    x, y, z = voxpos
    homogeneous = np.asarray([x, y, z, 1])
    lps_to_ras = np.diag([-1, -1, 1, 1])
    ras = lps_to_ras.dot(homogeneous.T)
    return np.squeeze(ras)[:3]
def swc2world(swc, mhd, spacing, slicer=False):
    """Map SWC node coordinates from voxel space to world (physical) space.

    swc: N x 7 SWC array; modified in place and returned.
    mhd: SimpleITK image providing spacing and the index->physical transform.
    spacing: the (x, y, z) voxel spacing the tracing was performed at.
    slicer: if True, additionally convert to RAS for rendering in 3D Slicer.
    """
    # First transfer the image coordinates to the original image size
    sp = mhd.GetSpacing()
    swc[:, 2] *= spacing[0] / sp[0]
    swc[:, 3] *= spacing[1] / sp[1]
    swc[:, 4] *= spacing[2] / sp[2]
    # USe SimpleITK to transform back to physical coordinates
    for i in range(swc.shape[0]):
        swc[i, 2:5] = mhd.TransformContinuousIndexToPhysicalPoint(swc[i, 2:5])
        swc[i, 5] *= spacing[0]  # scale the radius column as well
    # Transform to RAS spacing coordinates that can be rendered in 3D Slicer if requested
    if slicer:
        print('Converting the vtk coordinates to RAS space')
        for i in range(swc.shape[0]):
            swc[i, 2:5] = world2ras(swc[i, 2:5])
    return swc
def swc2vtk(swc, outvtkpath):
    """Serialise a traced tree to a legacy-ASCII VTK polydata file.

    swc: object exposing get_array() -> N x 7 SWC matrix.
    outvtkpath: destination file path.
    """
    swc_arr = swc.get_array()
    nnode = swc_arr.shape[0]
    parts = ['# vtk DataFile Version 2.0\n',
             'Generated with Rivuletpy\n',
             'ASCII\n',
             'DATASET POLYDATA\n',
             'POINTS {} float\n'.format(nnode)]
    # One point per node; remember each node id's point index.
    id2vtkidx = {}
    for i in range(nnode):
        parts.append('{} {} {}\n'.format(swc_arr[i, 2],
                                         swc_arr[i, 3],
                                         swc_arr[i, 4]))
        id2vtkidx[int(swc_arr[i, 0])] = i
    # One 2-point line per valid parent link.
    nline = 0
    lineparts = []
    for i in range(nnode):
        nid, pid = swc_arr[i, 0], swc_arr[i, -1]
        if pid >= 0 and int(pid) in id2vtkidx:
            nline += 1
            lineparts.append('{} {} {}\n'.format(2,
                                                 id2vtkidx[int(nid)],
                                                 id2vtkidx[int(pid)]))
    parts.append('LINES {} {}\n'.format(nline, nline * 3))
    parts.extend(lineparts)
    # Per-point scalar arrays: contour values and a zero indicator array.
    parts.append("POINT_DATA {}\n".format(nnode))
    parts.append("SCALARS contourArray double\n")
    parts.append("LOOKUP_TABLE default\n")
    for i in range(nnode):
        parts.append('{}\n'.format(swc_arr[i, -2]))
    parts.append("SCALARS indicatorArray char\n")
    parts.append("LOOKUP_TABLE default\n")
    parts.extend('0\n' for _ in range(nnode))
    with open(outvtkpath, 'w') as f:
        f.write(''.join(parts))
| 5,620 | 29.22043 | 92 | py |
rivuletpy | rivuletpy-master/rivuletpy/utils/rendering.py | """
2D rendering framework
"""
from __future__ import division
import os
import six
import sys
import pyglet
from pyglet.gl import *
import math
import numpy as np
RAD2DEG = 57.29577951308232
def get_display(spec):
    """Convert a display specification (such as :0) into an actual Display
    object.
    Pyglet only supports multiple Displays on Linux.
    """
    if spec is None:
        return None
    elif isinstance(spec, six.string_types):
        return pyglet.canvas.Display(spec)
    else:
        # NOTE(review): `error` is not defined or imported in this module, so
        # this branch raises NameError instead of the intended error type —
        # confirm whether an import (e.g. `from gym import error`) is missing.
        raise error.Error(
            'Invalid display specification: {}. (Must be a string like :0 or None.)'.format(spec))
class Viewer(object):
    """2D rendering window: maintains persistent and one-shot geometry
    lists and renders them through a shared Transform each frame."""

    def __init__(self, width, height, display=None):
        display = get_display(display)
        self.width = width
        self.height = height
        self.window = pyglet.window.Window(
            width=width, height=height, display=display)
        self.window.on_close = self.window_closed_by_user
        self.geoms = []           # persistent geometry
        self.onetime_geoms = []   # drawn once, cleared after render()
        self.transform = Transform()
        # Alpha blending so RGBA colours composite correctly.
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    def close(self):
        self.window.close()

    def window_closed_by_user(self):
        self.close()

    def set_bounds(self, left, right, bottom, top):
        # Map the world rectangle onto the full window.
        assert right > left and top > bottom
        scalex = self.width/(right-left)
        scaley = self.height/(top-bottom)
        # NOTE(review): the y translation uses scalex, not scaley — looks
        # like a typo; confirm whether `-bottom*scaley` was intended.
        self.transform = Transform(
            translation=(-left*scalex, -bottom*scalex),
            scale=(scalex, scaley))

    def add_geom(self, geom):
        self.geoms.append(geom)

    def add_onetime(self, geom):
        self.onetime_geoms.append(geom)

    def render(self, return_rgb_array=False):
        glClearColor(1, 1, 1, 1)
        self.window.clear()
        self.window.switch_to()
        self.window.dispatch_events()
        self.transform.enable()
        for geom in self.geoms:
            geom.render()
        for geom in self.onetime_geoms:
            geom.render()
        self.transform.disable()
        arr = None
        if return_rgb_array:
            # Read back the colour buffer as HxWx3 RGB, flipped vertically
            # because GL rows start at the bottom.
            image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
            arr = np.fromstring(image_data.data, dtype=np.uint8, sep='')
            arr = arr.reshape(self.height, self.width, 4)
            arr = arr[::-1, :, 0:3]
        self.window.flip()
        self.onetime_geoms = []
        return arr

    # Convenience
    def draw_circle(self, radius=10, res=30, filled=True, **attrs):
        geom = make_circle(radius=radius, res=res, filled=filled)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def draw_polygon(self, v, filled=True, **attrs):
        geom = make_polygon(v=v, filled=filled)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def draw_polyline(self, v, **attrs):
        geom = make_polyline(v=v)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def draw_line(self, start, end, **attrs):
        geom = Line(start, end)
        _add_attrs(geom, attrs)
        self.add_onetime(geom)
        return geom

    def get_array(self):
        # Capture the currently displayed frame as an RGB array.
        self.window.flip()
        image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
        self.window.flip()
        arr = np.fromstring(image_data.data, dtype=np.uint8, sep='')
        arr = arr.reshape(self.height, self.width, 4)
        return arr[::-1, :, 0:3]
def _add_attrs(geom, attrs):
if "color" in attrs:
geom.set_color(*attrs["color"])
if "linewidth" in attrs:
geom.set_linewidth(attrs["linewidth"])
class Geom(object):
    """Base class for drawables: renders itself inside its attribute stack."""

    def __init__(self):
        self._color = Color((0, 0, 0, 1.0))
        self.attrs = [self._color]

    def render(self):
        # Enable attributes in reverse order, draw, then disable in order.
        for attr in self.attrs[::-1]:
            attr.enable()
        self.render1()
        for attr in self.attrs:
            attr.disable()

    def render1(self):
        # Subclasses implement the actual GL drawing here.
        raise NotImplementedError

    def add_attr(self, attr):
        self.attrs.append(attr)

    def set_color(self, r, g, b):
        self._color.vec4 = (r, g, b, 1)
class Attr(object):
    """Base class for render attributes (colour, line style, transforms)."""

    def enable(self):
        raise NotImplementedError

    def disable(self):
        # Most attributes need no teardown.
        pass
class Transform(Attr):
    """2D translate/rotate/scale applied via the GL matrix stack."""

    def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1, 1)):
        self.set_translation(*translation)
        self.set_rotation(rotation)
        self.set_scale(*scale)

    def enable(self):
        glPushMatrix()
        tx, ty = self.translation
        glTranslatef(tx, ty, 0)  # translate to GL anchor point
        glRotatef(RAD2DEG * self.rotation, 0, 0, 1.0)  # rotation is stored in radians
        sx, sy = self.scale
        glScalef(sx, sy, 1)

    def disable(self):
        glPopMatrix()

    def set_translation(self, newx, newy):
        self.translation = (float(newx), float(newy))

    def set_rotation(self, new):
        self.rotation = float(new)

    def set_scale(self, newx, newy):
        self.scale = (float(newx), float(newy))
class Color(Attr):
    """Sets the current GL draw colour from an RGBA 4-tuple while active."""

    def __init__(self, vec4):
        self.vec4 = vec4

    def enable(self):
        glColor4f(*self.vec4)
class LineStyle(Attr):
    """Enables a GL stipple (dash) pattern for line drawing while active."""

    def __init__(self, style):
        self.style = style  # 16-bit stipple bit pattern

    def enable(self):
        glEnable(GL_LINE_STIPPLE)
        glLineStipple(1, self.style)

    def disable(self):
        glDisable(GL_LINE_STIPPLE)
class LineWidth(Attr):
    """Sets the GL line width (in pixels) while active."""

    def __init__(self, stroke):
        self.stroke = stroke

    def enable(self):
        glLineWidth(self.stroke)
class Point(Geom):
    """A single point drawn at the current transform origin."""

    def __init__(self):
        Geom.__init__(self)

    def render1(self):
        glBegin(GL_POINTS)  # draw point
        glVertex3f(0.0, 0.0, 0.0)
        glEnd()
class FilledPolygon(Geom):
    """A filled polygon; the GL primitive is chosen from the vertex count."""

    def __init__(self, v):
        Geom.__init__(self)
        self.v = v

    def render1(self):
        nverts = len(self.v)
        if nverts == 4:
            mode = GL_QUADS
        elif nverts > 4:
            mode = GL_POLYGON
        else:
            mode = GL_TRIANGLES
        glBegin(mode)
        for p in self.v:
            glVertex3f(p[0], p[1], 0)  # z fixed at 0 for 2D rendering
        glEnd()
def make_circle(radius=10, res=30, filled=True):
    """Approximate a circle with ``res`` vertices as a filled polygon or a
    closed outline."""
    angles = (2 * math.pi * i / res for i in range(res))
    points = [(math.cos(a) * radius, math.sin(a) * radius) for a in angles]
    return FilledPolygon(points) if filled else PolyLine(points, True)
def make_polygon(v, filled=True):
    """Wrap vertex list ``v`` as a filled polygon or a closed outline."""
    return FilledPolygon(v) if filled else PolyLine(v, True)
def make_polyline(v):
    """Wrap vertex list ``v`` as an open (non-closed) polyline."""
    return PolyLine(v, False)
def make_capsule(length, width):
    """A horizontal capsule: a rectangle of the given length with a circle
    at each end to round the caps."""
    half = width / 2
    body = make_polygon([(0, -half), (0, half), (length, half), (length, -half)])
    cap_left = make_circle(half)
    cap_right = make_circle(half)
    # Shift the second cap to the far end of the body.
    cap_right.add_attr(Transform(translation=(length, 0)))
    return Compound([body, cap_left, cap_right])
class Compound(Geom):
    """Groups several geoms so they render as one, sharing this geom's colour."""

    def __init__(self, gs):
        Geom.__init__(self)
        self.gs = gs
        # Strip per-child colours so the compound's own colour wins.
        for g in self.gs:
            g.attrs = list(filter(lambda a: not isinstance(a, Color), g.attrs))

    def render1(self):
        for g in self.gs:
            g.render()
class PolyLine(Geom):
    """An open or closed polyline with an adjustable stroke width."""

    def __init__(self, v, close):
        Geom.__init__(self)
        self.v = v
        self.close = close
        self.linewidth = LineWidth(1)
        self.add_attr(self.linewidth)

    def render1(self):
        mode = GL_LINE_LOOP if self.close else GL_LINE_STRIP
        glBegin(mode)
        for p in self.v:
            glVertex3f(p[0], p[1], 0)  # z fixed at 0 for 2D rendering
        glEnd()

    def set_linewidth(self, x):
        self.linewidth.stroke = x
class Line(Geom):
    """A single 2D line segment."""

    def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)):
        Geom.__init__(self)
        self.start = start
        self.end = end
        self.linewidth = LineWidth(1)
        self.add_attr(self.linewidth)

    def render1(self):
        glBegin(GL_LINES)
        glVertex2f(*self.start)
        glVertex2f(*self.end)
        glEnd()
class Image(Geom):
    """A textured image drawn centred on the origin at a fixed size."""

    def __init__(self, fname, width, height):
        Geom.__init__(self)
        self.width = width
        self.height = height
        img = pyglet.image.load(fname)
        self.img = img
        self.flip = False  # set (but not read) here; presumably used by callers — TODO confirm

    def render1(self):
        # Blit centred so transforms rotate/scale about the image middle.
        self.img.blit(-self.width/2, -self.height/2,
                      width=self.width, height=self.height)
# ================================================================
class SimpleImageViewer(object):
    """Minimal window that displays successive RGB frames via imshow()."""

    def __init__(self, display=None):
        self.window = None   # created lazily on the first imshow()
        self.isopen = False
        self.display = display

    def imshow(self, arr):
        if self.window is None:
            # Size the window to the first frame; later frames must match.
            height, width, channels = arr.shape
            self.window = pyglet.window.Window(
                width=width, height=height, display=self.display)
            self.width = width
            self.height = height
            self.isopen = True
        assert arr.shape == (
            self.height, self.width, 3), "You passed in an image with the wrong number shape"
        # Negative pitch: pyglet expects rows bottom-up, numpy stores top-down.
        image = pyglet.image.ImageData(
            self.width, self.height, 'RGB', arr.tobytes(), pitch=self.width * -3)
        self.window.clear()
        self.window.switch_to()
        self.window.dispatch_events()
        image.blit(0, 0)
        self.window.flip()

    def close(self):
        if self.isopen:
            self.window.close()
            self.isopen = False

    def __del__(self):
        self.close()
| 9,508 | 24.980874 | 98 | py |
rivuletpy | rivuletpy-master/filtering/thresholding.py | import numpy as np
def fuzzy(img, level=128, p=2):
'''
Image auto thresholding with measure of fuzziness using the Yager's measure
Implemented the algorithm following Eq.(11) in
L. K. Huang and M. J. J. Wang, “Image thresholding by minimizing the
measures of fuzziness,” Pattern Recognit., vol. 28, no. 1, pp. 41–51, 1995.
img: a ND numpy matrix
level: number of levels to partition the histogram
p: order of metric - 1 for Hamming metric - 2 for Euclidean metric
'''
assert (p == 1 or p == 2)
high, low = img.max(), img.min(),
step = (high - low) / level
backweighted = foreweighted = 0
nvox = img.size
count = np.asarray([((img >= t - step / 2) & (img < t + step / 2)).sum()
for t in np.arange(low, high, step)])
weighted = np.arange(low, high, step) * count
backcount = np.asarray(
[(img < t + step / 2).sum() for t in np.arange(low, high, step)])
forecount = np.asarray([nvox] * level) - backcount
backweighted = np.cumsum(weighted)
# foreweighted = np.full((level,), backweighted[-1]) - backweighted
foreweighted = np.cumsum(weighted[::-1])[::-1]
muback = backweighted / backcount
mufore = foreweighted / forecount
yager = np.zeros((level, ))
for i, t1 in enumerate(np.arange(low, high, step)):
gsum = 0.
for j, t2 in enumerate(np.arange(low, high, step)):
mu = muback[i] if t2 <= t1 else mufore[i]
C = np.abs(t1 - low) if t2 <= t1 else np.abs(high - t1)
C = 1e-4 if C == 0 else C
mux = 1. / (1. + np.abs(t2 - mu) / C)
mux_reversed = 1. - mux
gsum += np.abs(mux - mux_reversed)**p
yager[i] = gsum**1 / p
# The chosen threshold with least fuzziness
return yager.argmin() * step + low, yager
def suppress(img, threshold):
    """Zero out every voxel at or below ``threshold`` (in place) and return img."""
    below = img <= threshold
    img[below] = 0
    return img
def rescale(img, overwrite=False):
    '''
    Rescale image intensities linearly to 0~255, returning a float array.

    Note: ``astype`` always copies, so even with ``overwrite=True`` the
    returned array is a new float array; the flag only skips the initial
    defensive copy of the input.

    Bug fix: a constant image previously produced NaNs (0/0 division);
    it now maps to all zeros.
    '''
    result = img if overwrite else img.copy()
    result = result.astype('float')
    result -= result.min()
    peak = result.max()
    if peak > 0:
        result /= peak
        result *= 255
    # else: constant image -> all zeros, avoiding 0/0 -> NaN
    return result
| 2,224 | 31.720588 | 79 | py |
rivuletpy | rivuletpy-master/filtering/anisotropic.py | import numpy as np
from scipy.special import jv # Bessel Function of the first kind
from scipy.linalg import eig
from scipy.fftpack import fftn, ifftn, ifft
# import progressbar
from tqdm import tqdm
from scipy.ndimage import filters as fi
import math
# An implementation of the Optimally Oriented
# M.W.K. Law and A.C.S. Chung, ``Three Dimensional Curvilinear
# Structure Detection using Optimally Oriented Flux'', ECCV 2008, pp.
# 368--382.
# Max W. K. Law et al., ``Dilated Divergence based Scale-Space
# Representation for Curve Analysis'', ECCV 2012, pp. 557--571.
# Author: Siqi Liu
def response(img, rsptype='oof', **kwargs):
    """Compute a multi-scale anisotropic filter response over a 3D image.

    img: 3D numpy array.
    rsptype: 'oof' (optimally oriented flux) or 'bg' (bi-gaussian).
    kwargs: 'radii' (iterable of scales) always; 'memory_save' for 'oof';
            'rho' for 'bg'.
    Returns (rsp, V, W): the per-voxel maximal response across scales, and
    the eigenvectors / eigenvalues of the winning tensor at each voxel.
    """
    eps = 1e-12
    rsp = np.zeros(img.shape)
    # bar = progressbar.ProgressBar(max_value=kwargs['radii'].size)
    # bar.update(0)
    W = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3))  # Eigen values to save
    V = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3, 3))  # Eigen vectors to save
    if rsptype == 'oof':
        rsptensor = ooftensor(img, kwargs['radii'], kwargs['memory_save'])
    elif rsptype == 'bg':
        rsptensor = bgtensor(img, kwargs['radii'], kwargs['rho'])
    pbar = tqdm(total=len(kwargs['radii']))
    for i, tensorfield in enumerate(rsptensor):
        # Make the full symmetric 3x3 tensor from the six unique components.
        f11, f12, f13, f22, f23, f33 = tensorfield
        tensor = np.stack((f11, f12, f13, f12, f22, f23, f13, f23, f33), axis=-1)
        # Free the components aggressively — the volumes are large.
        del f11
        del f12
        del f13
        del f22
        del f23
        del f33
        tensor = tensor.reshape(img.shape[0], img.shape[1], img.shape[2], 3, 3)
        w, v = np.linalg.eigh(tensor)
        del tensor
        sume = w.sum(axis=-1)
        nvox = img.shape[0] * img.shape[1] * img.shape[2]
        sortidx = np.argsort(np.abs(w), axis=-1)
        sortidx = sortidx.reshape((nvox, 3))
        # Sort eigenvalues according to their abs
        w = w.reshape((nvox, 3))
        for j, (idx, value) in enumerate(zip(sortidx, w)):
            w[j, :] = value[idx]
        w = w.reshape(img.shape[0], img.shape[1], img.shape[2], 3)
        # Sort eigenvectors according to their abs
        v = v.reshape((nvox, 3, 3))
        for j, (idx, vec) in enumerate(zip(sortidx, v)):
            v[j, :, :] = vec[:, idx]
        del sortidx
        v = v.reshape(img.shape[0], img.shape[1], img.shape[2], 3, 3)
        mine = w[:, :, :, 0]
        mide = w[:, :, :, 1]
        maxe = w[:, :, :, 2]
        if rsptype == 'oof':
            feat = maxe
        elif rsptype == 'bg':
            feat = -mide / maxe * (mide + maxe)  # Medialness measure response
        cond = sume >= 0
        feat[cond] = 0  # Filter the non-anisotropic voxels
        del mine
        del maxe
        del mide
        del sume
        # Keep, per voxel, the scale with the strongest absolute response.
        cond = np.abs(feat) > np.abs(rsp)
        W[cond, :] = w[cond, :]
        V[cond, :, :] = v[cond, :, :]
        rsp[cond] = feat[cond]
        del v
        del w
        del tensorfield
        del feat
        del cond
        pbar.update(1)
    return rsp, V, W
def bgkern3(kerlen, mu=0, sigma=3., rho=0.2):
    '''
    Generate the bi-gaussian kernel: a normal Gaussian core within radius
    sigma, blended with a scaled, phase-shifted Gaussian outside it.

    kerlen: kernel side length in voxels (should be odd).
    mu / sigma: mean and scale of the central Gaussian.
    rho: ratio of the outer Gaussian's scale to sigma.
    '''
    sigma_b = rho * sigma
    k = rho ** 2
    kr = (kerlen - 1) / 2  # kernel 'radius' around the centre voxel
    X, Y, Z = np.meshgrid(np.arange(-kr, kr+1),
                          np.arange(-kr, kr+1),
                          np.arange(-kr, kr+1))
    dist = np.linalg.norm(np.stack((X, Y, Z)), axis=0)  # distance from the kernel centre
    G = gkern3(dist, mu, sigma)  # Normal Gaussian with mean at origin
    Gb = gkern3(dist, sigma-sigma_b, sigma_b)
    # NOTE(review): c0/c1 appear to be offsets aligning the two profiles
    # where they meet at r = sigma — verify against the bi-gaussian paper.
    c0 = k * Gb[0, 0, math.floor(sigma_b)] - G[0, 0, math.floor(sigma)]
    c1 = G[0, 0, math.floor(sigma)] - k * Gb[0, 0, math.floor(sigma_b)] + c0
    G += c0
    Gb = k * Gb + c1  # Inverse Gaussian with phase shift
    # Replace the centre of Gb with G
    central_region = dist <= sigma
    del dist
    X = (X[central_region] + kr).astype('int')
    Y = (Y[central_region] + kr).astype('int')
    Z = (Z[central_region] + kr).astype('int')
    Gb[X, Y, Z] = G[X, Y, Z]
    return Gb
def eigh(a, UPLO='L'):
    """Hermitian/symmetric eigendecomposition, vectorised over stacks.

    Parameters
    ----------
    a : array_like (..., M, M)
        Hermitian/Symmetric matrices whose eigenvalues and
        eigenvectors are to be computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').

    Returns
    -------
    w : ndarray (..., M)
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    v : ndarray (..., M, M)
        The column ``v[..., :, i]`` is the normalized eigenvector
        corresponding to the eigenvalue ``w[..., i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    Notes
    -----
    Bug fix: the previous implementation (borrowed from dipy) guarded a
    Python fallback loop with ``NUMPY_LESS_1_8``, a name never defined in
    this module, so any stacked input (ndim > 2) raised NameError whenever
    the guard was evaluated truthily. ``np.linalg.eigh`` has supported
    stacked matrices natively since NumPy 1.8, so it is called directly.
    """
    return np.linalg.eigh(np.asarray(a), UPLO)
def gkern3(dist, mu=0., sigma=3.):
    '''
    Evaluate an isotropic 3D Gaussian profile at the given distances.
    '''
    norm = sigma * np.sqrt(2. * np.pi)
    z = (dist - mu) / sigma
    return np.exp(-0.5 * z ** 2) / norm
def hessian3(x):
    """
    Finite-difference Hessian of a 3D scalar field.

    Parameters:
    - x : ndarray (3D)
    Returns the six unique second derivatives [f11, f12, f13, f22, f23, f33]
    (the Hessian is symmetric, so the remaining three are duplicates).
    """
    gx, gy, gz = np.gradient(x)
    f11, f12, f13 = np.gradient(gx)
    _, f22, f23 = np.gradient(gy)
    f33 = np.gradient(gz)[2]
    return [f11, f12, f13, f22, f23, f33]
def bgtensor(img, lsigma, rho=0.2):
    """Generator yielding the Hessian of the bi-gaussian-filtered image at
    each scale in ``lsigma``.

    NOTE(review): ``fimg`` is overwritten with the filtered result on every
    iteration, so each scale is applied on top of the previous one rather
    than on the original image — confirm this cumulative filtering is
    intended.
    """
    eps = 1e-12
    fimg = fftn(img, overwrite_x=True)
    for s in lsigma:
        # Convolve in the Fourier domain with the bi-gaussian kernel at scale s.
        jvbuffer = bgkern3(kerlen=math.ceil(s)*6+1, sigma=s, rho=rho)
        jvbuffer = fftn(jvbuffer, shape=fimg.shape, overwrite_x=True) * fimg
        fimg = ifftn(jvbuffer, overwrite_x=True)
        yield hessian3(np.real(fimg))
def eigval33(tensorfield):
    '''Calculate the eigenvalues of massive 3x3 real symmetric matrices.

    tensorfield: the six unique components (a11, a12, a13, a22, a23, a33),
    each an ndarray of identical shape. Returns three ndarrays holding the
    eigenvalues, obtained with the closed-form trigonometric solution of
    the characteristic cubic. Variables are reused in place to keep the
    memory footprint down.

    Bug fix: values below -1 fed to arccos were clamped to +1
    (``d[d < -1] = 1``) instead of -1, silently yielding wrong eigenvalues
    whenever round-off pushed the cosine argument just below -1.
    '''
    a11, a12, a13, a22, a23, a33 = tensorfield
    eps = 1e-50  # keeps the closed form away from exact degeneracies
    b = a11 + eps
    d = a22 + eps
    j = a33 + eps
    # c: sum of principal 2x2 minors; d: -determinant; b: -trace.
    c = - a12**2. - a13**2. - a23**2. + b * d + d * j + j * b
    d = - b * d * j + a23**2. * b + a12**2. * j - a13**2. * d + 2. * a13 * a12 * a23
    b = - a11 - a22 - a33 - 3. * eps
    # Depressed-cubic coefficients.
    d = d + (2. * b**3. - 9. * b * c) / 27
    c = b**2. / 3. - c
    c = c**3.
    c = c / 27
    c[c < 0] = 0
    c = np.sqrt(c)
    j = c ** (1./3.)
    c = c + (c == 0).astype('float')  # avoid division by zero below
    d = -d / 2. / c
    # Clamp the cosine argument into [-1, 1] against round-off error.
    d[d > 1] = 1
    d[d < -1] = -1
    d = np.real(np.arccos(d) / 3.)
    c = j * np.cos(d)
    d = j * np.sqrt(3.) * np.sin(d)
    b = -b / 3.
    j = -c - d + b
    d = -c + d + b
    b = 2. * c + b
    return b, j, d
def oofftkernel(kernel_radius, r, sigma=1, ntype=1):
    """Fourier-domain OOF (optimally oriented flux) kernel for tube radius r.

    kernel_radius: frequency-domain radial coordinate array (nonzero).
    r: the structure radius being probed.
    sigma: smoothing scale; ntype: normalisation type
    (see Law & Chung, ECCV 2008, cited in the module header).
    """
    eps = 1e-12
    # Radius-dependent normalisation term (Bessel-function based).
    normalisation = 4/3 * np.pi * r**3 / (jv(1.5, 2*np.pi*r*eps) / eps ** (3/2)) / r**2 * \
        (r / np.sqrt(2.*r*sigma - sigma**2)) ** ntype
    jvbuffer = normalisation * np.exp( (-2 * sigma**2 * np.pi**2 * kernel_radius**2) / (kernel_radius**(3/2) ))
    return (np.sin(2 * np.pi * r * kernel_radius) / (2 * np.pi * r * kernel_radius) - np.cos(2 * np.pi * r * kernel_radius)) * \
        jvbuffer * np.sqrt( 1./ (np.pi**2 * r *kernel_radius ))
def ooftensor(img, radii, memory_save=True):
    '''
    Optimally-oriented-flux tensor field generator.

    Yields, for each radius in radii, the six unique components
    [f11, f12, f13, f22, f23, f33] of the symmetric flux tensor, computed by
    weighting the Fourier-domain kernel response with coordinate products.
    type: oof, bg
    '''
    # sigma = 1 # TODO: Pixel spacing
    eps = 1e-12
    # ntype = 1 # The type of normalisation
    fimg = fftn(img, overwrite_x=True)
    shiftmat = ifftshiftedcoormatrix(fimg.shape)
    x, y, z = shiftmat
    # Normalise coordinates by the image extent along each axis
    x = x / fimg.shape[0]
    y = y / fimg.shape[1]
    z = z / fimg.shape[2]
    kernel_radius = np.sqrt(x ** 2 + y ** 2 + z ** 2) + eps # The distance from origin
    for r in radii:
        # Make the fourier convolutional kernel
        jvbuffer = oofftkernel(kernel_radius, r) * fimg
        if memory_save:
            # NOTE(review): this branch multiplies by ifftshiftedcoordinate
            # (already shape-normalised coordinates) *and* by x/y/z, i.e. the
            # coordinate weighting is applied twice, unlike the else branch.
            # It also keeps the complex per-axis ifft output (no np.real),
            # whereas the else branch takes the real part -- confirm intent.
            # F11
            buffer = ifftshiftedcoordinate(img.shape, 0) ** 2 * x * x * jvbuffer
            buffer = ifft(buffer, axis=0)
            buffer = ifft(buffer, axis=1)
            buffer = ifft(buffer, axis=2)
            f11 = buffer.copy()
            # F12
            buffer = ifftshiftedcoordinate(img.shape, 0) * ifftshiftedcoordinate(img.shape, 1) * x * y * jvbuffer
            buffer = ifft(buffer, axis=0)
            buffer = ifft(buffer, axis=1)
            buffer = ifft(buffer, axis=2)
            f12 = buffer.copy()
            # F13
            buffer = ifftshiftedcoordinate(img.shape, 0) * ifftshiftedcoordinate(img.shape, 2) * x * z * jvbuffer
            buffer = ifft(buffer, axis=0)
            buffer = ifft(buffer, axis=1)
            buffer = ifft(buffer, axis=2)
            f13 = buffer.copy()
            # F22
            buffer = ifftshiftedcoordinate(img.shape, 1) ** 2 * y ** 2 * jvbuffer
            buffer = ifft(buffer, axis=0)
            buffer = ifft(buffer, axis=1)
            buffer = ifft(buffer, axis=2)
            f22 = buffer.copy()
            # F23
            buffer = ifftshiftedcoordinate(img.shape, 1) * ifftshiftedcoordinate(img.shape, 2) * y * z * jvbuffer
            buffer = ifft(buffer, axis=0)
            buffer = ifft(buffer, axis=1)
            buffer = ifft(buffer, axis=2)
            f23 = buffer.copy()
            # F33
            buffer = ifftshiftedcoordinate(img.shape, 2) * ifftshiftedcoordinate(img.shape, 2) * z * z * jvbuffer
            buffer = ifft(buffer, axis=0)
            buffer = ifft(buffer, axis=1)
            buffer = ifft(buffer, axis=2)
            f33 = buffer.copy()
        else:
            # Single full inverse FFT per component, real part kept
            f11 = np.real(ifftn(x * x * jvbuffer))
            f12 = np.real(ifftn(x * y * jvbuffer))
            f13 = np.real(ifftn(x * z * jvbuffer))
            f22 = np.real(ifftn(y * y * jvbuffer))
            f23 = np.real(ifftn(y * z * jvbuffer))
            f33 = np.real(ifftn(z * z * jvbuffer))
        yield [f11, f12, f13, f22, f23, f33]
# The "shape" argument is a vector specifying the size of the returned
# coordinate matrices. The number of returned matrices equals the
# dimensionality of "shape". All dimensions are 1-based.
def ifftshiftedcoormatrix(shape):
    """Per-axis ifftshift-ordered coordinate grids for the given shape.

    Returns a list with one float ndarray of the full shape per axis; along
    each axis the values run over the shifted coordinates, offset by
    -(floor(size/2) + 1).
    """
    shape = np.asarray(shape)
    half = np.floor(shape / 2).astype('int')
    grids = []
    for axis in range(shape.size):
        # Shifted coordinate values along this axis (float via the -1. offset)
        vals = np.hstack((np.arange(half[axis], shape[axis]),
                          np.arange(0, half[axis]))) - half[axis] - 1.
        view = np.ones(shape.size).astype('int')
        view[axis] = shape[axis]
        tiles = shape.copy()
        tiles[axis] = 1
        grids.append(np.tile(vals.reshape(view), tiles))
    return grids
def ifftshiftedcoordinate(shape, axis):
    """Single-axis ifftshift-ordered coordinate grid, normalised by the axis size.

    Equivalent to one entry of ifftshiftedcoormatrix(shape) divided by
    shape[axis], broadcast to the full shape.

    Bug fix: the reshape parameter was previously built as a float array;
    ndarray.reshape requires integer dimensions and raises TypeError on
    modern NumPy. It is now an int array. The stray semicolons were removed.
    """
    shape = np.asarray(shape)
    p = np.floor(np.asarray(shape) / 2).astype('int')
    a = (np.hstack((np.arange(p[axis], shape[axis]),
                    np.arange(0, p[axis]))) - p[axis] - 1.).astype('float')
    a /= float(shape[axis])
    reshapepara = np.ones((shape.size,)).astype('int')
    reshapepara[axis] = shape[axis]
    A = a.reshape(reshapepara)
    repmatpara = shape.copy()
    repmatpara[axis] = 1
    return np.tile(A, repmatpara)
def nonmaximal_suppression3(img, evl, evt, radius, threshold=0):
    '''
    Non-maximal suppression with oof eigenvectors.

    img: the input image or filter response
    evl: eigenvalue field, shape img.shape + (3,); their sum at a voxel is
         used as the local suppression radius
    evt: eigenvector field, shape img.shape + (3, k); [..., :, 0] is taken
         as the primary direction at each voxel
    radius: nominal radius (NOTE: currently overridden per-voxel by the
            eigenvalue sum below -- TODO confirm intent)
    threshold: voxels <= threshold are zeroed and never considered

    Returns a copy of img in which every foreground voxel that has a
    brighter neighbour on the plane orthogonal to its primary eigenvector
    (within the local radius) is zeroed. Repeats until a pass changes
    nothing. (A deprecated rotated-stencil variant was removed.)
    '''
    suppressed = img.copy()
    suppressed[suppressed <= threshold] = 0
    suppressed_ctr = -1
    # bug fix: was "while suppressed_ctr is not 0" -- identity comparison
    # with an int literal (SyntaxWarning, implementation-dependent)
    while suppressed_ctr != 0:
        suppressed_ctr = 0
        fgidx = np.argwhere(suppressed > threshold)  # Find foreground voxels
        while fgidx.shape[0] > 0:
            randidx = np.random.randint(0, fgidx.shape[0])
            v = fgidx[randidx, :]  # Randomly choose a foreground voxel
            fgidx = np.delete(fgidx, randidx, 0)
            e = evt[v[0], v[1], v[2], :, 0]  # The primary eigenvector on v
            # Select voxels on the orthogonal plane of the eigenvector and
            # within a distance to v
            # (plane test: http://math.stackexchange.com/questions/82151)
            vtile = np.tile(v, (fgidx.shape[0], 1))
            etile = np.tile(e, (fgidx.shape[0], 1))
            cond1 = np.abs(etile * (fgidx - vtile)).sum(axis=-1) < (1.5 * np.sqrt(6) / 4.)
            # Local radius from the eigenvalue magnitudes at v
            radius = (evl[v[0], v[1], v[2], :]).sum()
            cond2 = np.linalg.norm(vtile - fgidx, axis=-1) < radius
            cond = np.logical_and(cond1, cond2)
            l = fgidx[cond, :]
            if l.size == 0:
                continue
            # Voxel values of the selected in-plane neighbours
            lv = np.asarray([suppressed[l[i, 0], l[i, 1], l[i, 2]]
                             for i in range(l.shape[0])])
            if lv.max() > suppressed[v[0], v[1], v[2]]:
                suppressed[v[0], v[1], v[2]] = 0
                suppressed_ctr += 1
    return suppressed
rivuletpy | rivuletpy-master/filtering/riveal.py | import numpy as np
import math
import skfmm
from tqdm import tqdm
from scipy.ndimage.morphology import binary_dilation
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.noise import GaussianDropout, GaussianNoise
from keras.layers.advanced_activations import SReLU
def riveal(img, swc, K=9, nsample=8e4, epoch=20):
    """Refine an image volume with a 2.5D CNN trained around an swc skeleton.

    img: 3-D image volume (rescaled by its max internally).
    swc: (N, >=8) node array; cols 2:5 are coordinates, col 7 a confidence.
    K: half-width of the sampled 2.5D blocks.
    nsample: number of training blocks (NOTE(review): default 8e4 is a
        float; it reaches np.zeros((nsample, ...)) below, which requires
        integer dims on modern NumPy -- confirm).
    epoch: CNN training epochs.
    Returns the CNN prediction volume cropped back to the input size.
    """
    print('-- oiginal image size: ', img.shape)
    K = math.floor(K) # Make sure K is integer to avoid warnings
    # Pad the image and swc
    margin = 3 * K
    img = padimg(img, margin)
    swc = padswc(swc, margin)
    # Make the skeleton distance transform
    print('-- Distance transform for swc...')
    dt, foreground_region = make_skdt(img.shape, swc, K)
    # Normalise data
    # img = standardise(img)
    # dt = standardise(dt)
    img /= img.max()
    # dt /= dt.max()
    # Make the confident region
    print('==swc shape:', swc.shape)
    print('-- Making the confidence regions...(1/4)')
    high_conf_region = make_conf_region(
        img.shape, swc, K, low_conf=0.5, high_conf=1.)
    # print('-- Making the confidence regions...(2/4)')
    # mid_conf_region = make_conf_region(img.shape, swc, K,
    #                                    low_conf=0.25, high_conf=0.5)
    print('-- Making the confidence regions...(3/4)')
    low_conf_region = make_conf_region(
        img.shape, swc, K, low_conf=0., high_conf=0.25)
    # # Fill only the central part of background region
    print('-- Making the confidence regions...(4/4)')
    background_region = np.zeros(img.shape)
    # NOTE(review): the next assignment is immediately overwritten -- the
    # logical_not line is dead; confirm which region was intended.
    bg = np.logical_not(foreground_region)
    bg = np.logical_and(foreground_region, img > 0)
    for i in range(3):
        bg = binary_dilation(bg)
    background_region[margin:-margin, margin:-margin, margin:-margin] = bg[
        margin:-margin, margin:-margin, margin:-margin]
    # NOTE(review): debug visualisation -- plt.show() blocks execution until
    # the window is closed; presumably a leftover, confirm before batch runs.
    from matplotlib import pyplot as plt
    plt.subplot(3, 1, 1)
    plt.imshow(high_conf_region.max(axis=-1))
    plt.title('high conf')
    plt.subplot(3, 1, 2)
    plt.imshow(low_conf_region.max(axis=-1))
    plt.title('low conf')
    plt.subplot(3, 1, 3)
    plt.imshow(background_region.max(axis=-1))
    plt.title('bg')
    plt.show()
    # Randomly sample 2.5D blocks from the include region
    print('-- Sampling blocks')
    x1, y1 = sample_block(img, dt, high_conf_region, K,
                          math.ceil(nsample * 0.75))
    # x2, y2 = sample_block(img, dt, mid_conf_region,
    #                       K, math.ceil(nsample * 0.2))
    x3, y3 = sample_block(img, dt, low_conf_region, K,
                          math.ceil(nsample * 0.1))
    y3.fill(0.)
    x4, y4 = sample_block(img, dt, background_region, K,
                          math.ceil(nsample * 0.15))
    y4.fill(0.)
    train_x = np.vstack((x1, x3, x4))
    train_y = np.vstack((y1, y3, y4))
    # Build the CNN with keras+tensorflow
    print('--Training CNN...')
    model = traincnn(train_x, train_y, K, epoch)
    # Make the prediction within an area larger than
    # the segmentation of the image
    print('-- Predicting...')
    bimg = img > 0
    for i in range(6):
        bimg = binary_dilation(bimg)
    include_region = bimg > 0
    include_idx = np.argwhere(include_region)
    nidx = include_idx.shape[0]
    predict_x = np.zeros((nsample, 2 * K + 1, 2 * K + 1, 3))
    rest = nidx
    resultimg = np.zeros(img.shape)
    pbar = tqdm(total=nidx)
    # Predict every batch of blocks
    while rest > 0:
        startidx = -rest
        endidx = -rest + nsample if -rest + nsample < nidx else nidx
        rest -= nsample
        # Write the value to each include voxel
        for i, gidx in enumerate(range(int(startidx), int(endidx))):
            bx, by, bz = include_idx[gidx, :]
            predict_x[i, :, :, 0] = img[bx - K:bx + K + 1, by - K:by + K + 1,
                                        bz]
            predict_x[i, :, :, 1] = img[bx - K:bx + K + 1, by, bz - K:bz + K +
                                        1]
            predict_x[i, :, :, 2] = img[bx, by - K:by + K + 1, bz - K:bz + K +
                                        1]
        pd = model.predict(predict_x, batch_size=64, verbose=0).flatten()
        for i, gidx in enumerate(range(int(startidx), int(endidx))):
            bx, by, bz = include_idx[gidx, :]
            resultimg[bx, by, bz] = pd[i]
        pbar.update(nsample)
    resultimg = unpadimg(resultimg, margin)
    return resultimg
def standardise(img, zeromean=True):
    """Scale img to unit standard deviation.

    zeromean: when True (default) also subtract the mean, i.e. a full
        z-score standardisation. Fix: this flag was previously accepted but
        ignored; it is now honoured. Default behaviour is unchanged.
    """
    if zeromean:
        return (img - img.mean()) / img.std()
    return img / img.std()
def constrain_range(min, max, minlimit, maxlimit):
    """Return list(range(min, max)) with both endpoints clamped to
    [minlimit, maxlimit]."""
    lo = min if min > minlimit else minlimit
    hi = max if max < maxlimit else maxlimit
    return list(range(lo, hi))
def sample_block(img, dt, include_region, K, nsample):
include_idx = np.argwhere(include_region)
nidx = include_idx.shape[0]
nsample = nidx if nsample > nidx else nsample
idx2train = include_idx[np.random.choice(nidx, nsample), :]
# Claim the memory for 2.5D blocks
x = np.zeros((nsample, 2 * K + 1, 2 * K + 1, 3))
y = np.zeros((nsample, 1)) # Claim the memory for 2.5D blocks
for i in range(idx2train.shape[0]):
bx, by, bz = idx2train[i, :]
x[i, :, :, 0] = img[bx - K:bx + K + 1, by - K:by + K + 1, bz]
x[i, :, :, 1] = img[bx - K:bx + K + 1, by, bz - K:bz + K + 1]
x[i, :, :, 2] = img[bx, by - K:by + K + 1, bz - K:bz + K + 1]
y[i] = dt[bx, by, bz]
return x, y
def make_conf_region(imshape, swc, K, low_conf=0.0, high_conf=1.0):
    """Binary region around swc nodes whose confidence (col 7) lies in
    [low_conf, high_conf].

    A cube of half-width ceil(0.75*K) is stamped around every selected node,
    clipped to imshape via constrain_range.

    Bug fix: with the default bounds `confswc` was never assigned and the
    function raised NameError; defaults now select all nodes.
    """
    if low_conf != 0.0 or high_conf != 1.0:
        confswc = np.vstack((swc[np.logical_and(swc[:, 7] >= low_conf,
                                                swc[:, 7] <= high_conf), :]))
    else:
        confswc = swc
    region = np.zeros(imshape)
    r = math.ceil(K * 0.75)
    for i in range(confswc.shape[0]):
        node = confswc[i, :]
        coord = [math.floor(c) for c in node[2:5]]
        rg1 = constrain_range(coord[0] - r, coord[0] + r + 1, 0, imshape[0])
        rg2 = constrain_range(coord[1] - r, coord[1] + r + 1, 0, imshape[1])
        rg3 = constrain_range(coord[2] - r, coord[2] + r + 1, 0, imshape[2])
        X, Y, Z = np.meshgrid(rg1, rg2, rg3)
        # Skip nodes whose clipped box is empty
        if len(X) == 0 or len(Y) == 0 or len(Z) == 0:
            continue
        region[X, Y, Z] = 1
    # _, region = make_skdt(imshape, confswc, K)
    return region
def make_skdt(imshape, swc, K, a=6):
    """Exponentially rescaled distance transform from the swc skeleton.

    Returns (dt, include_region): dt is exp(a*(1 - d/dm)) - 1 inside a band
    of half-width dm = floor(K/2) around the skeleton (0 outside); the
    include_region mask covers distances up to 1.5*dm.
    """
    skeleton = make_sk_img(imshape, swc)
    dm = math.floor(K / 2)
    dist = skfmm.distance(skeleton, dx=1)
    include_region = dist <= 1.5 * dm
    outside = dist >= dm
    dist = np.exp(a * (1 - dist / dm)) - 1
    dist[outside] = 0
    return dist, include_region
def makecnn(in_shape, K):
    """Build the 2.5D regression CNN (Keras 1.x Sequential API).

    in_shape: training-array shape; in_shape[1:] is the per-sample input
        shape (2K+1, 2K+1, 3).
    K: block half-width (currently unused inside this function).
    Returns an uncompiled keras model with a single linear output unit.
    """
    model = Sequential()
    # Two conv/pool stages with SReLU activations and Gaussian
    # noise/dropout regularisation ('tf' ordering = channels-last)
    model.add(
        Convolution2D(
            32, 3, 3, border_mode='same', input_shape=in_shape[1:]))
    model.add(SReLU())
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))
    model.add(GaussianNoise(1))
    model.add(GaussianDropout(0.4))
    model.add(Convolution2D(32, 3, 3, border_mode='same'))
    model.add(SReLU())
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))
    model.add(GaussianNoise(1))
    model.add(GaussianDropout(0.4))
    # Dense regression head; the final layer predicts one scalar per block
    model.add(Flatten())
    model.add(Dense(64))
    model.add(SReLU())
    model.add(Dense(64))
    # model.add(SReLU())
    model.add(Dense(1))
    model.add(Activation('linear'))
    return model
def traincnn(x, y, K, epoch):
    """Normalise the training data, build the CNN and fit it.

    x: (nsample, 2K+1, 2K+1, 3) block array; y: (nsample, 1) targets.
    Both are scaled by their max in place after the float32 cast.
    Returns the fitted keras model (MSE loss, rmsprop optimiser).
    """
    x = x.astype('float32')
    y = y.astype('float32')
    x /= x.max()
    y /= y.max()
    model = makecnn(x.shape, K)
    model.compile(loss='mse', optimizer='rmsprop')
    # 15% of the samples held out for validation; shuffled each epoch
    model.fit(x,
              y,
              batch_size=64,
              nb_epoch=epoch,
              validation_split=0.15,
              shuffle=True)
    return model
def make_sk_img(imshape, swc):
    """Skeleton image for fast marching: 1 everywhere, 0 at swc node voxels
    (cols 2:5, floored to integer indices)."""
    skimg = np.ones(imshape)
    for row in swc:
        i, j, k = (math.floor(c) for c in row[2:5])
        skimg[i, j, k] = 0
    return skimg
def padimg(img, margin):
    """Zero-pad a 3-D image by `margin` voxels on every side.

    Always returns a float array (np.zeros default dtype), with the input
    copied into the centre.
    """
    padded_shape = tuple(s + 2 * margin for s in img.shape)
    pimg = np.zeros(padded_shape)
    pimg[margin:margin + img.shape[0],
         margin:margin + img.shape[1],
         margin:margin + img.shape[2]] = img
    return pimg
def unpadimg(img, margin):
    """Strip `margin` voxels from every side of img (inverse of padimg).

    Bug fix: the old version sliced img[margin:margin + img.shape[k]] --
    upper bounds past the array end -- so only the leading margin was
    removed and the trailing margin was kept, returning the wrong shape.
    It also allocated a throwaway array that was immediately rebound.
    """
    return img[margin:-margin, margin:-margin, margin:-margin]
def padswc(swc, margin):
    """Shift swc node coordinates (columns 2:5) by `margin`, in place, and
    return the same array."""
    swc[:, 2:5] += margin
    return swc
| 8,673 | 32.233716 | 78 | py |
rivuletpy | rivuletpy-master/filtering/__init__.py | 0 | 0 | 0 | py | |
rivuletpy | rivuletpy-master/filtering/morphology.py | import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.ndimage.filters import laplace
try:
from skimage import filters
except ImportError:
from skimage import filter as filters
from tqdm import tqdm
from functools import reduce
from scipy.interpolate import RegularGridInterpolator
import skfmm
def ssmdt(dt, ssmiter):
    """Sharpen a distance transform into a thin skeleton response.

    Pipeline: anisotropic skeleton-strength map -> Otsu thresholding ->
    two fast-marching distance passes to regularise and re-binarise the
    ridge. The 0.04 / 0.038 cutoffs are magic constants -- presumably
    tuned empirically; TODO document their origin.
    """
    dt = ssm(dt, anisotropic=True, iterations=ssmiter)
    dt[dt < filters.threshold_otsu(dt)] = 0
    dt = skfmm.distance(dt, dx=5e-2)
    # Distance from the ridge itself (the complement of the response)
    dt = skfmm.distance(np.logical_not(dt), dx=5e-3)
    dt[dt > 0.04] = 0.04
    dt = dt.max() - dt
    dt[dt <= 0.038] = 0
    return dt
def ssm(img, anisotropic=False, iterations=30):
    '''
    Skeleton strength map.

    img: the input image
    anisotropic: True if using anisotropic diffusion
    iterations: number of iterations to optimise the GVF

    The SSM at each voxel sums, over its six orthogonal neighbours, the GVF
    at the neighbour projected onto the unit displacement towards the voxel.
    '''
    # f = gaussian_gradient_magnitude(img, 1)
    # f = 1 - gimg # Inverted version of the smoothed
    # gradient of the distance transform
    gvfmap = gvf(img, mu=0.001, iterations=iterations, anisotropic=anisotropic)
    # Six orthogonal neighbour displacements: +/-1 along each spatial axis
    # (axes 1..3 of gvfmap; axis 0 holds the vector components).
    # Bug fix: the shift list previously read [-1, 1, -1, -1, -1, 1], which
    # visited the -1 neighbour of the middle axis twice and never its +1
    # neighbour.
    shifts = [-1, 1, -1, 1, -1, 1]
    axis = [1, 1, 2, 2, 3, 3]
    shiftmat = np.zeros(gvfmap.shape)
    f = np.zeros(img.shape)  # reuse f for saving the SSM
    for i, (s, a) in enumerate(zip(shifts, axis)):
        # Only the orthogonal neighbours
        shiftmat.fill(0)
        shiftmat[a - 1, :, :, :] = s
        # Dot product of gvf and neighbour displacement fields /
        # distance between neighbours
        f += np.sum(np.roll(
            gvfmap, s, axis=a) * shiftmat, axis=0) / np.linalg.norm(
                shiftmat, axis=0)
    f[np.isnan(f)] = 0
    f[f < 0] = 0
    return f
def nonmax(img, sigma=2, threshold=0):
'''
Finds directional local maxima
in a gradient array, as used in the Canny edge detector, but made
separately accessible here for greater flexibility. The result is a
logical array with the value true where the gradient magnitude is a
local maximum along the gradient direction.
'''
# Get normalised gaussian gradients
eps = 1e-12
gx = gaussian_filter1d(img, sigma, axis=0, order=1)
gy = gaussian_filter1d(img, sigma, axis=1, order=1)
gz = gaussian_filter1d(img, sigma, axis=2, order=1)
gmag = np.sqrt(gx**2 + gy**2 + gz**2)
gx = gx / (gmag + eps)
gy = gy / (gmag + eps)
gz = gz / (gmag + eps)
standard_grid = (np.arange(gmag.shape[0]), np.arange(gmag.shape[1]),
np.arange(gmag.shape[2]))
ginterp = RegularGridInterpolator(standard_grid, gmag, bounds_error=False)
# Interpolate the graident magnitudes
idx = np.argwhere(
img > threshold
) # Double-check if the original image should be used to check
xidx = idx[:, 0]
yidx = idx[:, 1]
zidx = idx[:, 2]
dx = gx[xidx, yidx, zidx]
dy = gy[xidx, yidx, zidx]
dz = gz[xidx, yidx, zidx]
gmag_0 = gmag[xidx, yidx, zidx]
gmag_1 = ginterp(np.stack((xidx + dx, yidx + dy, zidx + dz), axis=-1))
gmag_2 = ginterp(np.stack((xidx - dx, yidx - dy, zidx - dz), axis=-1))
# Suppress nonmax voxels
keep = np.logical_and(gmag_0 > gmag_1, gmag_0 > gmag_2)
gmag.fill(0)
gmag[xidx, yidx, zidx] = keep.astype('float')
return gmag
def d(x):
    '''
    Differences between each voxel and its six orthogonal neighbours.
    Returns an array of shape (6,) + x.shape; the boundary wraps around
    via np.roll (as in the original), so edge values are not meaningful.

    Bug fix: the shift list previously read [-1, 1, -1, -1, -1, 1], which
    paired axis 1 with shift -1 twice and never with +1; the six entries now
    cover +/-1 along each of the three axes (matching g_all's ordering).
    '''
    diff = np.zeros((6, x.shape[0], x.shape[1], x.shape[2]))
    shifts = [-1, 1, -1, 1, -1, 1]
    axis = [0, 0, 1, 1, 2, 2]
    for i, (s, a) in enumerate(zip(shifts, axis)):
        diff[i] = np.roll(x, s, axis=a) - x
    return diff
# The decreasing function for angles between a voxel's flow vector and the
# flow vectors of its six orthogonal neighbours.
def g_all(u, v, w):
    """Return G of shape (6,) + u.shape; G[i] weights neighbour i via g().

    u, v, w: the three components of the flow field.

    Bug fix: the neighbour shift list previously read [-1, 1, -1, -1, -1, 1],
    visiting the -1 neighbour of axis 1 twice and never its +1 neighbour;
    the six entries now cover +/-1 along each axis (matching d()'s ordering).
    """
    G = np.zeros((6, u.shape[0], u.shape[1], u.shape[2]))  # Result
    cvec = np.stack((u, v, w), axis=0)  # The flow vector on central voxel
    shifts = [-1, 1, -1, 1, -1, 1]
    axis = [0, 0, 1, 1, 2, 2]
    for i, (s, a) in enumerate(zip(shifts, axis)):
        # The flow vector on the corresponding neighbouring voxel
        svec = np.stack((np.roll(u, s, axis=a),
                         np.roll(v, s, axis=a),
                         np.roll(w, s, axis=a)), axis=0)
        G[i] = g(cvec, svec)
    return G
# Calculate the G function between two vector fields
def g(cvec, svec, K=1):
    """Angle-decreasing weight exp(K * (cos(theta) - 1)) between the two
    vector fields (components stacked along axis 0). Voxels where either
    field has zero magnitude get weight 0."""
    norm_c = np.linalg.norm(cvec, axis=0)
    norm_s = np.linalg.norm(svec, axis=0)
    cos_angle = np.sum(cvec * svec, axis=0) / (norm_c * norm_s + 1e-12)
    weight = np.exp(K * (cos_angle - 1))
    weight[np.logical_or(norm_c == 0, norm_s == 0)] = 0
    return weight
# Divergence
def div(x):
    """Compute the divergence of the n-D scalar field x as the sum of its
    gradient components along every axis."""
    return sum(np.gradient(x))
def gvf(f, mu=0.05, iterations=30, anisotropic=False,
        ignore_second_term=False):
    """Gradient vector flow of the 3-D scalar field f.

    f is min-max normalised and mirror-padded, then the vector field
    (u, v, w) -- initialised to the gradient -- is diffused for `iterations`
    steps, either isotropically (Laplacian smoothing) or anisotropically
    (angle-weighted neighbour diffusion via g_all/d/div). Unless
    ignore_second_term, the data-fidelity term pulls the field back towards
    the original gradient where the gradient magnitude is large.
    Returns the stacked field of shape (3,) + f.shape.
    """
    # Gradient vector flow
    # Translated from https://github.com/smistad/3D-Gradient-Vector-Flow-for-Matlab
    f = (f - f.min()) / (f.max() - f.min())
    f = enforce_mirror_boundary(
        f)  # Enforce the mirror conditions on the boundary
    dx, dy, dz = np.gradient(f)  # Initialse with normal gradients
    '''
    Initialise the GVF vectors following S3 in
    Yu, Zeyun, and Chandrajit Bajaj.
    "A segmentation-free approach for skeletonization of gray-scale images via anisotropic vector diffusion."
    CVPR, 2004. CVPR 2004.
    It only uses one of the surronding neighbours with the lowest intensity
    '''
    magsq = dx**2 + dy**2 + dz**2
    # Set up the initial vector field
    u = dx.copy()
    v = dy.copy()
    w = dz.copy()
    for i in tqdm(range(iterations)):
        # The boundary might not matter here
        # u = enforce_mirror_boundary(u)
        # v = enforce_mirror_boundary(v)
        # w = enforce_mirror_boundary(w)
        # Update the vector field
        if anisotropic:
            # Angle-weighted diffusion: each neighbour's contribution is
            # scaled by the G weight of its flow-vector alignment
            G = g_all(u, v, w)
            u += mu / 6. * div(np.sum(G * d(u), axis=0))
            v += mu / 6. * div(np.sum(G * d(v), axis=0))
            w += mu / 6. * div(np.sum(G * d(w), axis=0))
        else:
            u += mu * 6 * laplace(u)
            v += mu * 6 * laplace(v)
            w += mu * 6 * laplace(w)
        if not ignore_second_term:
            # Data-fidelity: anchor to the original gradient where it is strong
            u -= (u - dx) * magsq
            v -= (v - dy) * magsq
            w -= (w - dz) * magsq
    return np.stack((u, v, w), axis=0)
def enforce_mirror_boundary(f):
    '''
    This function enforces the mirror boundary conditions
    on the 3D input image f. The values of all voxels at
    the boundary are set to the values of the voxels 2 steps
    inward. Operates in place and returns f.
    '''
    N, M, O = f.shape
    # Indices in the middle
    xi = np.arange(1, M - 1)
    yi = np.arange(1, N - 1)
    zi = np.arange(1, O - 1)
    # Corners (bug fix: use np.ix_ so all 8 corners are mirrored; the old
    # pointwise fancy indexing only touched (0,0,0) and (-1,-1,-1))
    f[np.ix_([0, -1], [0, -1], [0, -1])] = f[np.ix_([2, -3], [2, -3], [2, -3])]
    # Edges
    f[np.ix_([0, -1], [0, -1], zi)] = f[np.ix_([2, -3], [2, -3], zi)]
    f[np.ix_(yi, [0, -1], [0, -1])] = f[np.ix_(yi, [2, -3], [2, -3])]
    f[np.ix_([0, -1], xi, [0, -1])] = f[np.ix_([2, -3], xi, [2, -3])]
    # Faces
    f[np.ix_([0, -1], xi, zi)] = f[np.ix_([2, -3], xi, zi)]
    f[np.ix_(yi, [0, -1], zi)] = f[np.ix_(yi, [2, -3], zi)]
    f[np.ix_(yi, xi, [0, -1])] = f[np.ix_(yi, xi, [2, -3])]
    return f
| 7,344 | 30.122881 | 110 | py |
Mr.Right | Mr.Right-main/main.py | import yaml
import os
import utils
import warnings
from argparse import ArgumentParser
from torch import nn
from pytorch_lightning import Trainer,seed_everything
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint,LearningRateMonitor
from data.data_module import TextToMultiDataModule
from pltrainer import TextToMultiTrainer
from functools import partial
from models.model import TextToMultiModel
from transformers import (
BertTokenizer, RobertaTokenizerFast
)
warnings.filterwarnings("ignore")
def main(args,config):
    """Build the tokenizer, data module, model and PyTorch Lightning trainer,
    then run fit or test according to args.mode."""
    seed_everything(config.seed)
    # tokenizer
    if args.pretrain == "ALBEF" or args.pretrain == "ViLT":
        tokenizer = BertTokenizer.from_pretrained(
            config.text_encoder,
            cache_dir= args.cache_dir,
        )
    elif args.pretrain == "METER":
        tokenizer = RobertaTokenizerFast.from_pretrained(
            config.text_encoder,
            cache_dir= args.cache_dir,
        )
    # NOTE(review): no tokenizer branch exists for --pretrain MDETR; the
    # reference below would raise NameError -- confirm MDETR support status.
    # dataset
    print("Create Dataset")
    data_module = TextToMultiDataModule(args,config,tokenizer)
    if args.mode == "test":
        data_module.prepare_data(test=config['test_file'],document=config['document'])
    else:
        data_module.prepare_data(train=config['train_file'],val=config['val_file'],test=config['test_file'],document=config['document'])
    data_module.setup()
    # mutli model
    print("Create multi modal")
    model = TextToMultiModel(tokenizer=tokenizer,config=config,args=args)
    pltrainer = TextToMultiTrainer(args,config,model,tokenizer)
    # logger
    wandb_logger = WandbLogger(name=args.wandb_task_name,project="multimodalembedding", entity=args.wandb_entity)
    # Keep the 3 best checkpoints by multimodal recall@1
    checkpoint_callback = ModelCheckpoint(
        filename= '{loss:.2f}-{val_loss:.2f}-{multi_r1:.2f}',
        save_top_k=3,
        verbose=False,
        monitor='multi_r1',
        mode='max'
    )
    lr_monitor = LearningRateMonitor(logging_interval='step')
    trainer_config = {
        "default_root_dir": args.save_checkpoint,
        "fast_dev_run": False,
        # "gradient_clip_val": config.gradient_clip_value,
        # "replace_sampler_ddp":False,
        "strategy": "ddp",
        "logger": wandb_logger,
        "gpus": args.num_gpus,
        "max_epochs": config["schedular"]["epochs"],
        # "max_steps": config["schedular"]["max_steps"],
        "auto_scale_batch_size": 'binsearch',
        "progress_bar_refresh_rate": 1,
        "precision": 16,
        "check_val_every_n_epoch": 10,
        "log_every_n_steps": 1,
        "flush_logs_every_n_steps": 1,
        "callbacks":[checkpoint_callback, lr_monitor],
    }
    trainer = Trainer(**trainer_config)
    if args.mode == "train":
        trainer.fit(pltrainer, data_module,ckpt_path=args.pl_checkpoint)
    elif args.mode == "test":
        trainer.test(pltrainer, data_module,ckpt_path=args.pl_checkpoint)
if __name__ == '__main__':
    # CLI entry point: parse arguments, create output dirs, pick the config
    # file for the chosen pretraining backbone, load and wrap it, run main.
    parser = ArgumentParser()
    parser.add_argument('--wandb_task_name', default='testing')
    parser.add_argument('--wandb_entity', default='multimodalembedding')
    parser.add_argument('--num_gpus', default=1, type=int)
    parser.add_argument("--config", default="configs/ALBEF.yaml", type=str)
    parser.add_argument("--cache_dir", default="cache/", type=str)
    parser.add_argument('--log_dir', default='logs/',type=str)
    parser.add_argument("--mode", default="train", type=str,choices=['train','test'],help='choose your mode')
    parser.add_argument("--pretrain", default="ALBEF", type=str,choices=['ALBEF','ViLT','MDETR','METER'],help='choose pretrain work')
    parser.add_argument("--embeds_feats", default="avg", type=str,choices=['cls','avg','iavg_tcls'],help='how to deal with text and image embeddings')
    parser.add_argument("--pickle_output", default="./", type=str,help='directory of testing pickle files')
    parser.add_argument("--test_output", default="output.json", type=str,help='json files of testing result')
    parser.add_argument("--save_checkpoint", default="checkpoints", type=str)
    parser.add_argument('--pl_checkpoint', default=None,type=str,help='Load pytorch lightning checkpoint')
    parser.add_argument('--batch_size', type=int, default=32,help='The batch size of each dataloader')
    parser.add_argument('--num_workers', type=int, default=8, help='The number of workers in the DataLoader')
    parser.add_argument('--shuffle', type=bool, default=True,help='Whether shuffle dataloader')
    parser.add_argument('--ctx_prediction', action='store_true', help='Whether do context prediction')
    parser.add_argument('--neg_matching', action='store_true', help='Whether do negative matching')
    parser.add_argument('--neg_matchingv2', action='store_true', help='Whether do negative matching version2')
    parser.add_argument('--test_rank', default=10, type=int, help='Step1. Contrastive -> rank -> Step2. Matching')
    parser.add_argument('--re_ranking', action='store_true', help='Whether do re ranking for matching')
    args = parser.parse_args()
    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir, exist_ok=True)
    if not os.path.exists(args.cache_dir):
        os.makedirs(args.cache_dir, exist_ok=True)
    if not os.path.exists(args.save_checkpoint):
        os.makedirs(args.save_checkpoint, exist_ok=True)
    # The chosen backbone overrides any --config value supplied on the CLI
    if args.pretrain == "ALBEF":
        args.config = "configs/ALBEF.yaml"
    elif args.pretrain == "ViLT":
        args.config = "configs/ViLT.yaml"
    elif args.pretrain == "MDETR":
        args.config = "configs/MDETR.yaml"
    elif args.pretrain == "METER":
        args.config = "configs/METER.yaml"
    with open(args.config) as f:
        config = yaml.safe_load(f)
    config = utils.AttrDict(config)
    print(args)
    print(config)
main(args,config) | 5,871 | 42.496296 | 150 | py |
Mr.Right | Mr.Right-main/compute_pickle.py | import pickle
import os
from argparse import ArgumentParser
from metric import score
def main(args):
    """Load precomputed embedding pickles and report IR/TR/MR retrieval
    scores (query-embedding x document-embedding similarities).

    Expects in args.pickle_intput: multimodal_documents.pickle,
    img_query.pickle, txt_query.pickle, multi_query.pickle, labels.pickle.
    The pickled objects expose .shape, .t() and @ -- presumably torch
    tensors; TODO confirm.
    """
    print("Load pickle...")
    path = args.pickle_intput
    multi_doc_path = os.path.join(path,'multimodal_documents.pickle')
    img_embd_path = os.path.join(path,'img_query.pickle')
    txt_embd_path = os.path.join(path,'txt_query.pickle')
    mr_embd_path = os.path.join(path,'multi_query.pickle')
    true_label_path = os.path.join(path,'labels.pickle')
    with open(multi_doc_path, 'rb') as handle:
        all_docs = pickle.load(handle)
    with open(img_embd_path, 'rb') as handle:
        all_img_queries = pickle.load(handle)
    with open(txt_embd_path, 'rb') as handle:
        all_txt_queries = pickle.load(handle)
    with open(mr_embd_path, 'rb') as handle:
        all_multi_queries = pickle.load(handle)
    with open(true_label_path, 'rb') as handle:
        all_queries_doc_ids = pickle.load(handle)
    print(f"Document size: {all_docs.shape}")
    print(f"Text-related query size: {all_txt_queries.shape}")
    print(f"Image-related query size: {all_img_queries.shape}")
    print(f"Mixed query size: {all_multi_queries.shape}")
    # Each similarity matrix is deleted after scoring to limit peak memory
    img_sims_matrix = all_img_queries @ all_docs.t()
    img_output_score = score(img_sims_matrix,all_queries_doc_ids)
    print(f"IR score:{img_output_score}")
    del img_sims_matrix
    txt_sims_matrix = all_txt_queries @ all_docs.t()
    txt_output_score = score(txt_sims_matrix,all_queries_doc_ids)
    print(f"TR score:{txt_output_score}")
    del txt_sims_matrix
    multi_sims_matrix = all_multi_queries @ all_docs.t()
    multi_output_score = score(multi_sims_matrix,all_queries_doc_ids)
    print(f"MR score:{multi_output_score}")
if __name__ == '__main__':
    # CLI entry point: only the directory holding the embedding pickles
    parser = ArgumentParser()
    parser.add_argument('--pickle_intput', default='pickle-albef/')
    args = parser.parse_args()
    print(args)
main(args) | 1,894 | 34.754717 | 69 | py |
Mr.Right | Mr.Right-main/utils.py | import json
def prepare_pretrain_data(files):
    """Load and concatenate image-text pair lists from a list of json files.

    Each file must contain a json array; the arrays are concatenated in
    order. Fix: files are now opened with a context manager so handles are
    closed promptly (previously they leaked until garbage collection).
    """
    print("\nReading json files")
    image_text_pairs = []
    for f in files:
        print(f"File: {f}",end="\r")
        with open(f, 'r') as fh:
            image_text_pairs += json.load(fh)
    return image_text_pairs
class AttrDict(dict):
    """Dictionary whose keys are also readable/writable as attributes.

    Works by aliasing the instance __dict__ to the dict itself, so
    d['k'] and d.k refer to the same storage.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self
Mr.Right | Mr.Right-main/pltrainer.py | import pdb
import utils
import json
import pickle
import torch
import os
import torch.nn.functional as F
import torch.distributed as dist
import pytorch_lightning as pl
from metric import score
from scheduler import CosineLRScheduler
from tqdm import tqdm
class TextToMultiTrainer(pl.LightningModule):
    def __init__(self,
                args,
                config,
                model,
                tokenizer,
                ):
        """LightningModule wrapping the text-to-multimodal retrieval model.

        args/config: CLI namespace and AttrDict config; model: the
        TextToMultiModel; tokenizer: its HuggingFace tokenizer.
        Optimization is manual (optimizer/scheduler stepped in
        training_step), so automatic_optimization is disabled.
        """
        super().__init__()
        self.args = args
        self.config = config
        self.arg_opt = utils.AttrDict(config['optimizer'])
        self.arg_sche = utils.AttrDict(config['schedular'])
        self.model = model
        self.tokenizer = tokenizer
        self.automatic_optimization = False # pytorch lightning turn off Optimize
        # Scheduler granularity: warm up over warmup_epochs * step_size steps
        self.step_size = 100
        self.warmup_iterations = self.arg_sche.warmup_epochs*self.step_size
        self.save_hyperparameters()
    def training_step(self, train_batch, idx):
        """One manual-optimization step: forward, sum the three losses
        (contrastive + context prediction + matching), backward, clip,
        step the optimizer, log, and advance the warmup/cosine scheduler."""
        opt = self.optimizers()
        opt.zero_grad()
        query = train_batch['query_str_tensor']
        doc_text = train_batch['doc_str_tensor']
        doc_image = train_batch['doc_image_tensor']
        doc_id = train_batch['doc_id']
        context_labels = train_batch.get('context_labels', None)
        loss_ita, loss_ctx_labels, loss_itm = self.model.forward(
            query, doc_text, doc_image, doc_id, context_labels, self.args.neg_matching, self.args.neg_matchingv2)
        loss = loss_ita + loss_ctx_labels + loss_itm
        self.manual_backward(loss)
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.gradient_clip_value)
        opt.step()
        self.log("loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
        self.log("loss_ita", loss_ita, on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
        # Component losses are only logged when their objective is enabled
        if self.args.ctx_prediction:
            self.log("loss_ctx", loss_ctx_labels, on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
        if self.args.neg_matching:
            self.log("loss_itm", loss_itm, on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
        sch = self.lr_schedulers()
        # step every `n` epochs
        # Warmup: during epoch 0, step once every step_size global steps
        if self.current_epoch==0 and self.global_step%self.step_size==0 and self.global_step<=self.warmup_iterations:
            sch.step(self.global_step//self.step_size)
        # After warmup: step once per epoch, offset by the warmup epochs
        if self.trainer.is_last_batch:
            sch.step(self.current_epoch+self.arg_sche.warmup_epochs+1)
        return loss
    def validation_step(self, val_batch, idx, dataloader_idx=0):
        """Compute embeddings for one validation batch.

        dataloader_idx 0 carries the three query types (image-related,
        text-related, mixed); dataloader_idx 1 carries the multimodal
        documents. Tensors arrive with an extra batch dimension from the
        collate and are flattened back to (batch, seq) before encoding.
        Returns a dict of features (plus raw embeddings/attention masks
        when re-ranking or context prediction is enabled).
        """
        if dataloader_idx == 0: # query
            img_query = val_batch['img_query_str_tensor']
            txt_query = val_batch['txt_query_str_tensor']
            multi_query = val_batch['multi_query_str_tensor']
            doc_id = val_batch['doc_id']
            # Flatten the collated (batch, 1, seq) tensors to (batch, seq)
            img_query['input_ids'] = img_query['input_ids'].view(img_query['input_ids'].shape[0],-1)
            img_query['attention_mask'] = img_query['attention_mask'].view(img_query['input_ids'].shape[0],-1)
            if "token_type_ids" in img_query.keys():
                img_query["token_type_ids"] = img_query["token_type_ids"].view(img_query['input_ids'].shape[0],-1)
            txt_query['input_ids'] = txt_query['input_ids'].view(txt_query['input_ids'].shape[0],-1)
            txt_query['attention_mask'] = txt_query['attention_mask'].view(txt_query['input_ids'].shape[0],-1)
            if "token_type_ids" in txt_query.keys():
                txt_query["token_type_ids"] = txt_query["token_type_ids"].view(txt_query['input_ids'].shape[0],-1)
            multi_query['input_ids'] = multi_query['input_ids'].view(multi_query['input_ids'].shape[0],-1)
            multi_query['attention_mask'] = multi_query['attention_mask'].view(multi_query['input_ids'].shape[0],-1)
            if "token_type_ids" in multi_query.keys():
                multi_query["token_type_ids"] = multi_query["token_type_ids"].view(multi_query['input_ids'].shape[0],-1)
            img_query_embeds, img_query_feats = self.model.output_query_feats(img_query)
            txt_query_embeds, txt_query_feats = self.model.output_query_feats(txt_query)
            multi_query_embeds, multi_query_feats = self.model.output_query_feats(multi_query)
            result = {
                "img_query_text": val_batch['img_query_str'],
                "img_query_feats": img_query_feats,
                "txt_query_text": val_batch['txt_query_str'],
                "txt_query_feats": txt_query_feats,
                "multi_query_text": val_batch['multi_query_str'],
                "multi_query_feats": multi_query_feats,
                "docs_id": doc_id,
            }
            # Token-level embeddings are only needed for the re-ranking stage
            if self.args.re_ranking:
                result["img_query_embeds"] = img_query_embeds
                result["img_query_att"] = img_query['attention_mask']
                result["txt_query_embeds"] = txt_query_embeds
                result["txt_query_att"] = txt_query['attention_mask']
                result["multi_query_embeds"] = multi_query_embeds
                result["multi_query_att"] = multi_query['attention_mask']
        elif dataloader_idx == 1: # document
            doc_text = val_batch['doc_str_tensor']
            doc_id = val_batch['doc_id']
            doc_image = val_batch['doc_image_tensor']
            doc_text['input_ids'] = doc_text['input_ids'].view(doc_text['input_ids'].shape[0],-1)
            doc_text['attention_mask'] = doc_text['attention_mask'].view(doc_text['input_ids'].shape[0],-1)
            if "token_type_ids" in doc_text.keys():
                doc_text["token_type_ids"] = doc_text["token_type_ids"].view(doc_text['input_ids'].shape[0],-1)
            docs_embeds,docs_feats,doc_masks = self.model.output_doc_feats(doc_text,doc_image)
            result = {
                "docs_text": val_batch['doc_str'],
                "docs_image": val_batch['image_path'],
                "docs_feats": docs_feats,
                "docs_id": doc_id
            }
            # ===== Context Prediction =====
            if self.args.ctx_prediction:
                prediction_scores = self.model.context(docs_embeds)
                mean_prediction_scores = torch.mean(prediction_scores,1)
                top_context = torch.topk(mean_prediction_scores,20,dim=1)
                result["top_context"] = top_context
            # ===== Re-rank =====
            if self.args.re_ranking:
                result["doc_embeds"] = docs_embeds
                result["doc_att"] = doc_masks
        return result
def validation_epoch_end(self, validation_step_outputs):
    """Aggregate validation outputs from both dataloaders and log retrieval metrics.

    ``validation_step_outputs[0]`` holds the per-batch query-side results and
    ``validation_step_outputs[1]`` the document-side results (matching the two
    ``dataloader_idx`` branches of ``validation_step``).  Steps:
      1. concatenate features/texts across batches;
      2. under DDP, all-gather across ranks and drop duplicate samples;
      3. build query->document similarity matrices (optionally re-ranked by
         the matching classifier over the top-``test_rank`` candidates);
      4. log contrastive losses and recall / MRR@10 for image, text and
         multimodal queries.
    """
    # ---- dataloader 0: queries ------------------------------------------
    queries = validation_step_outputs[0]
    all_queries_doc_ids = torch.stack([feat for output in queries for feat in output["docs_id"]])
    all_img_queries = torch.stack([feat for output in queries for feat in output["img_query_feats"]])
    all_img_queries_text = [str(feat) for output in queries for feat in output["img_query_text"]]
    all_txt_queries = torch.stack([feat for output in queries for feat in output["txt_query_feats"]])
    all_txt_queries_text = [str(feat) for output in queries for feat in output["txt_query_text"]]
    all_multi_queries = torch.stack([feat for output in queries for feat in output["multi_query_feats"]])
    all_multi_queries_text = [str(feat) for output in queries for feat in output["multi_query_text"]]
    # ---- dataloader 1: documents ----------------------------------------
    docs = validation_step_outputs[1]
    all_docs_ids = torch.stack([feat for output in docs for feat in output["docs_id"]])
    all_docs = torch.stack([feat for output in docs for feat in output["docs_feats"]])
    all_docs_captions = [str(feat) for output in docs for feat in output["docs_text"]]
    all_docs_images = [str(feat) for output in docs for feat in output["docs_image"]]
    # Under DDP each rank only saw a shard; gather everything, then remove
    # the duplicates a DistributedSampler may have padded in.
    if self.local_rank != None:
        # id
        all_queries_doc_ids_list = [torch.zeros_like(all_queries_doc_ids) for _ in range(dist.get_world_size())]
        dist.all_gather(all_queries_doc_ids_list, all_queries_doc_ids)
        all_queries_doc_ids = torch.cat(all_queries_doc_ids_list, dim=0)
        temp_all_queries_doc_ids = all_queries_doc_ids  # debug alias; unused afterwards
        # self.unique -> (sorted unique ids, index of FIRST occurrence of each id);
        # the indices are reused to de-duplicate every query-aligned field below.
        all_queries_doc_ids, rm_repeat_indices = self.unique(all_queries_doc_ids)
        # image_query
        all_img_queries_list = [torch.zeros_like(all_img_queries) for _ in range(dist.get_world_size())]
        dist.all_gather(all_img_queries_list, all_img_queries)
        all_img_queries = torch.cat(all_img_queries_list, dim=0)
        all_img_queries = all_img_queries[rm_repeat_indices]
        # Python strings cannot go through all_gather; use all_gather_object.
        all_img_queries_text_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(all_img_queries_text_list, all_img_queries_text)
        all_img_queries_text = [text for queries_text in all_img_queries_text_list for text in queries_text]
        all_img_queries_text = [all_img_queries_text[i] for i in rm_repeat_indices.tolist()]
        # text_query
        all_txt_queries_list = [torch.zeros_like(all_txt_queries) for _ in range(dist.get_world_size())]
        dist.all_gather(all_txt_queries_list, all_txt_queries)
        all_txt_queries = torch.cat(all_txt_queries_list, dim=0)
        all_txt_queries = all_txt_queries[rm_repeat_indices]
        all_txt_queries_text_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(all_txt_queries_text_list, all_txt_queries_text)
        all_txt_queries_text = [text for queries_text in all_txt_queries_text_list for text in queries_text]
        all_txt_queries_text = [all_txt_queries_text[i] for i in rm_repeat_indices.tolist()]
        # multi_query
        all_multi_queries_list = [torch.zeros_like(all_multi_queries) for _ in range(dist.get_world_size())]
        dist.all_gather(all_multi_queries_list, all_multi_queries)
        all_multi_queries = torch.cat(all_multi_queries_list, dim=0)
        all_multi_queries = all_multi_queries[rm_repeat_indices]
        all_multi_queries_text_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(all_multi_queries_text_list, all_multi_queries_text)
        all_multi_queries_text = [text for queries_text in all_multi_queries_text_list for text in queries_text]
        all_multi_queries_text = [all_multi_queries_text[i] for i in rm_repeat_indices.tolist()]
        # multimodal_doc
        all_doc_ids_list = [torch.zeros_like(all_docs_ids) for _ in range(dist.get_world_size())]
        dist.all_gather(all_doc_ids_list, all_docs_ids)
        all_docs_ids = torch.cat(all_doc_ids_list, dim=0)
        all_docs_ids, rm_repeat_doc_indices = self.unique(all_docs_ids)
        all_docs_list = [torch.zeros_like(all_docs) for _ in range(dist.get_world_size())]
        dist.all_gather(all_docs_list, all_docs)
        all_docs = torch.cat(all_docs_list, dim=0)
        all_docs = all_docs[rm_repeat_doc_indices]
        all_docs_captions_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(all_docs_captions_list, all_docs_captions)
        all_docs_captions = [caption for docs_captions in all_docs_captions_list for caption in docs_captions]
        all_docs_captions = [all_docs_captions[i] for i in rm_repeat_doc_indices.tolist()]
        all_docs_image_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(all_docs_image_list, all_docs_images)
        all_docs_images = [image for docs_images in all_docs_image_list for image in docs_images]
        all_docs_images = [all_docs_images[i] for i in rm_repeat_doc_indices.tolist()]
        if self.args.re_ranking:
            all_docs_embeds = torch.stack([feat for output in docs for feat in output["doc_embeds"]])
            all_docs_masks = torch.stack([feat for output in docs for feat in output["doc_att"]])
            all_docs_embeds_list = [torch.zeros_like(all_docs_embeds) for _ in range(dist.get_world_size())]
            dist.all_gather(all_docs_embeds_list, all_docs_embeds)
            all_docs_embeds = torch.cat(all_docs_embeds_list, dim=0)
            # NOTE(review): unlike the fields above (de-duplicated with
            # rm_repeat_doc_indices), embeds/masks are indexed by the doc IDS
            # themselves — only correct if ids are exactly 0..N-1 positions.
            # Confirm this invariant against the dataset.
            all_docs_embeds = all_docs_embeds[all_docs_ids]
            all_docs_masks_list = [torch.zeros_like(all_docs_masks) for _ in range(dist.get_world_size())]
            dist.all_gather(all_docs_masks_list, all_docs_masks)
            all_docs_masks = torch.cat(all_docs_masks_list, dim=0)
            all_docs_masks = all_docs_masks[all_docs_ids]
    # Dense retrieval scores: every query against every document
    # (features are presumably L2-normalised by the model — confirm).
    img_sims_matrix = all_img_queries @ all_docs.t()
    txt_sims_matrix = all_txt_queries @ all_docs.t()
    multi_sims_matrix = all_multi_queries @ all_docs.t()
    matrix_list = [img_sims_matrix,txt_sims_matrix,multi_sims_matrix]
    # One-hot relevance; also assumes doc ids coincide with row positions of all_docs.
    labels = F.one_hot(all_queries_doc_ids, len(all_docs)).to(self.device)
    if self.args.re_ranking:
        # Re-score the top-`test_rank` candidates of each query with the
        # cross-attention matching (ITM) classifier; unselected candidates
        # keep the -100 sentinel score.
        all_img_queries_embeds = torch.stack([feat for output in queries for feat in output["img_query_embeds"]])
        all_img_queries_masks = torch.stack([feat for output in queries for feat in output["img_query_att"]])
        all_txt_queries_embeds = torch.stack([feat for output in queries for feat in output["txt_query_embeds"]])
        all_txt_queries_masks = torch.stack([feat for output in queries for feat in output["txt_query_att"]])
        all_multi_queries_embeds = torch.stack([feat for output in queries for feat in output["multi_query_embeds"]])
        all_multi_queries_masks = torch.stack([feat for output in queries for feat in output["multi_query_att"]])
        score_matrix_i2m = torch.full((len(all_img_queries),len(all_docs)),-100.0).to(self.device)
        score_matrix_t2m = torch.full((len(all_txt_queries),len(all_docs)),-100.0).to(self.device)
        score_matrix_m2m = torch.full((len(all_multi_queries),len(all_docs)),-100.0).to(self.device)
        # `type` 0/1/2 = image / text / multimodal query (shadows the builtin).
        # NOTE(review): `queries` and `docs` are rebound inside this loop,
        # shadowing the step-output lists read above.
        for type in range(3):
            temp_matrix = matrix_list[type]
            for i,sims in enumerate(tqdm(temp_matrix)):
                topk_sim, topk_idx = sims.topk(k=self.args.test_rank, dim=0)
                if type == 0:
                    queries = all_img_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
                    queries_mask = all_img_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
                elif type == 1:
                    queries = all_txt_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
                    queries_mask = all_txt_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
                else:
                    queries = all_multi_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
                    queries_mask = all_multi_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
                docs = all_docs_embeds[topk_idx]
                docs_masks = all_docs_masks[topk_idx]
                itm_logits = self.model.matching_classifier(
                    query_embeds=queries,
                    query_attns=queries_mask,
                    multi_embeds=docs,
                    multi_attns=docs_masks
                )
                # itm_logits[:, 1] = "match" logit for each candidate.
                if type == 0:
                    score_matrix_i2m[i,topk_idx] = itm_logits[:,1]
                elif type == 1:
                    score_matrix_t2m[i,topk_idx] = itm_logits[:,1]
                else:
                    score_matrix_m2m[i,topk_idx] = itm_logits[:,1]
            # Replace the dense matrix of this query type by the re-ranked one.
            if type == 0:
                img_sims_matrix = score_matrix_i2m
                img_sims_matrix = img_sims_matrix.cpu()
            elif type == 1:
                txt_sims_matrix = score_matrix_t2m
                txt_sims_matrix = txt_sims_matrix.cpu()
            else:
                multi_sims_matrix = score_matrix_m2m
                multi_sims_matrix = multi_sims_matrix.cpu()
        matrix_list = [img_sims_matrix,txt_sims_matrix,multi_sims_matrix]
    # InfoNCE-style cross-entropy against the one-hot relevance labels.
    loss_i2m = -torch.sum(F.log_softmax(img_sims_matrix / self.model.temp, dim=1)*labels,dim=1).mean()
    loss_t2m = -torch.sum(F.log_softmax(txt_sims_matrix / self.model.temp, dim=1)*labels,dim=1).mean()
    loss_m2m = -torch.sum(F.log_softmax(multi_sims_matrix / self.model.temp, dim=1)*labels,dim=1).mean()
    img_output_score = score(img_sims_matrix,all_queries_doc_ids)
    txt_output_score = score(txt_sims_matrix,all_queries_doc_ids)
    multi_output_score = score(multi_sims_matrix,all_queries_doc_ids)
    val_loss = (loss_i2m + loss_t2m + loss_m2m) / 3
    if self.args.ctx_prediction:
        # NOTE(review): `validation_step` never emits a "loss_ctx_pred" key and
        # `validation_step_outputs` is a list of two per-dataloader lists, so
        # this branch looks broken as written — confirm before enabling
        # ctx_prediction at validation time.
        loss_ctx_pred = torch.stack([output["loss_ctx_pred"] for output in validation_step_outputs]).mean()
        val_loss = (val_loss+loss_ctx_pred) / 2
        # NOTE(review): indentation lost in source; these two logs appear to
        # belong to the ctx_prediction branch (the second one references
        # loss_ctx_pred, which only exists here) — confirm.
        self.log("val_loss_t2m", loss_t2m, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
        self.log("val_loss_ctx_pred", loss_ctx_pred, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("val_loss", val_loss, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("img_r1", img_output_score['r1'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("img_r5", img_output_score['r5'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("img_r10", img_output_score['r10'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("img_r_mean", img_output_score['r_mean'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("img_mrr10", img_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("txt_r1", txt_output_score['r1'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("txt_r5", txt_output_score['r5'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("txt_r10", txt_output_score['r10'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("txt_r_mean", txt_output_score['r_mean'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("txt_mrr10", txt_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("multi_r1", multi_output_score['r1'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("multi_r5", multi_output_score['r5'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("multi_r10", multi_output_score['r10'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("multi_r_mean", multi_output_score['r_mean'], on_epoch=True, prog_bar=False, logger=True, sync_dist=True)
    self.log("multi_mrr10", multi_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    return {
        "val_loss":val_loss,
        "loss_t2m":loss_t2m,
        "loss_i2m":loss_i2m,
        "loss_m2m":loss_m2m,
        "img_r1":img_output_score['r1'],
        "img_r5":img_output_score['r5'],
        "img_r10":img_output_score['r10'],
        "img_r_mean":img_output_score['r_mean'],
        "img_mrr10":img_output_score['mrr10'],
        "txt_r1":txt_output_score['r1'],
        "txt_r5":txt_output_score['r5'],
        "txt_r10":txt_output_score['r10'],
        "txt_r_mean":txt_output_score['r_mean'],
        "txt_mrr10":txt_output_score['mrr10'],
        "multi_r1":multi_output_score['r1'],
        "multi_r5":multi_output_score['r5'],
        "multi_r10":multi_output_score['r10'],
        "multi_r_mean":multi_output_score['r_mean'],
        "multi_mrr10":multi_output_score['mrr10']
    }
def test_step(self, test_batch, idx, dataloader_idx=0):
if dataloader_idx == 0: # query
img_query = test_batch['img_query_str_tensor']
txt_query = test_batch['txt_query_str_tensor']
multi_query = test_batch['multi_query_str_tensor']
doc_id = test_batch['doc_id']
img_query['input_ids'] = img_query['input_ids'].view(img_query['input_ids'].shape[0],-1)
img_query['attention_mask'] = img_query['attention_mask'].view(img_query['input_ids'].shape[0],-1)
if "token_type_ids" in img_query.keys():
img_query["token_type_ids"] = img_query["token_type_ids"].view(img_query['input_ids'].shape[0],-1)
txt_query['input_ids'] = txt_query['input_ids'].view(txt_query['input_ids'].shape[0],-1)
txt_query['attention_mask'] = txt_query['attention_mask'].view(txt_query['input_ids'].shape[0],-1)
if "token_type_ids" in txt_query.keys():
txt_query["token_type_ids"] = txt_query["token_type_ids"].view(txt_query['input_ids'].shape[0],-1)
multi_query['input_ids'] = multi_query['input_ids'].view(multi_query['input_ids'].shape[0],-1)
multi_query['attention_mask'] = multi_query['attention_mask'].view(multi_query['input_ids'].shape[0],-1)
if "token_type_ids" in multi_query.keys():
multi_query["token_type_ids"] = multi_query["token_type_ids"].view(multi_query['input_ids'].shape[0],-1)
img_query_embeds, img_query_feats = self.model.output_query_feats(img_query)
txt_query_embeds, txt_query_feats = self.model.output_query_feats(txt_query)
multi_query_embeds, multi_query_feats = self.model.output_query_feats(multi_query)
result = {
"img_query_text": test_batch['img_query_str'],
"img_query_feats": img_query_feats,
"txt_query_text": test_batch['txt_query_str'],
"txt_query_feats": txt_query_feats,
"multi_query_text": test_batch['multi_query_str'],
"multi_query_feats": multi_query_feats,
"docs_id": doc_id,
}
if self.args.re_ranking:
result["img_query_embeds"] = img_query_embeds
result["img_query_att"] = img_query['attention_mask']
result["txt_query_embeds"] = txt_query_embeds
result["txt_query_att"] = txt_query['attention_mask']
result["multi_query_embeds"] = multi_query_embeds
result["multi_query_att"] = multi_query['attention_mask']
elif dataloader_idx == 1: # document
doc_text = test_batch['doc_str_tensor']
doc_id = test_batch['doc_id']
doc_image = test_batch['doc_image_tensor']
doc_text['input_ids'] = doc_text['input_ids'].view(doc_text['input_ids'].shape[0],-1)
doc_text['attention_mask'] = doc_text['attention_mask'].view(doc_text['input_ids'].shape[0],-1)
if "token_type_ids" in doc_text.keys():
doc_text["token_type_ids"] = doc_text["token_type_ids"].view(doc_text['input_ids'].shape[0],-1)
docs_embeds,docs_feats,doc_masks = self.model.output_doc_feats(doc_text,doc_image)
result = {
"docs_text": test_batch['doc_str'],
"docs_image": test_batch['image_path'],
"docs_feats": docs_feats,
"docs_id": doc_id
}
# ===== Context Prediction =====
if self.args.ctx_prediction:
prediction_scores = self.model.context(docs_embeds)
mean_prediction_scores = torch.mean(prediction_scores,1)
top_context = torch.topk(mean_prediction_scores,20,dim=1)
result["top_context"] = top_context
# ===== Re-rank =====
if self.args.re_ranking:
result["doc_embeds"] = docs_embeds
result["doc_att"] = doc_masks
return result
def test_epoch_end(self, test_step_outputs):
    """Aggregate test outputs, export features to pickle, and (in the legacy
    path) compute retrieval metrics plus a JSON dump of top-10 results.

    Mirrors ``validation_epoch_end``: outputs of dataloader 0 are queries,
    outputs of dataloader 1 are documents; under DDP everything is gathered
    across ranks and de-duplicated first.
    """
    # ---- dataloader 0: queries ------------------------------------------
    queries = test_step_outputs[0]
    all_queries_doc_ids = torch.stack([feat for output in queries for feat in output["docs_id"]])
    all_img_queries = torch.stack([feat for output in queries for feat in output["img_query_feats"]])
    all_img_queries_text = [str(feat) for output in queries for feat in output["img_query_text"]]
    all_txt_queries = torch.stack([feat for output in queries for feat in output["txt_query_feats"]])
    all_txt_queries_text = [str(feat) for output in queries for feat in output["txt_query_text"]]
    all_multi_queries = torch.stack([feat for output in queries for feat in output["multi_query_feats"]])
    all_multi_queries_text = [str(feat) for output in queries for feat in output["multi_query_text"]]
    # ---- dataloader 1: documents ----------------------------------------
    docs = test_step_outputs[1]
    all_docs_ids = torch.stack([feat for output in docs for feat in output["docs_id"]])
    all_docs = torch.stack([feat for output in docs for feat in output["docs_feats"]])
    all_docs_captions = [str(feat) for output in docs for feat in output["docs_text"]]
    all_docs_images = [str(feat) for output in docs for feat in output["docs_image"]]
    # Gather shards from all ranks, then drop DistributedSampler duplicates.
    if self.local_rank != None:
        # id
        all_queries_doc_ids_list = [torch.zeros_like(all_queries_doc_ids) for _ in range(dist.get_world_size())]
        dist.all_gather(all_queries_doc_ids_list, all_queries_doc_ids)
        all_queries_doc_ids = torch.cat(all_queries_doc_ids_list, dim=0)
        temp_all_queries_doc_ids = all_queries_doc_ids  # debug alias; unused
        all_queries_doc_ids, rm_repeat_indices = self.unique(all_queries_doc_ids)
        # image_query
        all_img_queries_list = [torch.zeros_like(all_img_queries) for _ in range(dist.get_world_size())]
        dist.all_gather(all_img_queries_list, all_img_queries)
        all_img_queries = torch.cat(all_img_queries_list, dim=0)
        all_img_queries = all_img_queries[rm_repeat_indices]
        all_img_queries_text_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(all_img_queries_text_list, all_img_queries_text)
        all_img_queries_text = [text for queries_text in all_img_queries_text_list for text in queries_text]
        all_img_queries_text = [all_img_queries_text[i] for i in rm_repeat_indices.tolist()]
        # text_query
        all_txt_queries_list = [torch.zeros_like(all_txt_queries) for _ in range(dist.get_world_size())]
        dist.all_gather(all_txt_queries_list, all_txt_queries)
        all_txt_queries = torch.cat(all_txt_queries_list, dim=0)
        all_txt_queries = all_txt_queries[rm_repeat_indices]
        all_txt_queries_text_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(all_txt_queries_text_list, all_txt_queries_text)
        all_txt_queries_text = [text for queries_text in all_txt_queries_text_list for text in queries_text]
        all_txt_queries_text = [all_txt_queries_text[i] for i in rm_repeat_indices.tolist()]
        # multi_query
        all_multi_queries_list = [torch.zeros_like(all_multi_queries) for _ in range(dist.get_world_size())]
        dist.all_gather(all_multi_queries_list, all_multi_queries)
        all_multi_queries = torch.cat(all_multi_queries_list, dim=0)
        all_multi_queries = all_multi_queries[rm_repeat_indices]
        all_multi_queries_text_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(all_multi_queries_text_list, all_multi_queries_text)
        all_multi_queries_text = [text for queries_text in all_multi_queries_text_list for text in queries_text]
        all_multi_queries_text = [all_multi_queries_text[i] for i in rm_repeat_indices.tolist()]
        # multimodal_doc
        all_doc_ids_list = [torch.zeros_like(all_docs_ids) for _ in range(dist.get_world_size())]
        dist.all_gather(all_doc_ids_list, all_docs_ids)
        temp_all_queries_list = all_doc_ids_list  # debug alias; unused
        all_docs_ids = torch.cat(all_doc_ids_list, dim=0)
        temp_all_queries_list_fix = all_docs_ids  # debug alias; unused
        all_docs_ids, rm_repeat_doc_indices = self.unique(all_docs_ids)
        all_docs_list = [torch.zeros_like(all_docs) for _ in range(dist.get_world_size())]
        dist.all_gather(all_docs_list, all_docs)
        all_docs = torch.cat(all_docs_list, dim=0)
        temp_all_docs = all_docs  # debug alias; unused
        all_docs = all_docs[rm_repeat_doc_indices]
        all_docs_captions_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(all_docs_captions_list, all_docs_captions)
        all_docs_captions = [caption for docs_captions in all_docs_captions_list for caption in docs_captions]
        temp_all_docs_captions = all_docs_captions  # debug alias; unused
        all_docs_captions = [all_docs_captions[i] for i in rm_repeat_doc_indices.tolist()]
        all_docs_image_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(all_docs_image_list, all_docs_images)
        all_docs_images = [image for docs_images in all_docs_image_list for image in docs_images]
        all_docs_images = [all_docs_images[i] for i in rm_repeat_doc_indices.tolist()]
        if self.args.re_ranking:
            all_docs_embeds = torch.stack([feat for output in docs for feat in output["doc_embeds"]])
            all_docs_masks = torch.stack([feat for output in docs for feat in output["doc_att"]])
            all_docs_embeds_list = [torch.zeros_like(all_docs_embeds) for _ in range(dist.get_world_size())]
            dist.all_gather(all_docs_embeds_list, all_docs_embeds)
            all_docs_embeds = torch.cat(all_docs_embeds_list, dim=0)
            # NOTE(review): indexed by doc IDS rather than rm_repeat_doc_indices;
            # only valid if ids are exactly the positions 0..N-1 — confirm.
            all_docs_embeds = all_docs_embeds[all_docs_ids]
            all_docs_masks_list = [torch.zeros_like(all_docs_masks) for _ in range(dist.get_world_size())]
            dist.all_gather(all_docs_masks_list, all_docs_masks)
            all_docs_masks = torch.cat(all_docs_masks_list, dim=0)
            all_docs_masks = all_docs_masks[all_docs_ids]
    # Export path: rank 0 dumps the pooled features for offline scoring.
    if self.local_rank == 0:
        output_dir = self.args.pickle_output
        doc_path = os.path.join(output_dir,"multimodal_documents.pickle")
        img_path = os.path.join(output_dir,"img_query.pickle")
        txt_path = os.path.join(output_dir,"txt_query.pickle")
        multi_path = os.path.join(output_dir,"multi_query.pickle")
        labels_path = os.path.join(output_dir,"labels.pickle")
        with open(doc_path, 'wb') as handle:
            pickle.dump(all_docs, handle, protocol=pickle.HIGHEST_PROTOCOL)
        with open(img_path, 'wb') as handle:
            pickle.dump(all_img_queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
        with open(txt_path, 'wb') as handle:
            pickle.dump(all_txt_queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
        with open(multi_path, 'wb') as handle:
            pickle.dump(all_multi_queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
        with open(labels_path, 'wb') as handle:
            pickle.dump(all_queries_doc_ids, handle, protocol=pickle.HIGHEST_PROTOCOL)
        print("Finish saving pickle. Now you can compute the score.")
        # NOTE(review): indentation was lost in the source dump; this early
        # return appears to sit inside the rank-0 branch, which makes all the
        # in-process scoring below unreachable on rank 0 (and would deadlock
        # the sync_dist logs under multi-GPU) — confirm against the original.
        return {"img":True}
    # ---- legacy in-process scoring path ----------------------------------
    img_sims_matrix = all_img_queries @ all_docs.t()
    txt_sims_matrix = all_txt_queries @ all_docs.t()
    multi_sims_matrix = all_multi_queries @ all_docs.t()
    matrix_list = [img_sims_matrix,txt_sims_matrix,multi_sims_matrix]
    if self.args.re_ranking:
        # Re-score each query's top-`test_rank` candidates with the ITM head;
        # `type` 0/1/2 = image / text / multimodal query (shadows the builtin).
        all_img_queries_embeds = torch.stack([feat for output in queries for feat in output["img_query_embeds"]])
        all_img_queries_masks = torch.stack([feat for output in queries for feat in output["img_query_att"]])
        all_txt_queries_embeds = torch.stack([feat for output in queries for feat in output["txt_query_embeds"]])
        all_txt_queries_masks = torch.stack([feat for output in queries for feat in output["txt_query_att"]])
        all_multi_queries_embeds = torch.stack([feat for output in queries for feat in output["multi_query_embeds"]])
        all_multi_queries_masks = torch.stack([feat for output in queries for feat in output["multi_query_att"]])
        score_matrix_i2m = torch.full((len(all_img_queries),len(all_docs)),-100.0).to(self.device)
        score_matrix_t2m = torch.full((len(all_txt_queries),len(all_docs)),-100.0).to(self.device)
        score_matrix_m2m = torch.full((len(all_multi_queries),len(all_docs)),-100.0).to(self.device)
        # NOTE(review): `queries` and `docs` get rebound inside this loop;
        # `docs` is read again in the ctx_prediction block below, so enabling
        # re_ranking together with ctx_prediction looks broken — confirm.
        for type in range(3):
            temp_matrix = matrix_list[type]
            for i,sims in enumerate(tqdm(temp_matrix)):
                topk_sim, topk_idx = sims.topk(k=self.args.test_rank, dim=0)
                if type == 0:
                    queries = all_img_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
                    queries_mask = all_img_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
                elif type == 1:
                    queries = all_txt_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
                    queries_mask = all_txt_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
                else:
                    queries = all_multi_queries_embeds[i].unsqueeze(0).repeat(self.args.test_rank,1,1)
                    queries_mask = all_multi_queries_masks[i].unsqueeze(0).repeat(self.args.test_rank,1)
                docs = all_docs_embeds[topk_idx]
                docs_masks = all_docs_masks[topk_idx]
                itm_logits = self.model.matching_classifier(
                    query_embeds=queries,
                    query_attns=queries_mask,
                    multi_embeds=docs,
                    multi_attns=docs_masks
                )
                # itm_logits[:, 1] = "match" logit per candidate.
                if type == 0:
                    score_matrix_i2m[i,topk_idx] = itm_logits[:,1]
                elif type == 1:
                    score_matrix_t2m[i,topk_idx] = itm_logits[:,1]
                else:
                    score_matrix_m2m[i,topk_idx] = itm_logits[:,1]
            if type == 0:
                img_sims_matrix = score_matrix_i2m
                img_sims_matrix = img_sims_matrix.cpu()
            elif type == 1:
                txt_sims_matrix = score_matrix_t2m
                txt_sims_matrix = txt_sims_matrix.cpu()
            else:
                multi_sims_matrix = score_matrix_m2m
                multi_sims_matrix = multi_sims_matrix.cpu()
        matrix_list = [img_sims_matrix,txt_sims_matrix,multi_sims_matrix]
    # img_output_score = score(img_sims_matrix,labels)
    # txt_output_score = score(txt_sims_matrix,labels)
    # multi_output_score = score(multi_sims_matrix,labels)
    img_output_score = score_v2(img_sims_matrix,all_queries_doc_ids)
    txt_output_score = score_v2(txt_sims_matrix,all_queries_doc_ids)
    multi_output_score = score_v2(multi_sims_matrix,all_queries_doc_ids)
    # output context
    if self.args.ctx_prediction:
        all_topk_context_values = torch.stack([feat for output in docs for feat in output["top_context"].values])
        all_topk_context_indices = torch.stack([feat for output in docs for feat in output["top_context"].indices])
    output_data = {"img_score":img_output_score,"txt_score":txt_output_score,"multi_score":multi_output_score,"result":[]}
    # output result
    # For every query, record its top-10 documents (per query type) for
    # qualitative inspection in the JSON dump below.
    for index,doc_scores in enumerate(img_sims_matrix):
        temp_dict = {"img":{},"txt":{},"multi":{}}
        for type in range(3):
            if type == 0:
                inds = torch.argsort(img_sims_matrix[index], descending=True)[:10].tolist()
                temp_all_queries_text = all_img_queries_text
            elif type == 1:
                inds = torch.argsort(txt_sims_matrix[index], descending=True)[:10].tolist()
                temp_all_queries_text = all_txt_queries_text
            else:
                inds = torch.argsort(multi_sims_matrix[index], descending=True)[:10].tolist()
                temp_all_queries_text = all_multi_queries_text
            temp = {"id": all_queries_doc_ids[index].item(),
                    "query":temp_all_queries_text[index],
                    "true_doc": all_docs_captions[all_queries_doc_ids[index]],
                    "true_image":all_docs_images[all_queries_doc_ids[index]],
                    }
            rank = 1e20  # overwritten by the loop below; kept as-is
            true_id = all_queries_doc_ids[index]
            if inds[0] == true_id:
                temp["correct"] = True
            else:
                temp["correct"] = False
            for rank,top_id in enumerate(inds):
                dic_key_cap = str(rank)+"_doc"
                dic_key_img = str(rank)+"_img"
                temp[dic_key_cap] = all_docs_captions[top_id]
                temp[dic_key_img] = all_docs_images[top_id]
                if self.args.ctx_prediction:
                    context = self.tokenizer.decode(all_topk_context_indices[top_id])
                    dic_key_ctx = str(rank)+"_ctx"
                    temp[dic_key_ctx] = context
            if type == 0:
                temp_dict["img"] = temp
            elif type == 1:
                temp_dict["txt"] = temp
            else:
                temp_dict["multi"] = temp
        output_data["result"].append(temp_dict)
    with open(self.args.test_output, "w") as outfile:
        json.dump(output_data, outfile, indent = 4)
    self.log("img_r1", img_output_score['r1'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("img_r5", img_output_score['r5'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("img_r10", img_output_score['r10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("img_r_mean", img_output_score['r_mean'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("img_mrr10", img_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("txt_r1", txt_output_score['r1'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("txt_r5", txt_output_score['r5'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("txt_r10", txt_output_score['r10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("txt_r_mean", txt_output_score['r_mean'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("txt_mrr10", txt_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("multi_r1", multi_output_score['r1'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("multi_r5", multi_output_score['r5'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("multi_r10", multi_output_score['r10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("multi_r_mean", multi_output_score['r_mean'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    self.log("multi_mrr10", multi_output_score['mrr10'], on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
    return {
        "img_r1":img_output_score['r1'],
        "img_r5":img_output_score['r5'],
        "img_r10":img_output_score['r10'],
        "img_r_mean":img_output_score['r_mean'],
        "img_mrr10":img_output_score['mrr10'],
        "txt_r1":txt_output_score['r1'],
        "txt_r5":txt_output_score['r5'],
        "txt_r10":txt_output_score['r10'],
        "txt_r_mean":txt_output_score['r_mean'],
        "txt_mrr10":txt_output_score['mrr10'],
        "multi_r1":multi_output_score['r1'],
        "multi_r5":multi_output_score['r5'],
        "multi_r10":multi_output_score['r10'],
        "multi_r_mean":multi_output_score['r_mean'],
        "multi_mrr10":multi_output_score['mrr10']
    }
def configure_optimizers(self):
    """Build the optimizer and (optional) LR scheduler for Lightning.

    Optimizer hyper-parameters come from ``self.arg_opt``; scheduler
    hyper-parameters from ``self.arg_sche``.

    Returns:
        dict with ``optimizer`` and ``lr_scheduler`` (``None`` when
        ``arg_sche.sched`` is not ``'cosine'``).

    Raises:
        ValueError: if ``self.arg_opt.opt`` names an unsupported optimizer
            (previously this path crashed later with an UnboundLocalError).
    """
    # ---- optimizer --------------------------------------------------------
    opt_args = dict(lr=float(self.arg_opt.lr), weight_decay=float(self.arg_opt.weight_decay))
    # BUGFIX: the optional values were guarded with hasattr(self.arg_opt, ...)
    # but then read from self.args.* — read them from self.arg_opt as well.
    if hasattr(self.arg_opt, 'opt_eps') and self.arg_opt.opt_eps is not None:
        opt_args['eps'] = self.arg_opt.opt_eps
    if hasattr(self.arg_opt, 'opt_betas') and self.arg_opt.opt_betas is not None:
        opt_args['betas'] = self.arg_opt.opt_betas
    if hasattr(self.arg_opt, 'opt_args') and self.arg_opt.opt_args is not None:
        opt_args.update(self.arg_opt.opt_args)
    if self.arg_opt.opt == "adamW":
        optimizer = torch.optim.AdamW(self.model.parameters(), **opt_args)
    else:
        raise ValueError(f"unsupported optimizer: {self.arg_opt.opt!r}")
    # ---- scheduler ---------------------------------------------------------
    lr_scheduler = None
    if self.arg_sche.sched == 'cosine':
        lr_scheduler = CosineLRScheduler(
            optimizer,
            t_initial=self.trainer.max_epochs,
            t_mul=getattr(self.arg_sche, 'lr_cycle_mul', 1.),
            lr_min=float(self.arg_sche.min_lr),
            decay_rate=self.arg_sche.decay_rate,
            warmup_lr_init=float(self.arg_sche.warmup_lr),
            warmup_t=self.arg_sche.warmup_epochs,
            cycle_limit=getattr(self.arg_sche, 'lr_cycle_limit', 1),
        )
    return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
# https://github.com/pytorch/pytorch/issues/36748
def unique(self, x, dim=-1):
    """Return the sorted unique values of ``x`` together with the index of
    the FIRST occurrence of each unique value in the original tensor.

    Used by the epoch-end hooks to drop duplicates that DistributedSampler
    padding introduces after ``all_gather``.
    """
    uniq_vals, inv_idx = torch.unique(x, return_inverse=True, dim=dim)
    positions = torch.arange(inv_idx.size(dim), dtype=inv_idx.dtype, device=inv_idx.device)
    # Walk the elements back-to-front so that, when several positions map to
    # the same unique value, the scatter below ends up keeping the EARLIEST
    # original position (last write wins on the reversed order).
    rev_inv = inv_idx.flip([dim])
    rev_pos = positions.flip([dim])
    first_occurrence = rev_inv.new_empty(uniq_vals.size(dim)).scatter_(dim, rev_inv, rev_pos)
    return uniq_vals, first_occurrence
Mr.Right | Mr.Right-main/metric.py | import numpy as np
import torch
import pdb
from torchmetrics.functional import retrieval_recall,retrieval_reciprocal_rank
@torch.no_grad()
def score(scores_t2m, query_doc_id):
    """Compute retrieval metrics for a query->document similarity matrix.

    Args:
        scores_t2m: (q_size, d_size) similarity scores; higher is better and
            column j is assumed to correspond to document id j.
        query_doc_id: (q_size,) ground-truth document index for each query.

    Returns:
        dict with recall@1/5/10, MRR@10 and the mean recall, all in percent.
    """
    ids = query_doc_id.unsqueeze(1)
    top1_i = torch.topk(scores_t2m, k=1, dim=1).indices
    top5_i = torch.topk(scores_t2m, k=5, dim=1).indices
    top10_v, top10_i = torch.topk(scores_t2m, k=10, dim=1)
    # Recall@k: fraction of queries whose true document appears in the top k.
    r1 = torch.mean(torch.sum(top1_i == ids, dim=1).float()).item() * 100
    r5 = torch.mean(torch.sum(top5_i == ids, dim=1).float()).item() * 100
    r10 = torch.mean(torch.sum(top10_i == ids, dim=1).float()).item() * 100
    rmean = np.mean([r1, r5, r10])
    top10_m = (top10_i == ids)
    # Mean reciprocal rank over the top-10 lists (torchmetrics takes the
    # candidate scores plus a boolean relevance mask per query).
    mrr10 = np.mean([retrieval_reciprocal_rank(v, m).item() for v, m in zip(top10_v, top10_m)]) * 100
    # (a dead no-op statement `r1, r5, r10, mrr10, rmean` was removed here)
    eval_result = {'r1': r1,
                   'r5': r5,
                   'r10': r10,
                   'mrr10': mrr10,
                   'r_mean': rmean,
                   }
    return eval_result
Mr.Right | Mr.Right-main/scheduler/cosine_lr.py | """ Cosine Scheduler
Cosine LR schedule with warmup, cycle/restarts, noise.
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import math
import numpy as np
import torch
from .scheduler import Scheduler
from pdb import set_trace as breakpoint
_logger = logging.getLogger(__name__)
class CosineLRScheduler(Scheduler):
    """
    Cosine decay with restarts.
    This is described in the paper https://arxiv.org/abs/1608.03983.
    Inspiration from
    https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 t_mul: float = 1.,
                 lr_min: float = 0.,
                 decay_rate: float = 1.,
                 warmup_t=0,
                 warmup_lr_init=0,
                 warmup_prefix=True,
                 cycle_limit=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True) -> None:
        # t_initial: length (epochs or updates) of the first cosine cycle.
        # t_mul: multiplier applied to each successive cycle length (SGDR's T_mult).
        # decay_rate: per-cycle multiplicative decay of the max LR.
        # warmup_t / warmup_lr_init: linear warmup length and starting LR.
        # warmup_prefix: if True, the warmup phase does not count toward cycle time.
        # cycle_limit: 0 = unlimited restarts; otherwise LR pins to lr_min after
        # that many cycles.
        # noise_*: forwarded to the Scheduler base for optional LR noise.
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)

        assert t_initial > 0
        assert lr_min >= 0
        if t_initial == 1 and t_mul == 1 and decay_rate == 1:
            _logger.warning("Cosine annealing scheduler will have no effect on the learning "
                            "rate since t_initial = t_mul = eta_mul = 1.")
        self.t_initial = t_initial
        self.t_mul = t_mul
        self.lr_min = lr_min
        self.decay_rate = decay_rate
        self.cycle_limit = cycle_limit
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Per-group linear increment so that LR reaches base_value after warmup_t steps.
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t):
        """Return the list of per-param-group LRs at (epoch or update) time ``t``."""
        if t < self.warmup_t:
            # Linear warmup from warmup_lr_init toward each base LR.
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                # Shift time so the cosine schedule starts after warmup.
                t = t - self.warmup_t

            if self.t_mul != 1:
                # Geometric cycle lengths: solve for the current cycle index i,
                # its length t_i, and the offset t_curr within that cycle.
                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
                t_i = self.t_mul ** i * self.t_initial
                t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
            else:
                # Constant cycle length.
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - (self.t_initial * i)

            # Decay both the floor and the peak LR once per completed cycle.
            gamma = self.decay_rate ** i
            lr_min = self.lr_min * gamma
            lr_max_values = [v * gamma for v in self.base_values]

            if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
                # Standard half-cosine from lr_max down to lr_min over the cycle.
                lrs = [
                    lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values
                ]
            else:
                # Past the allowed number of cycles: hold at the global minimum.
                lrs = [self.lr_min for _ in self.base_values]

        return lrs

    def get_epoch_values(self, epoch: int):
        # Only drive the schedule by epochs when configured to do so.
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None

    def get_update_values(self, num_updates: int):
        # Only drive the schedule by optimizer updates when configured to do so.
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None

    def get_cycle_length(self, cycles=0):
        """Total schedule length (in t units) covering ``cycles`` restarts.

        Defaults to ``self.cycle_limit`` (at least one cycle).
        """
        if not cycles:
            cycles = self.cycle_limit
        cycles = max(1, cycles)
        if self.t_mul == 1.0:
            return self.t_initial * cycles
        else:
            # Geometric series sum of cycle lengths, floored to an int.
            return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
| 4,027 | 33.135593 | 121 | py |
Mr.Right | Mr.Right-main/scheduler/scheduler.py | from typing import Dict, Any
import torch
class Scheduler:
    """Base class for schedulers that drive one field of optimizer param groups.

    Unlike the built-in PyTorch schedulers, this family is intended to be
    called explicitly with the current epoch / update count:

    * at the END of each epoch (``step``), before incrementing the epoch, and
    * at the END of each optimizer update (``step_update``).

    Subclasses override ``get_epoch_values`` / ``get_update_values`` and stay
    as stateless as possible; all counters live in the training loop.
    Optional seeded noise can be layered on top of the computed values.

    Based on ideas from:
    * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler
    * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 param_group_field: str,
                 noise_range_t=None,
                 noise_type='normal',
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=None,
                 initialize: bool = True) -> None:
        self.optimizer = optimizer
        self.param_group_field = param_group_field
        self._initial_param_group_field = f"initial_{param_group_field}"
        # On a fresh run, snapshot each group's current value under the
        # "initial_*" key; on resume, that snapshot must already be present.
        for i, group in enumerate(self.optimizer.param_groups):
            if initialize:
                if param_group_field not in group:
                    raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
                group.setdefault(self._initial_param_group_field, group[param_group_field])
            else:
                if self._initial_param_group_field not in group:
                    raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
        self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
        self.metric = None  # any point to having this for all?
        self.noise_range_t = noise_range_t
        self.noise_pct = noise_pct
        self.noise_type = noise_type
        self.noise_std = noise_std
        self.noise_seed = 42 if noise_seed is None else noise_seed
        # Push the base values into the groups immediately.
        self.update_groups(self.base_values)

    def state_dict(self) -> Dict[str, Any]:
        """Serializable state: everything except the optimizer reference."""
        state = dict(self.__dict__)
        state.pop('optimizer', None)
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        self.__dict__.update(state_dict)

    def get_epoch_values(self, epoch: int):
        # Subclass hook; None means "no change at epoch granularity".
        return None

    def get_update_values(self, num_updates: int):
        # Subclass hook; None means "no change at update granularity".
        return None

    def step(self, epoch: int, metric: float = None) -> None:
        """Apply the schedule for the given epoch (no-op if hook returns None)."""
        self.metric = metric
        new_values = self.get_epoch_values(epoch)
        if new_values is None:
            return
        self.update_groups(self._add_noise(new_values, epoch))

    def step_update(self, num_updates: int, metric: float = None):
        """Apply the schedule for the given optimizer-update count."""
        self.metric = metric
        new_values = self.get_update_values(num_updates)
        if new_values is None:
            return
        self.update_groups(self._add_noise(new_values, num_updates))

    def update_groups(self, values):
        """Write ``values`` into the tracked field of every param group.

        A scalar is broadcast to all groups; a list/tuple is zipped per group.
        """
        if not isinstance(values, (list, tuple)):
            values = [values] * len(self.optimizer.param_groups)
        for group, value in zip(self.optimizer.param_groups, values):
            group[self.param_group_field] = value

    def _add_noise(self, lrs, t):
        """Optionally perturb ``lrs`` with seeded, bounded multiplicative noise."""
        if self.noise_range_t is None:
            return lrs
        if isinstance(self.noise_range_t, (list, tuple)):
            apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
        else:
            apply_noise = t >= self.noise_range_t
        if not apply_noise:
            return lrs
        # Deterministic per-step noise: the generator is reseeded from t.
        g = torch.Generator()
        g.manual_seed(self.noise_seed + t)
        if self.noise_type == 'normal':
            while True:
                # Resample if noise exceeds the percent limit; brute force
                # but shouldn't spin much.
                noise = torch.randn(1, generator=g).item()
                if abs(noise) < self.noise_pct:
                    break
        else:
            noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
        return [v + v * noise for v in lrs]
| 4,750 | 43.820755 | 112 | py |
Mr.Right | Mr.Right-main/scheduler/__init__.py | from .cosine_lr import CosineLRScheduler
| 41 | 20 | 40 | py |
Mr.Right | Mr.Right-main/models/matching.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class MatchingModel(nn.Module):
    """Query-document matching head.

    A small BERT fusion stack cross-attends the query tokens over the
    multimodal document tokens, then a linear head classifies the [CLS]
    state into 2 logits (match / no-match).
    """

    def __init__(self, args, config, text_width, n_layers):
        super().__init__()
        from models.ALBEF.models.xbert import BertModel
        self.config = config
        # NOTE(review): `args` and `n_layers` are accepted but never used;
        # the fusion depth is hard-wired to 4 layers below (mutating the
        # caller's config in place) — confirm this is intentional.
        self.config.num_hidden_layers = 4
        self.config.fusion_layer = 0
        self.itm_transformer = BertModel(self.config, add_pooling_layer=False)
        self.itm_head = nn.Linear(text_width, 2)

    def forward(self, query_embeds, query_attns, multi_embeds, multi_attns):
        """Fuse query tokens with document tokens and return 2-way logits."""
        fusion_out = self.itm_transformer(
            encoder_embeds=query_embeds,
            attention_mask=query_attns,
            encoder_hidden_states=multi_embeds,
            encoder_attention_mask=multi_attns,
            return_dict=True,
            mode='fusion',
        )
        # Classify from the first (CLS) token of the fused sequence.
        cls_state = fusion_out.last_hidden_state[:, 0, :]
        return self.itm_head(cls_state)
if __name__ == "__main__":
    # Standalone smoke test: build a MatchingModel from the retrieval config
    # and run a forward pass on random query/document embeddings.
    from models.ALBEF.models.xbert import BertConfig
    import yaml
    class AttrDict(dict):
        # Dict whose keys are also readable as attributes (config.foo).
        def __init__(self, *args, **kwargs):
            super(AttrDict, self).__init__(*args, **kwargs)
            self.__dict__ = self
    with open('configs/model_retrieval.yaml') as f:
        config = yaml.safe_load(f)
    config = AttrDict(config)
    bert_config = BertConfig.from_json_file(config['bert_config'])
    m = MatchingModel(config=bert_config, text_width=bert_config.hidden_size, n_layers=1)
    # Fake batch: 5 queries of 10 tokens, 5 documents of 7 tokens, hidden 768.
    e1 = torch.rand(5, 10, 768)
    e2 = torch.rand(5, 7, 768)
    a1 = torch.ones(e1.size()[:-1],dtype=torch.long)
    a2 = torch.ones(e2.size()[:-1],dtype=torch.long)
    o = m(e1, a1, e2, a2)
    print(o.shape)
| 1,817 | 29.3 | 89 | py |
Mr.Right | Mr.Right-main/models/model.py | import pdb
import torch
import torch.nn.functional as F
from torch import nn
from models.ALBEF.models.model_retrieval import ALBEF
from models.ALBEF.models.vit import interpolate_pos_embed
from models.ALBEF.models.xbert import BertOnlyMLMHead,BertConfig
from models.ViLT.vilt.modules import ViLTransformerSS
from models.METER.meter.modules import METERTransformerSS
from models.matching import MatchingModel
class TextToMultiModel(nn.Module):
    """Text-query -> multimodal-document retrieval model.

    Wraps one of three pretrained backbones (ALBEF / ViLT / METER) and adds:
    * projection heads mapping query and fused-document embeddings into a
      shared ``embed_dim`` space,
    * a momentum-free feature queue (``multi_queue`` + ``idx_queue``) used as
      extra negatives for the contrastive loss,
    * a ``MatchingModel`` head for query-document matching (ITM) loss,
    * a ``BertOnlyMLMHead`` (``self.context``) for context-word prediction.

    ``args.pretrain`` selects the backbone; ``args.embeds_feats`` selects how
    pooled features are formed ("avg", "cls", or "iavg_tcls").
    """
    def __init__(self,
                 args = None,
                 config = None,
                 tokenizer = None,
                 ):
        super().__init__()
        self.args = args
        self.tokenizer = tokenizer
        embed_dim = config.embed_dim
        # Choose pretrain backbone.
        if args.pretrain == "ALBEF":
            self.model = ALBEF(config.text_encoder,tokenizer,config)
            text_width = self.model.text_encoder.config.hidden_size
            bert_config = self.model.bert_config
            if config.checkpoint!="":
                checkpoint = torch.load(config.checkpoint, map_location='cpu')
                state_dict = checkpoint['model']
                # reshape positional embedding to accomodate for image resolution change
                pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],self.model.visual_encoder)
                state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
                # Strip the "bert." prefix so keys match our encoder's names.
                for key in list(state_dict.keys()):
                    if 'bert' in key:
                        encoder_key = key.replace('bert.','')
                        state_dict[encoder_key] = state_dict[key]
                        del state_dict[key]
                msg = self.model.load_state_dict(state_dict,strict=False)
                print('load checkpoint from %s'%config.checkpoint)
                print(msg)
        elif args.pretrain == "ViLT":
            self.model = ViLTransformerSS(config)
            text_width = config.hidden_size
            bert_config = BertConfig.from_json_file(config.bert_config)
        elif args.pretrain == "METER":
            self.model = METERTransformerSS(config)
            text_width = config.hidden_size
            bert_config = BertConfig.from_json_file(config.bert_config)
        # METER's "cls"/"iavg_tcls" features concatenate a text and an image
        # vector, so the projection input is twice the hidden size.
        if args.pretrain == "METER" and (self.args.embeds_feats == "cls" or self.args.embeds_feats == "iavg_tcls"):
            self.multi_proj = nn.Linear(config.hidden_size*2, embed_dim)
        else:
            self.multi_proj = nn.Linear(config.hidden_size, embed_dim)
        self.query_proj = nn.Linear(text_width, embed_dim)
        if 'vocab_size' in config:
            bert_config.vocab_size = config.vocab_size
        # MLM-style head used for the context-prediction loss.
        self.context = BertOnlyMLMHead(bert_config)
        # Learnable contrastive temperature.
        self.temp = nn.Parameter(torch.ones([]) * config['temp'])
        self.queue_size = config['queue_size']
        # Matching head (ITM).
        self.matching_classifier = MatchingModel(args = args, config=bert_config, text_width=text_width, n_layers=1)
        # Create the negative-sample queue (features + their document ids).
        # NOTE(review): the query-side queue is commented out; see
        # _dequeue_and_enqueue, which would fail if query_feat is passed.
        self.register_buffer("multi_queue", torch.randn(embed_dim, self.queue_size))
        # self.register_buffer("query_queue", torch.randn(embed_dim, self.queue_size))
        self.register_buffer("idx_queue", torch.full((1,self.queue_size),-100))
        self.register_buffer("queue_num", torch.zeros(1, dtype=torch.long))
        self.multi_queue = nn.functional.normalize(self.multi_queue, dim=0)
        # self.query_queue = nn.functional.normalize(self.query_queue, dim=0)
    def forward_feats(self, doc_image, doc_text, query):
        """Run the backbone and return token embeddings + pooled features.

        Returns a dict with per-token ``query_embeds``/``multi_embeds`` (and
        their attention masks) plus pooled, projected, L2-normalized
        ``query_feat``/``multi_feat`` according to ``args.embeds_feats``.
        """
        result = {}
        if self.args.pretrain == "ALBEF":
            output = self.model(doc_image,doc_text,query)
        elif self.args.pretrain == "ViLT":
            output = self.model(batch={
                "image":doc_image,
                "text_ids":doc_text['input_ids'],
                "text_masks":doc_text['attention_mask'],
                "query_ids":query['input_ids'],
                "query_masks":query['attention_mask'],
            })
        elif self.args.pretrain == "METER":
            output = self.model(batch={
                "image":doc_image,
                "text_ids":doc_text['input_ids'],
                "text_masks":doc_text['attention_mask'],
                "query_ids":query['input_ids'],
                "query_masks":query['attention_mask'],
            })
        result["query_embeds"] = output["query_embeds"]
        result["query_atts"] = output["query_atts"]
        result["multi_embeds"] = output["multi_embeds"]
        result["multi_atts"] = output["multi_atts"]
        # How to pool the token embeddings into a single feature vector.
        if self.args.embeds_feats == "avg":
            # Mask-weighted mean over tokens, then project + normalize.
            avg_query_embeds = (result["query_embeds"] * result["query_atts"].unsqueeze(-1)).sum(dim=1) / result["query_atts"].sum(dim=1).unsqueeze(-1)
            result["query_feat"] = F.normalize(self.query_proj(avg_query_embeds),dim=-1)
            avg_multi_embeds = (result["multi_embeds"] * result["multi_atts"].unsqueeze(-1)).sum(dim=1) / result["multi_atts"].sum(dim=1).unsqueeze(-1)
            result["multi_feat"] = F.normalize(self.multi_proj(avg_multi_embeds),dim=-1)
        elif self.args.embeds_feats == "cls":
            result["query_feat"] = output["query_cls"].float()
            if self.args.pretrain == "METER": # METER has two cls token
                multi_embeds = torch.cat([output["text_cls"], output["img_cls"]], dim=-1).float()
                result["multi_feat"] = F.normalize(self.multi_proj(multi_embeds),dim=-1)
            else:
                result["multi_feat"] = output["multi_cls"].float()
        elif self.args.embeds_feats == "iavg_tcls":
            # Text CLS token concatenated with the mask-averaged image tokens.
            result["query_feat"] = output["query_cls"].float()
            text_cls = output["text_cls"]
            avg_img_embeds = (output["image_feats"] * output["image_masks"].unsqueeze(-1)).sum(dim=1) / output["image_masks"].sum(dim=1).unsqueeze(-1)
            concat_embeds = torch.cat([text_cls, avg_img_embeds], dim=-1).float()
            avg_multi_feat = F.normalize(self.multi_proj(concat_embeds),dim=-1)
            result["multi_feat"] = avg_multi_feat.float()
        return result
    def forward(self, query, doc_text, doc_image, doc_id, context_labels=None, matching=None, matchingv2=None):
        """Training forward pass.

        Returns ``(loss_ita, loss_ctx_pred, loss_itm)``:
        * loss_ita — contrastive query->document loss against in-batch plus
          queued negatives (positives matched by ``doc_id``),
        * loss_ctx_pred — context-word prediction loss (if ``context_labels``),
        * loss_itm — matching-classifier loss over hard negatives mined from
          the similarity matrix (if ``matching``; ``matchingv2`` adds
          image-swapped and text-swapped negatives, 4-way concat).
        """
        # Flatten possible extra dims so inputs are (batch, seq_len).
        query['input_ids'] = query['input_ids'].view(query['input_ids'].shape[0],-1)
        query['attention_mask'] = query['attention_mask'].view(query['input_ids'].shape[0],-1)
        if "token_type_ids" in query:
            query['token_type_ids'] = query['token_type_ids'].view(query['input_ids'].shape[0],-1)
        doc_text['input_ids'] = doc_text['input_ids'].view(doc_text['input_ids'].shape[0],-1)
        doc_text['attention_mask'] = doc_text['attention_mask'].view(doc_text['input_ids'].shape[0],-1)
        if "token_type_ids" in doc_text:
            doc_text['token_type_ids'] = doc_text['token_type_ids'].view(doc_text['input_ids'].shape[0],-1)
        result = self.forward_feats(doc_image, doc_text, query)
        multi_feat = result['multi_feat'] # B, 1, H
        query_feat = result['query_feat'] # B, 1, H
        multi_embeds = result['multi_embeds'] # B, L, H
        multi_atts = result['multi_atts'] # B, L, H
        query_embeds = result['query_embeds']
        query_atts = result['query_atts']
        # [TODO]
        # why only query with doc similarity
        # why not add doc with query similarity?
        with torch.no_grad():
            # Candidate pool = current batch docs + queued docs (no grad to queue).
            multi_feat_all = torch.cat([multi_feat.t(),self.multi_queue.clone().detach()],dim=1)
            # query_feat_all = torch.cat([query_feat.t(),self.query_queue.clone().detach()],dim=1)
        sim_q2m = query_feat @ multi_feat_all / self.temp
        # sim_m2q = multi_feat @ query_feat_all / self.temp
        idx = doc_id.view(-1,1) # batch_size, id [0,1,2,0]
        idx_all = torch.cat([idx.t(), self.idx_queue.clone().detach()],dim=1) # 1, queue_size [[0,1,2,0,12,13,1,0,2]]
        # Soft targets: every candidate sharing the doc id is a positive.
        pos_idx = torch.eq(idx, idx_all).float() # 1,queue_size [[1,0,0,1,0,0,0,1,0],...]
        sim_targets = pos_idx / pos_idx.sum(1,keepdim=True) # normalzie [[0.33,0,0,0.33,0,0,0,0.33,0],...]
        loss_q2m = -torch.sum(F.log_softmax(sim_q2m, dim=1)*sim_targets,dim=1).mean()
        # loss_m2q = -torch.sum(F.log_softmax(sim_m2q, dim=1)*sim_targets,dim=1).mean()
        # loss_ita = (loss_q2m + loss_m2q)/2
        loss_ita = loss_q2m
        self._dequeue_and_enqueue(multi_feat, None, idx) # text_feat_m
        # self._dequeue_and_enqueue(multi_feat, query_feat, idx) # text_feat_m
        # ===== Matching Loss =====
        # Matching Classification
        # Pos (multi_embeds, query_embeds)
        # Neg (multi_embeds_neg, query_embeds)
        # Neg (multi_embeds_image+multi_embeds_neg_text ,query_embeds)
        # Neg (multi_embeds_text+multi_embeds_neg_image ,query_embeds)
        # Neg (multi_embeds, query_embeds_neg)
        loss_itm = 0.0
        if matching:
            with torch.no_grad():
                # Hard-negative sampling weights from in-batch similarities;
                # true positives (same doc id) are masked to ~zero probability.
                bs = doc_image.size(0)
                mask = torch.eq(idx, idx.T)
                weights_q2m = F.softmax(sim_q2m[:, :bs]+1e-4, dim=1)
                weights_q2m.masked_fill_(mask, 1e-10)
                # weights_m2q = F.softmax(sim_m2q[:, :bs]+1e-4, dim=1)
                # weights_m2q.masked_fill_(mask, 1e-10)
            # [New]
            if matchingv2:
                neg_doc_text_inps = []
                neg_doc_text_atts = []
                neg_doc_image = []
            multi_embeds_neg = []
            multi_atts_neg = []
            for b in range(bs):
                # Sample one hard negative document per query.
                neg_idx = torch.multinomial(weights_q2m[b], 1).item()
                multi_embeds_neg.append(multi_embeds[neg_idx])
                multi_atts_neg.append(multi_atts[neg_idx])
                #[New]
                if matchingv2:
                    neg_doc_text_inps.append(doc_text['input_ids'][neg_idx])
                    neg_doc_text_atts.append(doc_text['attention_mask'][neg_idx])
                    neg_doc_image.append(doc_image[neg_idx])
            multi_embeds_neg = torch.stack(multi_embeds_neg, dim=0)
            multi_atts_neg = torch.stack(multi_atts_neg, dim=0)
            # [New] v2: re-encode with swapped image / swapped text negatives.
            if matchingv2:
                neg_doc_text_inps = torch.stack(neg_doc_text_inps, dim=0)
                neg_doc_text_atts = torch.stack(neg_doc_text_atts, dim=0)
                neg_doc_text = {'input_ids': neg_doc_text_inps, 'attention_mask': neg_doc_text_atts}
                neg_doc_image = torch.stack(neg_doc_image, dim=0)
                result_neg_image = self.forward_feats(neg_doc_image, doc_text ,query)
                multi_embeds_image_neg = result_neg_image['multi_embeds']
                multi_atts_image_neg = result_neg_image['multi_atts']
                result_neg_text = self.forward_feats(doc_image, neg_doc_text ,query)
                multi_embeds_text_neg = result_neg_text['multi_embeds']
                multi_atts_text_neg = result_neg_text['multi_atts']
            # Quadra: 1 positive + 3 kinds of negatives per query.
            if matchingv2:
                query_embeds_matching = torch.cat([query_embeds, query_embeds, query_embeds, query_embeds], dim=0)
                query_attn_matching = torch.cat([query_atts, query_atts, query_atts, query_atts], dim=0)
                multi_embeds_matching = torch.cat([multi_embeds, multi_embeds_neg, multi_embeds_image_neg, multi_embeds_text_neg], dim=0)
                multi_attn_matching = torch.cat([multi_atts, multi_atts_neg, multi_atts_image_neg, multi_atts_text_neg], dim=0)
            else:
                # Binary: 1 positive + 1 sampled negative per query.
                query_embeds_matching = torch.cat([query_embeds, query_embeds], dim=0)
                query_attn_matching = torch.cat([query_atts, query_atts], dim=0)
                multi_embeds_matching = torch.cat([multi_embeds, multi_embeds_neg], dim=0)
                multi_attn_matching = torch.cat([multi_atts, multi_atts_neg], dim=0)
            # Triple
            # query_embeds_matching = torch.cat([query_embeds, query_embeds, query_embeds_neg], dim=0)
            # query_attn_matching = torch.cat([query_atts, query_atts, query_atts_neg], dim=0)
            # multi_embeds_matching = torch.cat([multi_embeds, multi_embeds_neg, multi_embeds], dim=0)
            # multi_attn_matching = torch.cat([multi_atts, multi_atts_neg, multi_atts], dim=0)
            itm_logits = self.matching_classifier(
                query_embeds=query_embeds_matching,
                query_attns=query_attn_matching,
                multi_embeds=multi_embeds_matching,
                multi_attns=multi_attn_matching
            )
            # Labels: first bs rows are positives, the rest negatives.
            itm_labels = torch.cat([torch.ones(bs,dtype=torch.long),
                                    torch.zeros(bs * 3 if matchingv2 else bs * 1,dtype=torch.long)],
                                   dim=0).to(doc_image.device)
            # [TODO] Binary_CE
            loss_itm = F.cross_entropy(itm_logits, itm_labels)
        loss_ctx_pred = 0.0
        # ===== Context Prediction Loss =====
        if context_labels is not None:
            # Soft-label cross entropy over the mean token-level vocab scores.
            ctx_targets = context_labels / context_labels.sum(1,keepdim=True)
            prediction_scores = self.context(multi_embeds)
            mean_prediction_scores = torch.mean(prediction_scores,1)
            loss_ctx_pred = -torch.sum(F.log_softmax(mean_prediction_scores, dim=1)*ctx_targets,dim=1).mean()
        return loss_ita, loss_ctx_pred, loss_itm
    def output_itm_logits(self, query, doc_text, doc_image):
        """Inference helper: matching-classifier logits for paired inputs."""
        # Same (batch, seq_len) flattening as in forward().
        query['input_ids'] = query['input_ids'].view(query['input_ids'].shape[0],-1)
        query['attention_mask'] = query['attention_mask'].view(query['input_ids'].shape[0],-1)
        if "token_type_ids" in query:
            query['token_type_ids'] = query['token_type_ids'].view(query['input_ids'].shape[0],-1)
        doc_text['input_ids'] = doc_text['input_ids'].view(doc_text['input_ids'].shape[0],-1)
        doc_text['attention_mask'] = doc_text['attention_mask'].view(doc_text['input_ids'].shape[0],-1)
        if "token_type_ids" in doc_text:
            doc_text['token_type_ids'] = doc_text['token_type_ids'].view(doc_text['input_ids'].shape[0],-1)
        result = self.forward_feats(doc_image, doc_text, query)
        multi_feat = result['multi_feat'] # B, 1, H
        query_feat = result['query_feat'] # B, 1, H
        multi_embeds = result['multi_embeds'] # B, L, H
        multi_atts = result['multi_atts'] # B, L, H
        query_embeds = result['query_embeds']
        query_atts = result['query_atts']
        itm_logits = self.matching_classifier(
            query_embeds=query_embeds,
            query_attns=query_atts,
            multi_embeds=multi_embeds,
            multi_attns=multi_atts
        )
        return itm_logits
    @torch.no_grad()
    def output_query_feats(self,query):
        """Encode a text query alone; returns (token_embeds, pooled_feat)."""
        if self.args.pretrain == "ALBEF":
            query_output = self.model.text_encoder(query['input_ids'], attention_mask = query["attention_mask"], mode='text')
            query_embeds = query_output.last_hidden_state
            query_masks = query["attention_mask"]
            query_cls = F.normalize(self.model.text_proj(query_embeds[:,0,:]),dim=-1)
        elif self.args.pretrain == "ViLT":
            # Run ViLT's transformer blocks over text tokens only.
            query_embeds = self.model.text_embeddings(query['input_ids'])
            query_masks = query['attention_mask']
            for i, blk in enumerate(self.model.transformer.blocks):
                query_embeds, _ = blk(query_embeds, mask=query_masks)
            query_embeds = self.model.transformer.norm(query_embeds)
            query_cls = F.normalize(self.query_proj(query_embeds[:,0,:]),dim=-1)
        elif self.args.pretrain == "METER":
            # Run METER's text transformer + cross-modal text head only.
            query_embeds = self.model.text_transformer.embeddings(input_ids=query['input_ids'])
            query_masks = query['attention_mask']
            device = query_embeds.device
            input_shape = query_masks.size()
            extend_query_masks = self.model.text_transformer.get_extended_attention_mask(query_masks, input_shape, device)
            for layer in self.model.text_transformer.encoder.layer:
                query_embeds = layer(query_embeds, extend_query_masks)[0]
            query_embeds = self.model.cross_modal_text_transform(query_embeds)
            query_cls = self.model.cross_modal_text_pooler(query_embeds)
        if self.args.embeds_feats == "avg":
            avg_query_embeds = (query_embeds * query_masks.unsqueeze(-1)).sum(dim=1) / query_masks.sum(dim=1).unsqueeze(-1)
            query_feat = F.normalize(self.query_proj(avg_query_embeds),dim=-1)
        elif self.args.embeds_feats == "cls":
            query_feat = query_cls.float()
        elif self.args.embeds_feats == "iavg_tcls":
            query_feat = query_cls.float()
        return query_embeds,query_feat
    @torch.no_grad()
    def output_doc_feats(self,doc_text,doc_image):
        """Encode a document (text+image); returns (token_embeds, pooled_feat, atts)."""
        if self.args.pretrain == "ALBEF":
            image_embeds = self.model.visual_encoder(doc_image)
            image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(doc_image.device)
            doc_text_output = self.model.text_encoder(doc_text['input_ids'], attention_mask = doc_text["attention_mask"], mode='text')
            doc_text_embeds = doc_text_output.last_hidden_state
            # Fuse text over image features.
            output = self.model.text_encoder(encoder_embeds = doc_text_embeds,
                                            attention_mask = doc_text["attention_mask"],
                                            encoder_hidden_states = image_embeds,
                                            encoder_attention_mask = image_atts,
                                            return_dict = True,
                                            mode = 'fusion',
                                            )
            multi_embeds = output.last_hidden_state
            multi_atts = doc_text["attention_mask"]
            multi_cls = F.normalize(self.model.text_proj(multi_embeds[:,0,:]),dim=-1)
        elif self.args.pretrain == "ViLT":
            output = self.model.output_multi(batch={
                "image":doc_image,
                "text_ids":doc_text['input_ids'],
                "text_masks":doc_text['attention_mask'],
            })
            multi_embeds = output["multi_embeds"]
            multi_atts = output["multi_atts"]
            multi_cls = output["multi_cls"]
        elif self.args.pretrain == "METER":
            output = self.model.output_multi(batch={
                "image":doc_image,
                "text_ids":doc_text['input_ids'],
                "text_masks":doc_text['attention_mask'],
            })
            multi_embeds = output["multi_embeds"]
            multi_atts = output["multi_atts"]
            text_cls = output["text_cls"]
            img_cls = output["cls_feats_image"]
        # Pooling mirrors forward_feats (see args.embeds_feats).
        if self.args.embeds_feats == "avg":
            avg_multi_embeds = (multi_embeds * multi_atts.unsqueeze(-1)).sum(dim=1) / multi_atts.sum(dim=1).unsqueeze(-1)
            avg_multi_feat = F.normalize(self.multi_proj(avg_multi_embeds),dim=-1)
        elif self.args.embeds_feats == "cls":
            if self.args.pretrain == "METER": # METER has two cls token
                multi_embeds = torch.cat([text_cls, img_cls], dim=-1).float()
                avg_multi_feat = F.normalize(self.multi_proj(multi_embeds),dim=-1)
            else:
                avg_multi_feat = multi_cls.float()
        elif self.args.embeds_feats == "iavg_tcls":
            text_cls = output["text_cls"]
            # NOTE(review): img_embeds below is assigned but never used.
            img_embeds = output["image_feats"]
            avg_img_embeds = (output["image_feats"] * output["image_masks"].unsqueeze(-1)).sum(dim=1) / output["image_masks"].sum(dim=1).unsqueeze(-1)
            concat_embeds = torch.cat([text_cls, avg_img_embeds], dim=-1).float()
            avg_multi_feat = F.normalize(self.multi_proj(concat_embeds),dim=-1)
        return multi_embeds,avg_multi_feat, multi_atts
    @torch.no_grad()
    def _dequeue_and_enqueue(self, multi_feat, query_feat, idx):
        """Insert the (all-gathered) batch features and ids into the ring queue.

        NOTE(review): the ``query_feat is not None`` branch writes to
        ``self.query_queue``, whose buffer registration is commented out in
        __init__; calling it with a non-None query_feat would raise
        AttributeError. Currently forward() always passes None.
        """
        # gather keys before updating queue
        idxs = concat_all_gather(idx)
        if multi_feat is not None:
            multi_feats = concat_all_gather(multi_feat)
            batch_size = multi_feats.shape[0]
            ptr = int(self.queue_num)
            assert self.queue_size % batch_size == 0  # for simplicity
            self.multi_queue[:, ptr:ptr + batch_size] = multi_feats.T
        if query_feat is not None:
            query_feats = concat_all_gather(query_feat)
            batch_size = query_feats.shape[0]
            ptr = int(self.queue_num)
            assert self.queue_size % batch_size == 0  # for simplicity
            self.query_queue[:, ptr:ptr + batch_size] = query_feats.T
        # replace the keys at ptr (dequeue and enqueue)
        self.idx_queue[:, ptr:ptr + batch_size] = idxs.T
        ptr = (ptr + batch_size) % self.queue_size  # move pointer
        self.queue_num[0] = ptr
@torch.no_grad()
def concat_all_gather(tensor):
    """Gather ``tensor`` from every rank and concatenate along dim 0.

    *** Warning ***: torch.distributed.all_gather has no gradient, hence the
    no_grad decorator — gradients do not flow back through the gathered copies.
    """
    group = torch.distributed.group.WORLD
    world_size = torch.distributed.get_world_size(group)
    # One receive buffer per rank; all_gather overwrites them in rank order.
    buffers = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(buffers, tensor, group, async_op=False)
    return torch.cat(buffers, dim=0)
| 21,799 | 50.294118 | 151 | py |
Mr.Right | Mr.Right-main/models/METER/azure_distributed_run.py | import os
import copy
import pytorch_lightning as pl
import os
os.environ["NCCL_DEBUG"] = "INFO"
from meter.config import ex
from meter.modules import METERTransformerSS
from meter.datamodules.multitask_datamodule import MTDataModule
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (20480, rlimit[1]))
from pytorch_lightning.plugins.environments import ClusterEnvironment
from pytorch_lightning.plugins.training_type import DDPPlugin
import torch.distributed as dist
class MyCluster(ClusterEnvironment):
    """Lightning cluster environment backed by Open MPI environment variables.

    Used when processes are launched by mpirun (e.g. on Azure), so rank and
    world-size information comes from OMPI_* variables rather than from
    Lightning spawning children itself.
    """
    def creates_children(self) -> bool:
        # return True if the cluster is managed (you don't launch processes yourself)
        return True
    def master_address(self):
        # Rendezvous host, provided by the job scheduler.
        return os.environ['MASTER_ADDR']
    def master_port(self) -> int:
        return int(os.environ["MASTER_PORT"])
    def world_size(self):
        # Total number of MPI processes across all nodes.
        return int(os.environ['OMPI_COMM_WORLD_SIZE'])
    def global_rank(self) -> int:
        return int(os.environ['OMPI_COMM_WORLD_RANK'])
    def local_rank(self) -> int:
        # Rank of this process on its own node.
        return int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
    def node_rank(self) -> int:
        return int(os.environ["OMPI_COMM_WORLD_NODE_RANK"])
    def set_global_rank(self, rank: int) -> None:
        # Rank is fixed by MPI; ignore Lightning's attempts to override it.
        pass
    def set_world_size(self, size: int) -> None:
        # World size is fixed by MPI; intentionally a no-op.
        pass
class MyDDPPlugin(DDPPlugin):
    """DDP plugin that initializes the process group from MPI env variables."""

    def init_ddp_connection(self, global_rank = None, world_size = None) -> None:
        # Build the TCP rendezvous endpoint from the scheduler-provided
        # address/port; rank and world size come from Open MPI.
        host = os.environ['MASTER_ADDR']
        port = os.environ['MASTER_PORT']
        master_uri = "tcp://%s:%s" % (host, port)
        dist.init_process_group(
            backend=self.torch_distributed_backend,
            init_method=master_uri,
            world_size=int(os.environ['OMPI_COMM_WORLD_SIZE']),
            rank=int(os.environ['OMPI_COMM_WORLD_RANK']),
        )
@ex.automain
def main(_config):
    """Sacred entry point for distributed (Azure/MPI) METER training.

    `_config` is injected by sacred from meter.config. Translates Open MPI
    environment variables into the ones PyTorch/Lightning expect, then builds
    the datamodule, model, and Trainer and runs fit or test.
    """
    os.environ["NCCL_DEBUG"] = "INFO"
    # Topology as reported by the MPI launcher.
    world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
    local_size = int(os.environ['OMPI_COMM_WORLD_LOCAL_SIZE'])
    global_rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
    master_addr = os.environ['MASTER_ADDR']
    master_port = os.environ['MASTER_PORT']
    # set environment variables for 'env://' — must happen before any
    # process-group / datamodule setup.
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['NODE_RANK'] = str(os.environ["OMPI_COMM_WORLD_NODE_RANK"])
    _config = copy.deepcopy(_config)
    pl.seed_everything(_config["seed"])
    dm = MTDataModule(_config, dist=True)
    model = METERTransformerSS(_config)
    exp_name = f'{_config["exp_name"]}'
    os.makedirs(_config["log_dir"], exist_ok=True)
    # Keep the best checkpoint by validation metric, plus the last one.
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        save_top_k=1,
        verbose=True,
        monitor="val/the_metric",
        mode="max",
        save_last=True,
    )
    logger = pl.loggers.TensorBoardLogger(
        _config["log_dir"],
        name=f'{exp_name}_seed{_config["seed"]}_from_{_config["load_path"].split("/")[-1][:-5]}',
    )
    lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
    callbacks = [checkpoint_callback, lr_callback]
    num_gpus = (
        _config["num_gpus"]
        if isinstance(_config["num_gpus"], int)
        else len(_config["num_gpus"])
    )
    # Gradient accumulation to reach the desired effective batch size.
    grad_steps = max(_config["batch_size"] // (
        _config["per_gpu_batchsize"] * num_gpus * _config["num_nodes"]
    ), 1)
    max_steps = _config["max_steps"] if _config["max_steps"] is not None else None
    trainer = pl.Trainer(
        # Custom MPI-aware cluster environment + DDP init (defined above).
        plugins=[MyCluster(), MyDDPPlugin()],
        gpus=_config["num_gpus"],
        num_nodes=_config["num_nodes"],
        precision=_config["precision"],
        accelerator="ddp",
        benchmark=True,
        deterministic=True,
        # When training by steps, set an effectively unlimited epoch cap.
        max_epochs=_config["max_epoch"] if max_steps is None else 1000,
        max_steps=max_steps,
        callbacks=callbacks,
        logger=logger,
        prepare_data_per_node=False,
        replace_sampler_ddp=False,
        accumulate_grad_batches=grad_steps,
        log_every_n_steps=10,
        flush_logs_every_n_steps=10,
        resume_from_checkpoint=_config["resume_from"],
        weights_summary="top",
        fast_dev_run=_config["fast_dev_run"],
        val_check_interval=_config["val_check_interval"],
    )
    if not _config["test_only"]:
        trainer.fit(model, datamodule=dm)
    else:
        trainer.test(model, datamodule=dm)
| 4,388 | 31.272059 | 97 | py |
Mr.Right | Mr.Right-main/models/METER/setup.py | from setuptools import setup, find_packages
# Packaging metadata for the METER library (pip install -e .).
setup(
    name="meter",
    # Ship only the library code; exclude data/results/tooling directories.
    packages=find_packages(
        exclude=[".dfc", ".vscode", "dataset", "notebooks", "result", "scripts"]
    ),
    version="0.1.0",
    license="MIT",
    description="METER: Multimodal End-to-end TransformER",
    author="Microsoft Corporation",
    author_email="zdou0830@gmail.com",
    url="https://github.com/zdou0830/METER",
    keywords=["vision and language pretraining"],
    install_requires=["torch", "pytorch_lightning"],
)
| 511 | 29.117647 | 80 | py |
Mr.Right | Mr.Right-main/models/METER/run.py | import os
import copy
import pytorch_lightning as pl
import os
os.environ["NCCL_DEBUG"] = "INFO"
from meter.config import ex
from meter.modules import METERTransformerSS
from meter.datamodules.multitask_datamodule import MTDataModule
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (20480, rlimit[1]))
@ex.automain
def main(_config):
    """Sacred entry point for single-cluster METER training/testing.

    `_config` is injected by sacred from meter.config. Builds the multitask
    datamodule, model, and a DDP Trainer, then runs fit (or test when
    ``test_only`` is set).
    """
    _config = copy.deepcopy(_config)
    pl.seed_everything(_config["seed"])
    dm = MTDataModule(_config, dist=True)
    model = METERTransformerSS(_config)
    exp_name = f'{_config["exp_name"]}'
    os.makedirs(_config["log_dir"], exist_ok=True)
    # Keep the best checkpoint by validation metric, plus the last one.
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        save_top_k=1,
        verbose=True,
        monitor="val/the_metric",
        mode="max",
        save_last=True,
    )
    logger = pl.loggers.TensorBoardLogger(
        _config["log_dir"],
        name=f'{exp_name}_seed{_config["seed"]}_from_{_config["load_path"].split("/")[-1][:-5]}',
    )
    lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
    callbacks = [checkpoint_callback, lr_callback]
    num_gpus = (
        _config["num_gpus"]
        if isinstance(_config["num_gpus"], int)
        else len(_config["num_gpus"])
    )
    # Gradient accumulation to reach the desired effective batch size.
    grad_steps = max(_config["batch_size"] // (
        _config["per_gpu_batchsize"] * num_gpus * _config["num_nodes"]
    ), 1)
    max_steps = _config["max_steps"] if _config["max_steps"] is not None else None
    trainer = pl.Trainer(
        gpus=_config["num_gpus"],
        num_nodes=_config["num_nodes"],
        precision=_config["precision"],
        accelerator="ddp",
        benchmark=True,
        deterministic=True,
        # When training by steps, set an effectively unlimited epoch cap.
        max_epochs=_config["max_epoch"] if max_steps is None else 1000,
        max_steps=max_steps,
        callbacks=callbacks,
        logger=logger,
        prepare_data_per_node=False,
        replace_sampler_ddp=False,
        accumulate_grad_batches=grad_steps,
        log_every_n_steps=10,
        flush_logs_every_n_steps=10,
        resume_from_checkpoint=_config["resume_from"],
        weights_summary="top",
        fast_dev_run=_config["fast_dev_run"],
        val_check_interval=_config["val_check_interval"],
    )
    if not _config["test_only"]:
        trainer.fit(model, datamodule=dm)
    else:
        trainer.test(model, datamodule=dm)
| 2,373 | 29.050633 | 97 | py |
Mr.Right | Mr.Right-main/models/METER/meter/config.py | from sacred import Experiment
ex = Experiment("METER")
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"vcr": 0,
"vcr_qar": 0,
"nlvr2": 0,
"irtr": 0,
"contras": 0,
"snli": 0,
}
ret.update(d)
return ret
@ex.config
def config():
    # Base configuration. Sacred turns every local assignment in this
    # function into a config entry; do not rename these variables.
    exp_name = "meter"
    seed = 0
    datasets = ["coco", "vg", "sbu", "gcc"]
    loss_names = _loss_names({"itm": 1, "mlm": 1})
    batch_size = 4096  # this is a desired batch size; pl trainer will accumulate gradients when per step batch is smaller.
    # Image setting
    train_transform_keys = ["clip"]
    val_transform_keys = ["clip"]
    image_size = 224
    patch_size = 32
    draw_false_image = 1
    image_only = False
    resolution_before = 224
    # Text Setting
    vqav2_label_size = 3129
    max_text_len = 40
    tokenizer = "bert-base-uncased"
    vocab_size = 30522
    whole_word_masking = False # note that whole_word_masking does not work for RoBERTa
    mlm_prob = 0.15
    draw_false_text = 0
    # Transformer Setting
    num_top_layer = 6
    input_image_embed_size = 768
    input_text_embed_size = 768
    vit = 'ViT-B/32'
    hidden_size = 768
    num_heads = 12
    num_layers = 6
    mlp_ratio = 4
    drop_rate = 0.1
    # Optimizer Setting
    optim_type = "adamw"
    learning_rate = 1e-5
    weight_decay = 0.01
    decay_power = 1
    max_epoch = 100
    max_steps = 100000
    warmup_steps = 10000
    end_lr = 0
    lr_mult_head = 5  # multiply lr for downstream heads
    lr_mult_cross_modal = 5  # multiply lr for the cross-modal module
    # Downstream Setting
    get_recall_metric = False
    # PL Trainer Setting
    resume_from = None
    fast_dev_run = False
    val_check_interval = 1.0
    test_only = False
    # below params varies with the environment
    data_root = ""
    log_dir = "result"
    per_gpu_batchsize = 0  # you should define this manually with per_gpu_batch_size=#
    num_gpus = 8
    num_nodes = 1
    load_path = ""
    num_workers = 8
    precision = 32
@ex.named_config
def task_mlm_itm_clip_bert():
    """Pretraining: masked language modeling + image-text matching, CLIP-ViT + BERT."""
    exp_name = "mlm_itm"
    datasets = ["coco", "vg", "sbu", "gcc"]
    loss_names = _loss_names({"itm": 1, "mlm": 1})
    batch_size = 4096
    max_epoch = 10
    max_steps = 100000
    warmup_steps = 0.1
    whole_word_masking = True
    vocab_size = 30522
    max_text_len = 50
    image_size = 224
    tokenizer = "bert-base-uncased"
    train_transform_keys = ["clip"]
    val_transform_keys = ["clip"]
    learning_rate = 1e-5
    val_check_interval = 1.0
    lr_mult_head = 5
    lr_mult_cross_modal = 5
    num_top_layer = 6
    hidden_size = 768
    num_heads = 12
@ex.named_config
def task_finetune_nlvr2_clip_bert():
    """Fine-tuning: NLVR2 visual-reasoning classification."""
    exp_name = "finetune_nlvr2"
    datasets = ["nlvr2"]
    loss_names = _loss_names({"nlvr2": 1})
    batch_size = 256
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    draw_false_image = 0
    learning_rate = 1e-5
    lr_mult_head = 10
    lr_mult_cross_modal = 5
    tokenizer = "bert-base-uncased"
    max_text_len = 50
    input_text_embed_size = 768
    vit = 'ViT-B/32'
    train_transform_keys = ["clip"]
    val_transform_keys = ["clip"]
    input_image_embed_size = 768
    image_size = 288
@ex.named_config
def task_finetune_vqa_clip_bert():
    """Fine-tuning: VQAv2 answer classification (high-resolution 576px input)."""
    exp_name = "finetune_vqa"
    datasets = ["vqa"]
    loss_names = _loss_names({"vqa": 1})
    batch_size = 512
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    draw_false_image = 0
    learning_rate = 5e-6
    val_check_interval = 0.1
    lr_mult_head = 50
    lr_mult_cross_modal = 5
    tokenizer = "bert-base-uncased"
    max_text_len = 50
    input_text_embed_size = 768
    vit = 'ViT-B/32'
    train_transform_keys = ["clip"]
    val_transform_keys = ["clip"]
    input_image_embed_size = 768
    image_size = 576
@ex.named_config
def task_finetune_irtr_coco_clip_bert():
    """Fine-tuning: image/text retrieval on COCO (ITM + IRTR losses, recall eval)."""
    exp_name = "finetune_irtr_coco"
    datasets = ["coco"]
    loss_names = _loss_names({"itm": 0.5, "irtr": 1})
    batch_size = 512
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    get_recall_metric = True
    draw_false_text = 15
    learning_rate = 5e-6
    lr_mult_head = 5
    lr_mult_cross_modal = 5
    tokenizer = "bert-base-uncased"
    input_text_embed_size = 768
    vit = 'ViT-B/32'
    train_transform_keys = ["clip"]
    val_transform_keys = ["clip"]
    input_image_embed_size = 768
    image_size = 384
@ex.named_config
def task_finetune_irtr_f30k_clip_bert():
    """Fine-tuning: image/text retrieval on Flickr30k (ITM + IRTR losses, recall eval)."""
    exp_name = "finetune_irtr_f30k"
    datasets = ["f30k"]
    loss_names = _loss_names({"itm": 0.5, "irtr": 1})
    batch_size = 512
    max_epoch = 10
    max_steps = None
    warmup_steps = 0.1
    get_recall_metric = True
    draw_false_text = 15
    learning_rate = 5e-6
    lr_mult_head = 5
    lr_mult_cross_modal = 5
    tokenizer = "bert-base-uncased"
    input_text_embed_size = 768
    vit = 'ViT-B/32'
    train_transform_keys = ["clip"]
    val_transform_keys = ["clip"]
    input_image_embed_size = 768
    image_size = 384
@ex.named_config
def task_finetune_snli_clip_bert():
    """Fine-tuning: SNLI-VE visual entailment classification."""
    exp_name = "finetune_snli"
    datasets = ["snli"]
    loss_names = _loss_names({"snli": 1})
    batch_size = 64
    max_epoch = 5
    max_steps = None
    warmup_steps = 0.1
    draw_false_image = 0
    learning_rate = 2e-6
    lr_mult_head = 10
    lr_mult_cross_modal = 5
    tokenizer = "bert-base-uncased"
    max_text_len = 50
    input_text_embed_size = 768
    vit = 'ViT-B/32'
    train_transform_keys = ["clip"]
    val_transform_keys = ["clip"]
    input_image_embed_size = 768
    image_size = 384
# Named configs for "etc" which are orthogonal to "env" and "task", need to be added at the end
# vision encoder
@ex.named_config
def swin32_base224():
    """Vision encoder: Swin-Base, ImageNet-22k pretrained, 224px input."""
    vit = "swin_base_patch4_window7_224_in22k"
    patch_size = 32
    image_size = 224
    train_transform_keys = ["imagenet"]
    val_transform_keys = ["imagenet"]
    input_image_embed_size = 1024
    resolution_before = 224
@ex.named_config
def swin32_base384():
    """Vision encoder: Swin-Base, ImageNet-22k pretrained, 384px input."""
    vit = "swin_base_patch4_window12_384_in22k"
    patch_size = 32
    image_size = 384
    train_transform_keys = ["imagenet"]
    val_transform_keys = ["imagenet"]
    input_image_embed_size = 1024
    resolution_before = 384
@ex.named_config
def swin32_large384():
    """Vision encoder: Swin-Large, ImageNet-22k pretrained, 384px input."""
    vit = "swin_large_patch4_window12_384_in22k"
    patch_size = 32
    image_size = 384
    train_transform_keys = ["imagenet"]
    val_transform_keys = ["imagenet"]
    input_image_embed_size = 1536
    resolution_before = 384
@ex.named_config
def clip32():
    """Vision encoder: CLIP ViT-B/32."""
    vit = 'ViT-B/32'
    image_size = 224
    patch_size = 32
    train_transform_keys = ["clip"]
    val_transform_keys = ["clip"]
    input_image_embed_size = 768
@ex.named_config
def clip16():
    """Vision encoder: CLIP ViT-B/16."""
    vit = 'ViT-B/16'
    image_size = 224
    patch_size = 16
    train_transform_keys = ["clip"]
    val_transform_keys = ["clip"]
    input_image_embed_size = 768
# text encoder
@ex.named_config
def text_roberta():
    """Text encoder: RoBERTa-base."""
    tokenizer = "roberta-base"
    vocab_size = 50265
    input_text_embed_size = 768
@ex.named_config
def text_roberta_large():
    """Text encoder: RoBERTa-large."""
    tokenizer = "roberta-large"
    vocab_size = 50265
    input_text_embed_size = 1024
# random augmentation
@ex.named_config
def imagenet_randaug():
    """Training augmentation: ImageNet normalization + RandAugment."""
    train_transform_keys = ["imagenet_randaug"]
@ex.named_config
def clip_randaug():
    """Training augmentation: CLIP normalization + RandAugment."""
    train_transform_keys = ["clip_randaug"]
| 7,425 | 23.671096 | 123 | py |
Mr.Right | Mr.Right-main/models/METER/meter/__init__.py | 0 | 0 | 0 | py | |
Mr.Right | Mr.Right-main/models/METER/meter/modules/clip_model.py | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class LayerNorm(nn.LayerNorm):
    """LayerNorm that normalizes in float32 and casts back (safe under fp16)."""

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normalized = super().forward(x.to(torch.float32))
        return normalized.type(input_dtype)
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation used by CLIP: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(x * 1.702)
        return gate * x
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block: multi-head self-attention and a 4x GELU MLP,
    each wrapped in a residual connection (CLIP-style, expects (L, N, D) input)."""

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        # Optional fixed attention mask (e.g. causal); lazily moved to the
        # input's dtype/device on each call.
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor, x_mask:torch.Tensor):
        # x_mask is a per-token padding mask, passed as key_padding_mask.
        if x_mask is not None:
            x_mask = x_mask.to(dtype=torch.bool, device=x.device)
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, key_padding_mask=x_mask)[0]

    def forward(self, x: torch.Tensor, x_mask:torch.Tensor=None):
        # Residual around attention, then residual around the MLP.
        x = x + self.attention(self.ln_1(x), x_mask)
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """Stack of residual attention blocks (CLIP-style encoder)."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        # NOTE(review): only `layers - 1` blocks are instantiated, so
        # `self.layers` does not equal len(self.resblocks).  Presumably the
        # top layer is intentionally dropped for the cross-modal fusion
        # module — confirm against the checkpoint-loading code before relying
        # on `self.layers`.
        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers-1)])

    def forward(self, x: torch.Tensor, x_mask: torch.Tensor=None):
        # The (optional) padding mask is forwarded to every block.
        for block in self.resblocks:
            x = block(x, x_mask)
        return x
class VisualTransformer(nn.Module):
    """CLIP's ViT image encoder: patchify with a strided conv, prepend a class
    token, add positional embeddings, run the transformer, and layer-norm the
    full token sequence (no pooling/projection here — all tokens are returned)."""

    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int, resolution_after: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding: kernel == stride == patch_size.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        # Positional table is sized for `resolution_after`, which may differ
        # from the checkpoint resolution (weights are interpolated on load).
        self.positional_embedding = nn.Parameter(scale * torch.randn((resolution_after // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads)
        self.ln_post = LayerNorm(width)

    def forward(self, x: torch.Tensor, x_mask):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Broadcast the class token to the batch and prepend it.
        t=self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device)
        x = torch.cat([t, x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, x_mask)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_post(x)
        return x
class CLIP(nn.Module):
    """Trimmed CLIP model.

    Only the visual branch is executed by ``forward``; the text-side modules
    (token/positional embeddings, final LayerNorm) are still instantiated and
    initialized — presumably so released CLIP checkpoints load cleanly; the
    text transformer itself is not built here.
    """

    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int,
                 resolution_after=224,
                 ):
        super().__init__()
        self.context_length = context_length
        # CLIP convention: one attention head per 64 channels.
        vision_heads = vision_width // 64
        self.visual = VisualTransformer(
            input_resolution=image_resolution,
            patch_size=vision_patch_size,
            width=vision_width,
            layers=vision_layers,
            heads=vision_heads,
            output_dim=embed_dim,
            resolution_after=resolution_after,
        )
        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)
        self.initialize_parameters()

    def initialize_parameters(self):
        """Initialize text embeddings and the visual transformer weights with
        CLIP's depth-scaled normal initialization."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        proj_std = (self.visual.transformer.width ** -0.5) * ((2 * self.visual.transformer.layers) ** -0.5)
        attn_std = self.visual.transformer.width ** -0.5
        fc_std = (2 * self.visual.transformer.width) ** -0.5
        for block in self.visual.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

    @property
    def dtype(self):
        # Follow the visual patch-embedding weight's dtype (fp16/fp32).
        return self.visual.conv1.weight.dtype

    def forward(self, image, image_mask=None):
        """Encode images; returns the visual transformer's token sequence."""
        return self.visual(image.type(self.dtype), image_mask)
# Released OpenAI CLIP checkpoints.  The second-to-last URL path component is
# the file's expected SHA256, which _download() verifies.
_MODELS = {
    "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
    "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
}
import os
import hashlib
import urllib
from tqdm import tqdm
import warnings
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
    """Download *url* into *root* and return the local file path.

    The URL is expected to embed the file's SHA256 as its second-to-last path
    component (OpenAI CLIP release layout); the checksum is verified both for
    an already-cached file and after a fresh download.

    Raises:
        RuntimeError: if the target path exists but is not a regular file, or
            if the downloaded file fails checksum verification.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    def _sha256(path):
        # Hash under a context manager so the handle is always closed
        # (the original `open(...).read()` leaked the file descriptor).
        with open(path, "rb") as f:
            return hashlib.sha256(f.read()).hexdigest()

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        if _sha256(download_target) == expected_sha256:
            return download_target
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        # Content-Length may be missing; fall back to 0 instead of crashing
        # on int(None).
        total = int(source.info().get("Content-Length") or 0)
        with tqdm(total=total, ncols=80, unit='iB', unit_scale=True) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    if _sha256(download_target) != expected_sha256:
        # Fixed message (original read "does not not match").
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")
    return download_target
def adapt_position_encoding(model, patch_size=32, after=384,
                            suffix='visual.positional_embedding'):
    """Resize a checkpoint's visual positional embedding to a new resolution.

    The class-token row (index 0) is preserved verbatim; the grid rows are
    bicubically interpolated from the checkpoint's grid side length to
    ``after // patch_size``.  *model* (a state dict) is updated in place and
    also returned.
    """
    matching_keys = [k for k in model if k.endswith(suffix)]
    assert len(matching_keys) == 1
    key = matching_keys[0]

    pos_embed = model[key]
    squeeze_back = len(pos_embed.shape) == 2
    if squeeze_back:
        pos_embed = pos_embed.unsqueeze(0)

    grid_before = int(np.sqrt(pos_embed.shape[1] - 1))
    assert (int(grid_before * patch_size) % patch_size) == 0
    assert (after % patch_size) == 0
    grid_after = after // patch_size
    embed_dim = pos_embed.shape[-1]

    # Split off the grid part and interpolate it as a (1, C, H, W) image.
    grid = pos_embed[0, 1:, :].reshape((grid_before, grid_before, embed_dim))
    grid = grid.permute((2, 0, 1)).unsqueeze(0)
    grid = torch.nn.functional.interpolate(grid, size=(grid_after, grid_after), mode='bicubic')
    grid = grid.squeeze(0).permute((1, 2, 0)).reshape((-1, embed_dim))

    # Re-attach the untouched class-token row.
    resized = torch.cat((pos_embed[0, 0:1, :], grid), dim=0).unsqueeze(0)
    assert resized.shape == (1, grid_after * grid_after + 1, embed_dim)
    if squeeze_back:
        resized = resized.squeeze(0)
    model[key] = resized
    return model
def build_model(name, resolution_after=224):
    """Instantiate a ``CLIP`` module from a pretrained checkpoint.

    Args:
        name: a key of ``_MODELS`` (downloaded on demand) or a local file path.
        resolution_after: desired input resolution; positional embeddings are
            interpolated when it differs from the checkpoint's resolution.

    Returns:
        A ``CLIP`` model with the pretrained weights loaded.
    """
    if name in _MODELS:
        model_path = _download(_MODELS[name])
    elif os.path.isfile(name):
        model_path = name
    else:
        # Fixed: the original called an undefined `available_models()` here.
        raise RuntimeError(f"Model {name} not found; available models = {list(_MODELS.keys())}")

    try:
        # Released OpenAI checkpoints are TorchScript archives.
        model = torch.jit.load(model_path, map_location="cpu")
        state_dict = None
    except RuntimeError:
        # Not a JIT archive — fall back to a plain state dict.  (The original
        # referenced an undefined `jit` variable here, raising NameError
        # instead of falling back.)
        warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
        state_dict = torch.load(model_path, map_location="cpu")

    state_dict = state_dict or model.state_dict()

    # Recover the architecture hyper-parameters from tensor shapes.
    vision_width = state_dict["visual.conv1.weight"].shape[0]
    vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
    vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
    grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
    image_resolution = vision_patch_size * grid_size
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))

    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers,
        resolution_after,
    )

    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]

    model_dict = model.state_dict()
    pretrained_dict = state_dict
    if resolution_after != image_resolution:
        # Interpolate the visual positional embedding to the new resolution.
        pretrained_dict = adapt_position_encoding(pretrained_dict, after=resolution_after, patch_size=vision_patch_size)
    # 1. filter out unnecessary keys
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    # 2. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    # 3. load the new state dict
    model.load_state_dict(model_dict)
    return model
| 11,209 | 39.179211 | 142 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/meter_utils.py | import torch
import random
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from .dist_utils import all_gather
from .objectives import compute_irtr_recall
from ..gadgets.my_metrics import Accuracy, VQAScore, Scalar
def set_metrics(pl_module):
    """Attach metric objects (Accuracy / VQAScore / Scalar loss meters) to
    *pl_module* for every task whose loss weight is positive.

    Metrics are created per split: ``train_*`` and ``val_*`` for most tasks;
    nlvr2 and snli instead get separate ``dev_*`` and ``test_*`` meters for
    the non-train split.
    """
    for split in ["train", "val"]:
        for k, v in pl_module.hparams.config["loss_names"].items():
            if v <= 0:
                continue  # task disabled
            if k == "vqa":
                setattr(pl_module, f"{split}_vqa_score", VQAScore())
                setattr(pl_module, f"{split}_{k}_loss", Scalar())
            elif k == "nlvr2":
                if split == "train":
                    setattr(pl_module, f"train_{k}_accuracy", Accuracy())
                    setattr(pl_module, f"train_{k}_loss", Scalar())
                else:
                    setattr(pl_module, f"dev_{k}_accuracy", Accuracy())
                    setattr(pl_module, f"dev_{k}_loss", Scalar())
                    setattr(pl_module, f"test_{k}_accuracy", Accuracy())
                    setattr(pl_module, f"test_{k}_loss", Scalar())
            elif k == "snli":
                if split == "train":
                    setattr(pl_module, f"train_{k}_accuracy", Accuracy())
                    setattr(pl_module, f"train_{k}_loss", Scalar())
                else:
                    setattr(pl_module, f"dev_{k}_accuracy", Accuracy())
                    setattr(pl_module, f"dev_{k}_loss", Scalar())
                    setattr(pl_module, f"test_{k}_accuracy", Accuracy())
                    setattr(pl_module, f"test_{k}_loss", Scalar())
            elif k == "irtr":
                # Retrieval has no accuracy meter; recall is computed separately.
                setattr(pl_module, f"{split}_irtr_loss", Scalar())
            elif k == "mppd" or k == "mpfr":
                setattr(pl_module, f"{split}_{k}_loss", Scalar())
            elif k == "itm":
                setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
                setattr(pl_module, f"{split}_{k}_loss", Scalar())
            else:
                setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
                setattr(pl_module, f"{split}_{k}_loss", Scalar())
def epoch_wrapup(pl_module):
    """Compute, log, and reset the per-task epoch metrics for the current
    phase, then log the aggregate ``the_metric`` (sum of task scores, plus
    IR/TR R@1 when recall evaluation is enabled) — presumably used for
    checkpoint selection; confirm against the checkpoint callback config.
    """
    phase = "train" if pl_module.training else "val"
    the_metric = 0
    # Optional retrieval-recall evaluation (validation only).
    if pl_module.hparams.config["get_recall_metric"] and not pl_module.training:
        (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_irtr_recall(pl_module)
        print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step)
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r1", ir_r1, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r5", ir_r5, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r10", ir_r10, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r1", tr_r1, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r5", tr_r5, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r10", tr_r10, pl_module.global_step
        )
        the_metric += ir_r1.item() + tr_r1.item()
    for loss_name, v in pl_module.hparams.config["loss_names"].items():
        if v <= 0:
            continue  # task disabled
        value = 0
        if loss_name == "vqa":
            value = getattr(pl_module, f"{phase}_{loss_name}_score").compute()
            pl_module.log(f"{loss_name}/{phase}/score_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_score").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "nlvr2" or loss_name == 'snli':
            # These tasks keep separate dev/test meters for the val phase
            # (see set_metrics); only the dev value enters `the_metric`.
            if phase == "train":
                value = getattr(pl_module, f"train_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/train/accuracy_epoch", value)
                getattr(pl_module, f"train_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/train/loss_epoch",
                    getattr(pl_module, f"train_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"train_{loss_name}_loss").reset()
            else:
                value = getattr(pl_module, f"test_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/test/accuracy_epoch", value)
                getattr(pl_module, f"test_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/test/loss_epoch",
                    getattr(pl_module, f"test_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"test_{loss_name}_loss").reset()
                value = getattr(pl_module, f"dev_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/dev/accuracy_epoch", value)
                getattr(pl_module, f"dev_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/dev/loss_epoch",
                    getattr(pl_module, f"dev_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"dev_{loss_name}_loss").reset()
        elif loss_name == "irtr":
            pl_module.log(
                f"{loss_name}/{phase}/irtr_loss_epoch",
                getattr(pl_module, f"{phase}_irtr_loss").compute(),
            )
            getattr(pl_module, f"{phase}_irtr_loss").reset()
        elif loss_name == "mppd" or loss_name == "mpfr":
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "itm":
            value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        else:
            value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        the_metric += value
    pl_module.log(f"{phase}/the_metric", the_metric)
def check_non_acc_grad(pl_module):
    """Return True when the token-type embedding accumulated no gradient
    (either no grad at all, or an all-zero grad)."""
    grad = pl_module.token_type_embeddings.weight.grad
    if grad is None:
        return True
    return (grad.sum() == 0).item()
def set_task(pl_module):
    """Store on *pl_module* the list of task names whose loss weight is positive."""
    loss_weights = pl_module.hparams.config["loss_names"]
    pl_module.current_tasks = [name for name, weight in loss_weights.items() if weight > 0]
def set_schedule(pl_module):
    """Build the optimizer and LR schedule for a Lightning module.

    Parameters are partitioned into six groups — {weight-decayed, non-decayed}
    crossed with {backbone lr, task-head lr * lr_mult_head, cross-modal lr *
    lr_mult_cross_modal} — and the scheduler is warmup followed by cosine or
    polynomial decay, stepped per optimizer step.

    Returns:
        ([optimizer], [scheduler_dict]) in the format Lightning's
        ``configure_optimizers`` expects.
    """
    lr = pl_module.hparams.config["learning_rate"]
    wd = pl_module.hparams.config["weight_decay"]

    # Parameter-name fragments that are excluded from weight decay.
    no_decay = [
        "bias",
        "LayerNorm.bias",
        "LayerNorm.weight",
        "norm.bias",
        "norm.weight",
        "norm1.bias",
        "norm1.weight",
        "norm2.bias",
        "norm2.weight",
    ]
    # Name fragments identifying downstream task heads and the cross-modal module.
    head_names = ["vqa_classifier", "nlvr2_classifier", "mlm_score", "itm_score", "snli_classifier"]
    cross_modal_names = ['cross_modal']
    lr_mult_head = pl_module.hparams.config["lr_mult_head"]
    lr_mult_cross_modal = pl_module.hparams.config["lr_mult_cross_modal"]
    end_lr = pl_module.hparams.config["end_lr"]
    decay_power = pl_module.hparams.config["decay_power"]
    optim_type = pl_module.hparams.config["optim_type"]
    # Six disjoint groups: {decay, no-decay} x {backbone, head, cross-modal}.
    optimizer_grouped_parameters = [
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if not any(nd in n for nd in no_decay)
                and not any(bb in n for bb in head_names)
                and not any(ht in n for ht in cross_modal_names)
            ],
            "weight_decay": wd,
            "lr": lr,
        },
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if any(nd in n for nd in no_decay)
                and not any(bb in n for bb in head_names)
                and not any(ht in n for ht in cross_modal_names)
            ],
            "weight_decay": 0.0,
            "lr": lr,
        },
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if not any(nd in n for nd in no_decay)
                and any(bb in n for bb in head_names)
                and not any(ht in n for ht in cross_modal_names)
            ],
            "weight_decay": wd,
            "lr": lr * lr_mult_head,
        },
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if any(nd in n for nd in no_decay) and any(bb in n for bb in head_names)
                and not any(ht in n for ht in cross_modal_names)
            ],
            "weight_decay": 0.0,
            "lr": lr * lr_mult_head,
        },
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if not any(nd in n for nd in no_decay)
                and not any(bb in n for bb in head_names)
                and any(ht in n for ht in cross_modal_names)
            ],
            "weight_decay": wd,
            "lr": lr * lr_mult_cross_modal,
        },
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if any(nd in n for nd in no_decay)
                and not any(bb in n for bb in head_names)
                and any(ht in n for ht in cross_modal_names)
            ],
            "weight_decay": 0.0,
            "lr": lr * lr_mult_cross_modal,
        },
    ]
    if optim_type == "adamw":
        optimizer = AdamW(
            optimizer_grouped_parameters, lr=lr, eps=1e-8, betas=(0.9, 0.98)
        )
    elif optim_type == "adam":
        optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=lr)
    elif optim_type == "sgd":
        optimizer = torch.optim.SGD(optimizer_grouped_parameters, lr=lr, momentum=0.9)
    # Derive the total number of steps from the dataloader when the trainer
    # does not fix max_steps.
    if pl_module.trainer.max_steps is None:
        max_steps = (
            len(pl_module.trainer.datamodule.train_dataloader())
            * pl_module.trainer.max_epochs
            // pl_module.trainer.accumulate_grad_batches
        )
    else:
        max_steps = pl_module.trainer.max_steps
    warmup_steps = pl_module.hparams.config["warmup_steps"]
    # A float warmup_steps is interpreted as a fraction of max_steps.
    if isinstance(pl_module.hparams.config["warmup_steps"], float):
        warmup_steps = int(max_steps * warmup_steps)
    if decay_power == "cosine":
        scheduler = get_cosine_schedule_with_warmup(
            optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps,
        )
    else:
        scheduler = get_polynomial_decay_schedule_with_warmup(
            optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=max_steps,
            lr_end=end_lr,
            power=decay_power,
        )
    sched = {"scheduler": scheduler, "interval": "step"}
    return (
        [optimizer],
        [sched],
    )
| 11,926 | 38.363036 | 100 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/swin_transformer.py | """ Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
- https://arxiv.org/pdf/2103.14030
Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below
"""
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import logging
import math
from copy import deepcopy
from typing import Optional
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg, overlay_external_default_cfg
from .swin_helpers import swin_build_model_with_cfg
from timm.models.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import checkpoint_filter_fn, _init_vit_weights
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
    """Build the default pretrained-weights config dict for a Swin model;
    keyword arguments override the defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.proj', 'classifier': 'head',
    }
    cfg.update(kwargs)
    return cfg
# Pretrained checkpoint URLs and input configs for each supported Swin variant
# (official Microsoft Swin-Transformer release; *_in22k entries are
# ImageNet-22k checkpoints, the rest are 22k->1k fine-tuned).
default_cfgs = {
    # patch models (my experiments)
    'swin_base_patch4_window12_384': _cfg(
        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth',
        input_size=(3, 384, 384), crop_pct=1.0),
    'swin_base_patch4_window7_224': _cfg(
        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',
    ),
    'swin_large_patch4_window12_384': _cfg(
        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth',
        input_size=(3, 384, 384), crop_pct=1.0),
    'swin_large_patch4_window7_224': _cfg(
        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',
    ),
    'swin_small_patch4_window7_224': _cfg(
        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',
    ),
    'swin_tiny_patch4_window7_224': _cfg(
        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',
    ),
    'swin_base_patch4_window12_384_in22k': _cfg(
        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth',
        input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
    'swin_base_patch4_window7_224_in22k': _cfg(
        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
        num_classes=21841),
    'swin_large_patch4_window12_384_in22k': _cfg(
        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth',
        input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
    'swin_large_patch4_window7_224_in22k': _cfg(
        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
        num_classes=21841),
}
def window_partition(x, window_size: int):
    """
    Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    batch, height, width, channels = x.shape
    x = x.view(batch,
               height // window_size, window_size,
               width // window_size, window_size,
               channels)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(-1, window_size, window_size, channels)
def window_reverse(windows, window_size: int, H: int, W: int):
    """
    Merge windows back into the full feature map (inverse of window_partition).

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    num_h = H // window_size
    num_w = W // window_size
    batch = windows.shape[0] // (num_h * num_w)
    x = windows.view(batch, num_h, num_w, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(batch, H, W, -1)
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional):  If True, add a learnable bias to query, key, value. Default: True
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH
        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # Buffer (not a parameter): a fixed index map into the bias table.
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask: Optional[torch.Tensor] = None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None

        Returns:
            features of the same shape (num_windows*B, N, C).
        """
        B_, N, C = x.shape
        # Single projection produces q, k, v: (3, B_, nH, N, C // nH).
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        # Add the learned relative position bias, gathered via the index buffer.
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # Shifted-window mask: broadcast the per-window (0/-inf) mask over
            # the batch, then fold windows back into the batch dimension.
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.

    One transformer block over windowed self-attention:
    ``LN -> (S)W-MSA -> residual`` followed by ``LN -> MLP -> residual``.
    When ``shift_size > 0`` the feature map is cyclically shifted before
    window partitioning (SW-MSA), letting successive blocks exchange
    information across window boundaries.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution (H, W) in patches.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        if self.shift_size > 0:
            # calculate attention mask for SW-MSA:
            # label every pixel with the id of the contiguous region it belongs
            # to after the cyclic shift; token pairs from different regions get
            # a -100 additive mask so softmax zeroes their attention.
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1
            mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            # Nonzero difference <=> tokens come from different shifted regions.
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        # Registered as a buffer so it follows the module across devices.
        self.register_buffer("attn_mask", attn_mask)
    def forward(self, x):
        """x: (B, H*W, C) -> (B, H*W, C)."""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)
        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class PatchMerging(nn.Module):
    r"""Downsampling layer that merges each 2x2 patch neighborhood.

    Concatenates the four spatially adjacent tokens (halving both H and W),
    layer-normalizes, and linearly projects 4*C channels down to 2*C.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)
    def forward(self, x):
        """x: (B, H*W, C) -> (B, H/2*W/2, 2*C)."""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
        grid = x.view(B, H, W, C)
        # Collect the four parity-offset sub-grids (even/even, odd/even,
        # even/odd, odd/odd) and stack them along the channel axis.
        corners = [grid[:, dh::2, dw::2, :] for dh, dw in ((0, 0), (1, 0), (0, 1), (1, 1))]
        merged = torch.cat(corners, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C
        merged = self.norm(merged)
        return self.reduction(merged)
    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"
    def flops(self):
        # LayerNorm over every input token plus the 4C -> 2C projection.
        H, W = self.input_resolution
        flops = H * W * self.dim
        flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
        return flops
class BasicLayer(nn.Module):
    """One Swin Transformer stage: a stack of (shifted-)window attention
    blocks followed by an optional downsampling (patch merging) layer.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # Alternate regular (shift 0) and shifted (shift = window//2) blocks so
        # consecutive layers attend across window boundaries.
        blocks = []
        for i in range(depth):
            rate = drop_path[i] if isinstance(drop_path, list) else drop_path
            blocks.append(SwinTransformerBlock(
                dim=dim, input_resolution=input_resolution, num_heads=num_heads,
                window_size=window_size,
                shift_size=window_size // 2 if i % 2 else 0,
                mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop,
                attn_drop=attn_drop, drop_path=rate, norm_layer=norm_layer))
        self.blocks = nn.ModuleList(blocks)
        # patch merging layer (optional)
        if downsample is None:
            self.downsample = None
        else:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
    def forward(self, x):
        # Gradient checkpointing trades compute for memory; unavailable under
        # TorchScript, hence the is_scripting() guard.
        for blk in self.blocks:
            if self.use_checkpoint and not torch.jit.is_scripting():
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
class SwinTransformer(nn.Module):
    r""" Swin Transformer
    A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
        https://arxiv.org/pdf/2103.14030

    This variant returns the final token features (B, L, C) from forward();
    no pooling or classification head is applied (see forward_features()).

    Args:
        img_size (int | tuple(int)): Input image size. Default 224
        patch_size (int | tuple(int)): Patch size. Default: 4
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. NOTE: overridden in __init__ (see below). Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
                 embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),
                 window_size=7, mlp_ratio=4., qkv_bias=True,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, weight_init='', **kwargs):
        super().__init__()
        # NOTE(review): the window_size argument is silently overwritten here;
        # the effective window size is always img_size/32 (the final-stage
        # feature resolution) — confirm this is intended.
        window_size=int(img_size/32)
        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        # Channel width doubles each stage: final width = embed_dim * 2^(L-1).
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.mlp_ratio = mlp_ratio
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        self.patch_grid = self.patch_embed.grid_size
        # absolute position embedding (off by default; Swin relies on relative bias)
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)
        else:
            self.absolute_pos_embed = None
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth: drop-path probability increases linearly with depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
        # build layers: each stage halves spatial resolution (except the last,
        # which has no downsample) and doubles channels.
        layers = []
        for i_layer in range(self.num_layers):
            layers += [BasicLayer(
                dim=int(embed_dim * 2 ** i_layer),
                input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)),
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=self.mlp_ratio,
                qkv_bias=qkv_bias,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                norm_layer=norm_layer,
                downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                use_checkpoint=use_checkpoint)
            ]
        self.layers = nn.Sequential(*layers)
        self.norm = norm_layer(self.num_features)
        # NOTE(review): avgpool is created but never used by forward_features();
        # forward() returns unpooled token features.
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
        if weight_init.startswith('jax'):
            for n, m in self.named_modules():
                _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)
        else:
            self.apply(_init_vit_weights)
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters excluded from weight decay by the optimizer factory.
        return {'absolute_pos_embed'}
    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}
    def get_classifier(self):
        # NOTE(review): self.head is only created by reset_classifier();
        # calling get_classifier() before that raises AttributeError.
        return self.head
    def reset_classifier(self, num_classes, global_pool=''):
        # NOTE(review): the head created here is not applied in forward();
        # callers must apply it themselves.
        self.num_classes = num_classes
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
    def forward_features(self, x):
        """Image tensor (B, C, H, W) -> final-stage token features (B, L, C)."""
        x = self.patch_embed(x)
        if self.absolute_pos_embed is not None:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        x = self.layers(x)
        x = self.norm(x)  # B L C
        return x
    def forward(self, x):
        x = self.forward_features(x)
        return x
def _create_swin_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
    """Instantiate a SwinTransformer variant from its default configuration.

    Args:
        variant (str): Key into ``default_cfgs`` naming the model variant.
        pretrained (bool): Whether to load pretrained weights.
        default_cfg (dict, optional): Overrides the variant's registered config.
        **kwargs: Extra model arguments. When ``kwargs['config']['image_size']``
            is present it sets the input image size; otherwise the variant's
            default input size is used.

    Returns:
        The constructed (and optionally pretrained) SwinTransformer.

    Raises:
        RuntimeError: If ``features_only`` is requested (not supported).
    """
    if default_cfg is None:
        default_cfg = deepcopy(default_cfgs[variant])
    overlay_external_default_cfg(default_cfg, kwargs)
    default_num_classes = default_cfg['num_classes']
    default_img_size = default_cfg['input_size'][-2:]
    num_classes = kwargs.pop('num_classes', default_num_classes)
    # Fix: fall back to the variant's default input size when no explicit
    # config dict is supplied (previously this raised an opaque KeyError and
    # default_img_size was dead code). Default input sizes are square, so a
    # scalar side length is passed, matching what SwinTransformer expects.
    config = kwargs.get('config')
    if config is not None:
        img_size = config['image_size']
    else:
        img_size = default_img_size[-1]
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')
    model = swin_build_model_with_cfg(
        SwinTransformer, variant, pretrained,
        default_cfg=default_cfg,
        img_size=img_size,
        num_classes=num_classes,
        pretrained_filter_fn=checkpoint_filter_fn,
        pretrained_strict=False,
        **kwargs)
    return model
@register_model
def swin_base_patch4_window12_384(pretrained=False, **kwargs):
    """Swin-B at 384x384; ImageNet-22k pretrained, fine-tuned on ImageNet-1k."""
    model_kwargs = dict(
        embed_dim=128,
        depths=(2, 2, 18, 2),
        num_heads=(4, 8, 16, 32),
        patch_size=4,
        window_size=12,
        **kwargs,
    )
    return _create_swin_transformer(
        'swin_base_patch4_window12_384', pretrained=pretrained, **model_kwargs)
@register_model
def swin_base_patch4_window7_224(pretrained=False, **kwargs):
    """Swin-B at 224x224; ImageNet-22k pretrained, fine-tuned on ImageNet-1k."""
    model_kwargs = dict(
        embed_dim=128,
        depths=(2, 2, 18, 2),
        num_heads=(4, 8, 16, 32),
        patch_size=4,
        window_size=7,
        **kwargs,
    )
    return _create_swin_transformer(
        'swin_base_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window12_384(pretrained=False, **kwargs):
    """Swin-L at 384x384; ImageNet-22k pretrained, fine-tuned on ImageNet-1k."""
    model_kwargs = dict(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        patch_size=4,
        window_size=12,
        **kwargs,
    )
    return _create_swin_transformer(
        'swin_large_patch4_window12_384', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window7_224(pretrained=False, **kwargs):
    """Swin-L at 224x224; ImageNet-22k pretrained, fine-tuned on ImageNet-1k."""
    model_kwargs = dict(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        patch_size=4,
        window_size=7,
        **kwargs,
    )
    return _create_swin_transformer(
        'swin_large_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_small_patch4_window7_224(pretrained=False, **kwargs):
    """Swin-S at 224x224; trained on ImageNet-1k."""
    model_kwargs = dict(
        embed_dim=96,
        depths=(2, 2, 18, 2),
        num_heads=(3, 6, 12, 24),
        patch_size=4,
        window_size=7,
        **kwargs,
    )
    return _create_swin_transformer(
        'swin_small_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_tiny_patch4_window7_224(pretrained=False, **kwargs):
    """Swin-T at 224x224; trained on ImageNet-1k."""
    model_kwargs = dict(
        embed_dim=96,
        depths=(2, 2, 6, 2),
        num_heads=(3, 6, 12, 24),
        patch_size=4,
        window_size=7,
        **kwargs,
    )
    return _create_swin_transformer(
        'swin_tiny_patch4_window7_224', pretrained=pretrained, **model_kwargs)
@register_model
def swin_base_patch4_window12_384_in22k(pretrained=False, **kwargs):
    """Swin-B at 384x384; trained on ImageNet-22k."""
    model_kwargs = dict(
        embed_dim=128,
        depths=(2, 2, 18, 2),
        num_heads=(4, 8, 16, 32),
        patch_size=4,
        window_size=12,
        **kwargs,
    )
    return _create_swin_transformer(
        'swin_base_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs)
@register_model
def swin_base_patch4_window7_224_in22k(pretrained=False, **kwargs):
    """Swin-B at 224x224; trained on ImageNet-22k."""
    model_kwargs = dict(
        embed_dim=128,
        depths=(2, 2, 18, 2),
        num_heads=(4, 8, 16, 32),
        patch_size=4,
        window_size=7,
        **kwargs,
    )
    return _create_swin_transformer(
        'swin_base_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window12_384_in22k(pretrained=False, **kwargs):
    """Swin-L at 384x384; trained on ImageNet-22k."""
    model_kwargs = dict(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        patch_size=4,
        window_size=12,
        **kwargs,
    )
    return _create_swin_transformer(
        'swin_large_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs)
@register_model
def swin_large_patch4_window7_224_in22k(pretrained=False, **kwargs):
    """Swin-L at 224x224; trained on ImageNet-22k."""
    model_kwargs = dict(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        patch_size=4,
        window_size=7,
        **kwargs,
    )
    return _create_swin_transformer(
        'swin_large_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs)
| 27,086 | 41.191589 | 125 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/bert_model.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in a TensorFlow BERT checkpoint, maps its
    slash-separated scope path onto the attribute tree of ``model``, and
    copies the weights in place.

    Args:
        model: The PyTorch BERT model to populate.
        config: Unused here; kept for API compatibility.
        tf_checkpoint_path: Path to the TensorFlow checkpoint.

    Returns:
        The same ``model`` with weights loaded.

    Raises:
        ImportError: If TensorFlow is not installed.
        AssertionError: If a checkpoint tensor's shape does not match the
            destination parameter's shape.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        # Descend into the matching PyTorch submodule/attribute for each
        # scope-path component, translating TF naming conventions
        # (kernel/gamma -> weight, beta/output_bias -> bias, ...).
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # "layer_3" style names index into a module list: split off
                # the numeric suffix for indexing below.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` skips only the current path
                    # component, not the whole variable — matches upstream
                    # transformers behavior.
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        # m_name here is the last path component of the variable.
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF dense kernels are stored transposed relative to nn.Linear.
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        """Sum word + token-type (+ absolute position) embeddings, then
        LayerNorm and dropout. Exactly one of input_ids / inputs_embeds
        must be provided."""
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        seq_length = shape[1]
        if position_ids is None:
            # Offset by the cached length so generation continues the sequence.
            start = past_key_values_length
            position_ids = self.position_ids[:, start:start + seq_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            embeddings = embeddings + self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(embeddings))
class BertSelfAttention(nn.Module):
    """Multi-head (self- or cross-) attention with optional key/value caching.

    Supports three modes, selected per-call in forward():
    - plain self-attention over ``hidden_states``;
    - cross-attention when ``encoder_hidden_states`` is given (keys/values
      come from the encoder stream);
    - cached decoding when ``past_key_value`` is given.
    Also exposes hooks (save_attention) for attention-map visualization.
    """
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder
        # When True, forward() stores the attention map and registers a
        # gradient hook — used for attention visualization (e.g. Grad-CAM).
        self.save_attention = False
    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients
    def get_attn_gradients(self):
        return self.attn_gradients
    def save_attention_map(self, attention_map):
        self.attention_map = attention_map
    def get_attention_map(self):
        return self.attention_map
    def transpose_for_scores(self, x):
        # (B, L, all_head_size) -> (B, num_heads, L, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Returns (context_layer[, attention_probs][, past_key_value])."""
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Self-attention with cache: append current keys/values to the cache.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            # Add learned relative-distance terms to the scores (Shaw et al.).
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        if self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # (B, heads, L, head_size) -> (B, L, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
class BertSelfOutput(nn.Module):
    """Output projection of self-attention: dense -> dropout -> residual LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        """Project attention output and add the residual (`input_tensor`)."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Attention sublayer: BertSelfAttention followed by BertSelfOutput."""
    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads and shrink the projections to match."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Shrink q/k/v on the output dim and the output projection on the input dim.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Book-keeping so repeated pruning stays consistent.
        self.self.num_attention_heads -= len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Run attention, then project+residual; returns (output, *extras)."""
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        # Keep attention probs (and cache, when decoding) behind the output.
        return (attention_output,) + self_outputs[1:]
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion: dense projection plus activation."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # Accept either an activation name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward output: project back to hidden size, dropout, residual LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        """Project FFN output and add the residual (`input_tensor`)."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertCrossLayer(nn.Module):
    """Transformer layer with both self-attention and cross-attention.

    Flow: self-attention over ``hidden_states`` -> cross-attention where
    ``hidden_states`` queries ``encoder_hidden_states`` -> chunked
    feed-forward. Used to fuse two streams (e.g. text attending over image
    features in METER).
    """
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        # Cross-attention reuses BertAttention; queries come from this stream,
        # keys/values from the encoder stream (see forward()).
        self.crossattention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        attention_mask=None,
        encoder_attention_mask=None,
        output_attentions=False,
    ):
        """Run self-attention, cross-attention, then the feed-forward block.

        Args:
            hidden_states: features of the querying stream.
            encoder_hidden_states: features of the attended (other) stream.
            attention_mask: additive mask for the self-attention step.
            encoder_attention_mask: additive mask over the encoder tokens.
            output_attentions: include attention tensors in the output tuple.

        Returns:
            Tuple whose first element is the layer output, optionally followed
            by attention tensors.
        """
        # Past key/values are not supported here; kept for signature parity
        # with the upstream transformers BertLayer.
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = None #past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask=None,
            output_attentions=output_attentions,
            past_key_value=None,
        )
        attention_output = self_attention_outputs[0]
        # if decoder, the last output is tuple of self-attn cache
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        cross_attn_present_key_value = None
        cross_attention_outputs = self.crossattention(
            attention_output,
            attention_mask,
            None,
            encoder_hidden_states,
            encoder_attention_mask,
            None,
            output_attentions,
        )
        attention_output = cross_attention_outputs[0]
        # NOTE(review): the [1:-1] slice mirrors HF's decoder BertLayer, where
        # the last element is a key/value cache. When crossattention is not a
        # decoder there is no cache, so with output_attentions=True this slice
        # may drop the cross-attention probabilities — confirm against callers.
        outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        return outputs
    def feed_forward_chunk(self, attention_output):
        # Applied per-chunk along the sequence dim to bound peak memory.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BertEncoder(nn.Module):
    """Stack of ``config.num_hidden_layers`` BertLayer modules.

    Runs the layers sequentially, optionally collecting per-layer hidden
    states, attention weights, and (in decoder mode) key/value caches.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Apply every layer in order and aggregate the requested outputs.

        Returns either a plain tuple (``return_dict=False``) of the non-None
        aggregates, or a ``BaseModelOutputWithPastAndCrossAttentions``.
        """
        # Accumulators stay None when the corresponding output is not requested,
        # so the tuple-return path below can filter them out.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the layer *input* (so the embedding output is included).
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                # Gradient checkpointing cannot return cached key/values, so the
                # cache is force-disabled for the remaining layers.
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False
                # Closure binds this iteration's past_key_value/output_attentions,
                # because checkpoint() only forwards tensor arguments.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                # Layer's present key/value cache is the last element of its output.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            # Append the final layer's output as well.
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            # Tuple form: keep only the aggregates that were actually produced.
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class BertPooler(nn.Module):
    """Pool a sequence by transforming the first ([CLS]) token's hidden state."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here is simply dense + tanh over the first token's state.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm transform applied before the LM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # hidden_act may be the name of an activation (looked up in ACT2FN)
        # or a callable supplied directly.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        transformed = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(transformed)
class BertLMPredictionHead(nn.Module):
    """Masked-LM head: transform hidden states, then project to vocab logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The decoder's weights are shared with the input embeddings; only a
        # per-token output bias is owned by this head.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the two so `resize_token_embeddings` resizes the bias along
        # with the decoder.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class BertOnlyMLMHead(nn.Module):
    """Wrapper exposing only the masked-LM prediction head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        # Per-token vocabulary logits.
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        # Two logits: (is-next, is-random).
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Combined pre-training heads: masked-LM logits plus next-sentence logits."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # MLM logits come from per-token states; NSP logits from the pooled state.
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize *module*'s weights in place, BERT-style."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # The TF original uses truncated_normal; plain normal here
            # (cf. https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Padding embedding stays zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # Identity-like start: unit scale, zero shift.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
@dataclass
class BertForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BertForPreTraining`.
    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # All fields default to None and are populated by BertForPreTraining.forward
    # according to the labels/flags it receives.
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        # Pooler is optional; heads that do not need a pooled output skip it.
        self.pooler = BertPooler(config) if add_pooling_layer else None
        self.init_weights()
    def get_input_embeddings(self):
        # Word-embedding table (used for weight tying / resizing).
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        # Per-call flags fall back to the config's defaults when not given.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Key/value caching is only meaningful in decoder mode.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            # Default: attend to everything, including cached positions.
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            # Tuple form: (sequence_output, pooled_output, *extras from encoder).
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
@add_start_docstrings(
    """
    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()
    def get_output_embeddings(self):
        # Output projection of the MLM head (tied with the input embeddings).
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForPreTraining
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        total_loss = None
        # Loss is only computed when BOTH label kinds are provided: it is the
        # sum of the MLM loss and the NSP loss.
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return BertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
        # No pooler: the causal-LM head only needs per-token states.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> config.is_decoder = True
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Training with labels: caching would be wasted work.
            use_cache = False
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        """Build the model inputs for one generation step (generate() hook)."""
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]
        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
    def _reorder_cache(self, past, beam_idx):
        """Reorder the key/value cache to follow beam-search reordering."""
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    def __init__(self, config):
        super().__init__(config)
        if config.is_decoder:
            logger.warning(
                "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )
        # No pooler: the MLM head only needs per-token states.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()
    def get_output_embeddings(self):
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        """Append a PAD "dummy" token so MLM-style generation can predict one
        more position (generate() hook)."""
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]
        # add a dummy token
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
        # The dummy position must not be attended to: extend the mask with zeros.
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)
        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top. """,
    BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
    # BERT encoder plus a binary head (BertOnlyNSPHead) over the pooled [CLS]
    # vector, scoring whether sentence B follows sentence A.

    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see ``input_ids`` docstring). Indices should be in ``[0, 1]``:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Example::

            >>> from transformers import BertTokenizer, BertForNextSentencePrediction
            >>> import torch

            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')

            >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
            >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
            >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')

            >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
            >>> logits = outputs.logits
            >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
        """
        # Backward compatibility: the label kwarg used to be `next_sentence_label`.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # outputs[1] is the pooled [CLS] representation.
        pooled_output = outputs[1]

        seq_relationship_scores = self.cls(pooled_output)

        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))

        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
    # Pooled [CLS] vector -> dropout -> single linear classifier over num_labels.

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and the label dtype,
            # then cache the decision on the config for subsequent calls.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
    # Inputs arrive as (batch, num_choices, seq_len); choices are flattened into
    # the batch dimension, scored independently with a 1-output linear head, and
    # the scores are reshaped back to (batch, num_choices) for a softmax loss.

    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten (batch, num_choices, ...) -> (batch * num_choices, ...) so the
        # encoder sees each choice as an independent sequence.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Un-flatten per-choice scores back to (batch, num_choices).
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
    # Per-token linear classifier over the sequence output; the pooler is
    # unused, hence excluded from checkpoint-loading warnings.

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss: padding positions (mask == 0)
            # have their label replaced with ignore_index so they contribute 0.
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
    # One linear layer producing two logits per token (span start, span end);
    # the pooler is unused, hence excluded from checkpoint-loading warnings.

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        # NOTE(review): expects config.num_labels == 2 (start/end) — the split
        # below assumes exactly two output channels.
        self.num_labels = config.num_labels

        self.bert = BertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        # Split the two output channels into per-token start and end logits.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 76,774 | 41.915036 | 213 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/meter_module.py | import torch
import torch.nn as nn
import pytorch_lightning as pl
import numpy as np
import pdb
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertModel, BertEncoder, BertLayer
from .bert_model import BertCrossLayer, BertAttention
from . import swin_transformer as swin
from . import heads, objectives
# meter_utils
from .clip_model import build_model, adapt_position_encoding
from .swin_helpers import swin_adapt_position_encoding
from transformers import RobertaConfig, RobertaModel
class METERTransformerSS(pl.LightningModule):
    """METER vision-language model.

    Pairs a text transformer (BERT or RoBERTa) with a vision transformer
    (CLIP-ViT or Swin), fuses the two streams with stacked cross-attention
    layers, and attaches task heads (MLM/ITM/VQA/NLVR2/SNLI/IRTR) selected
    via ``config["loss_names"]``.

    Fixes relative to the original:
      * ``test_step`` called ``meter_utils.set_task(self)`` although the
        ``meter_utils`` import is commented out at the top of the file,
        raising ``NameError`` at test time; the call is now commented out
        like in every other step method.
      * Stray no-op ``pass`` statements removed from the step methods.
    """

    def __init__(self, config):
        super().__init__()
        self.save_hyperparameters()
        # CLIP-style ViT backbone unless a Swin variant is requested.
        self.is_clip = (not 'swin' in config['vit'])

        # Transformer config for the cross-modal fusion layers (matches the
        # tokenizer family so vocab/positions line up with the text encoder).
        if 'roberta' in config['tokenizer']:
            bert_config = RobertaConfig(
                vocab_size=config["vocab_size"],
                hidden_size=config["hidden_size"],
                num_hidden_layers=config["num_layers"],
                num_attention_heads=config["num_heads"],
                intermediate_size=config["hidden_size"] * config["mlp_ratio"],
                max_position_embeddings=config["max_text_len"],
                hidden_dropout_prob=config["drop_rate"],
                attention_probs_dropout_prob=config["drop_rate"],
            )
        else:
            bert_config = BertConfig(
                vocab_size=config["vocab_size"],
                hidden_size=config["hidden_size"],
                num_hidden_layers=config["num_layers"],
                num_attention_heads=config["num_heads"],
                intermediate_size=config["hidden_size"] * config["mlp_ratio"],
                max_position_embeddings=config["max_text_len"],
                hidden_dropout_prob=config["drop_rate"],
                attention_probs_dropout_prob=config["drop_rate"],
            )

        resolution_after = config['image_size']

        # Project each modality into the shared cross-modal hidden size.
        self.cross_modal_text_transform = nn.Linear(config['input_text_embed_size'], config['hidden_size'])
        self.cross_modal_text_transform.apply(objectives.init_weights)
        self.cross_modal_image_transform = nn.Linear(config['input_image_embed_size'], config['hidden_size'])
        self.cross_modal_image_transform.apply(objectives.init_weights)

        # Modality-type embedding: index 0 = text, 1 = image.
        self.token_type_embeddings = nn.Embedding(2, config["hidden_size"])
        self.token_type_embeddings.apply(objectives.init_weights)

        if torch.distributed.is_initialized():
            # Let rank 0 download pretrained weights first so the other ranks
            # hit the local cache after the barrier.
            if torch.distributed.get_rank() == 0:
                if self.is_clip:
                    build_model(config['vit'], resolution_after=resolution_after)
                else:
                    getattr(swin, self.hparams.config["vit"])(
                        pretrained=True, config=self.hparams.config,
                    )
                if 'roberta' in config['tokenizer']:
                    RobertaModel.from_pretrained(config['tokenizer'])
                else:
                    BertModel.from_pretrained(config['tokenizer'])
            torch.distributed.barrier()

        if self.is_clip:
            self.vit_model = build_model(config['vit'], resolution_after=resolution_after)
        else:
            self.vit_model = getattr(swin, self.hparams.config["vit"])(
                pretrained=True, config=self.hparams.config,
            )
            # Swin has no [CLS] token; pool over the token dimension instead.
            self.avgpool = nn.AdaptiveAvgPool1d(1)

        if 'roberta' in config['tokenizer']:
            self.text_transformer = RobertaModel.from_pretrained(config['tokenizer'])
        else:
            self.text_transformer = BertModel.from_pretrained(config['tokenizer'])

        # Co-attention stacks: one per modality, applied in lock-step.
        self.cross_modal_image_layers = nn.ModuleList([BertCrossLayer(bert_config) for _ in range(config['num_top_layer'])])
        self.cross_modal_image_layers.apply(objectives.init_weights)
        self.cross_modal_text_layers = nn.ModuleList([BertCrossLayer(bert_config) for _ in range(config['num_top_layer'])])
        self.cross_modal_text_layers.apply(objectives.init_weights)

        self.cross_modal_image_pooler = heads.Pooler(config["hidden_size"])
        self.cross_modal_image_pooler.apply(objectives.init_weights)
        self.cross_modal_text_pooler = heads.Pooler(config["hidden_size"])
        self.cross_modal_text_pooler.apply(objectives.init_weights)

        if config["loss_names"]["mlm"] > 0:
            self.mlm_score = heads.MLMHead(bert_config)
            self.mlm_score.apply(objectives.init_weights)

        if config["loss_names"]["itm"] > 0:
            # ITM head consumes the concatenated text+image pooled features.
            self.itm_score = heads.ITMHead(config["hidden_size"]*2)
            self.itm_score.apply(objectives.init_weights)

        hs = self.hparams.config["hidden_size"]

        if self.hparams.config["loss_names"]["vqa"] > 0:
            vs = self.hparams.config["vqav2_label_size"]
            self.vqa_classifier = nn.Sequential(
                nn.Linear(hs * 2, hs * 2),
                nn.LayerNorm(hs * 2),
                nn.GELU(),
                nn.Linear(hs * 2, vs),
            )
            self.vqa_classifier.apply(objectives.init_weights)

        # ===================== Downstream ===================== #
        if self.hparams.config["loss_names"]["nlvr2"] > 0:
            self.nlvr2_classifier = nn.Sequential(
                nn.Linear(hs * 4, hs * 2),
                nn.LayerNorm(hs * 2),
                nn.GELU(),
                nn.Linear(hs * 2, 2),
            )
            self.nlvr2_classifier.apply(objectives.init_weights)
            # NLVR2 pairs two images with one text: grow the modality embedding
            # to 3 slots, seeding both image slots from the old image embedding.
            emb_data = self.token_type_embeddings.weight.data
            self.token_type_embeddings = nn.Embedding(3, hs)
            self.token_type_embeddings.apply(objectives.init_weights)
            self.token_type_embeddings.weight.data[0, :] = emb_data[0, :]
            self.token_type_embeddings.weight.data[1, :] = emb_data[1, :]
            self.token_type_embeddings.weight.data[2, :] = emb_data[1, :]

        if self.hparams.config["loss_names"]["snli"] > 0:
            self.snli_classifier = nn.Sequential(
                nn.Linear(hs * 2, hs * 2),
                nn.LayerNorm(hs * 2),
                nn.GELU(),
                nn.Linear(hs * 2, 3),
            )
            self.snli_classifier.apply(objectives.init_weights)

        if self.hparams.config["loss_names"]["irtr"] > 0:
            # NOTE(review): assumes the ITM head exists (loss_names["itm"] > 0);
            # the ranking head is seeded from ITM's positive-class weights.
            self.rank_output = nn.Linear(hs, 1)
            self.rank_output.weight.data = self.itm_score.fc.weight.data[1:, :]
            self.rank_output.bias.data = self.itm_score.fc.bias.data[1:]
            self.margin = 0.2
            for p in self.itm_score.parameters():
                p.requires_grad = False

        # meter_utils.set_metrics(self)
        self.current_tasks = list()

        # ===================== load downstream (test_only) ======================
        if self.hparams.config["checkpoint"] != "":
            ckpt = torch.load(self.hparams.config["checkpoint"], map_location="cpu")
            state_dict = ckpt["state_dict"]
            # Resize positional encodings when the fine-tune resolution differs
            # from the checkpoint's training resolution.
            if self.is_clip:
                state_dict = adapt_position_encoding(state_dict, after=resolution_after, patch_size=self.hparams.config['patch_size'])
            else:
                state_dict = swin_adapt_position_encoding(state_dict, after=resolution_after, before=config['resolution_before'])
            msg = self.load_state_dict(state_dict, strict=False)
            print(msg)

    def infer(
        self,
        batch,
        mask_text=False,
        mask_image=False,
        image_token_type_idx=1,
        img=None,
    ):
        """Encode query text, document text and image, fuse text/image with
        cross-attention, and return per-token and pooled features.

        Expects ``batch`` to contain ``query_ids``/``query_masks``,
        ``text_ids``/``text_masks`` (or their ``_mlm`` variants when
        ``mask_text`` is True) and ``image``.
        """
        do_mlm = "_mlm" if mask_text else ""

        # --- query text: uni-modal text encoding only (no fusion) ---
        query_ids = batch[f"query_ids"]
        query_masks = batch[f"query_masks"]
        query_embeds = self.text_transformer.embeddings(input_ids=query_ids)
        device = query_embeds.device
        input_shape = query_masks.size()
        extend_query_masks = self.text_transformer.get_extended_attention_mask(query_masks, input_shape, device)
        for layer in self.text_transformer.encoder.layer:
            query_embeds = layer(query_embeds, extend_query_masks)[0]
        query_embeds = self.cross_modal_text_transform(query_embeds)
        query_cls = self.cross_modal_text_pooler(query_embeds)

        # --- document text ---
        text_ids = batch[f"text_ids{do_mlm}"]
        # text_labels = batch[f"text_labels{do_mlm}"]
        text_masks = batch[f"text_masks"]
        text_embeds = self.text_transformer.embeddings(input_ids=text_ids)
        device = text_embeds.device
        input_shape = text_masks.size()
        extend_text_masks = self.text_transformer.get_extended_attention_mask(text_masks, input_shape, device)
        for layer in self.text_transformer.encoder.layer:
            text_embeds = layer(text_embeds, extend_text_masks)[0]
        text_embeds = self.cross_modal_text_transform(text_embeds)

        # --- image ---
        img = batch["image"]
        image_embeds = self.vit_model(img)
        image_embeds = self.cross_modal_image_transform(image_embeds)
        # All image tokens are valid, so the image mask is all ones.
        image_masks = torch.ones((image_embeds.size(0), image_embeds.size(1)), dtype=torch.long, device=device)
        extend_image_masks = self.text_transformer.get_extended_attention_mask(image_masks, image_masks.size(), device)

        # Tag each stream with its modality-type embedding before fusion.
        text_embeds, image_embeds = (
            text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)),
            image_embeds
            + self.token_type_embeddings(
                torch.full_like(image_masks, image_token_type_idx)
            ),
        )

        # Co-attention: each stream attends to the other, layer by layer.
        x, y = text_embeds, image_embeds
        for text_layer, image_layer in zip(self.cross_modal_text_layers, self.cross_modal_image_layers):
            x1 = text_layer(x, y, extend_text_masks, extend_image_masks)
            y1 = image_layer(y, x, extend_image_masks, extend_text_masks)
            x, y = x1[0], y1[0]

        text_feats, image_feats = x, y
        cls_feats_text = self.cross_modal_text_pooler(x)
        if self.is_clip:
            cls_feats_image = self.cross_modal_image_pooler(y)
        else:
            # Swin: average-pool over tokens to emulate a [CLS] vector.
            avg_image_feats = self.avgpool(image_feats.transpose(1, 2)).view(image_feats.size(0), 1, -1)
            cls_feats_image = self.cross_modal_image_pooler(avg_image_feats)
        cls_feats = torch.cat([cls_feats_text, cls_feats_image], dim=-1)

        ret = {
            "query_embeds": query_embeds,
            "query_atts": query_masks,
            "query_cls": query_cls,
            "text_feats": text_feats,
            "text_masks": text_masks,
            "text_cls": cls_feats_text,
            "multi_embeds": torch.cat([image_feats, text_feats], dim=1),
            "multi_atts": torch.cat([image_masks, text_masks], dim=-1),
            "cls_feats_text": cls_feats_text,
            "image_feats": image_feats,
            "image_masks": image_masks,
            "cls_feats_image": cls_feats_image,
            "img_cls": cls_feats_image,
            "cls_feats": cls_feats,
            # "text_labels": text_labels,
            # "text_ids": text_ids,
        }
        return ret

    @torch.no_grad()
    def output_multi(self,
        batch,
        mask_text=False,
        mask_image=False,
        image_token_type_idx=1,
        img=None,):
        """Inference-only variant of :meth:`infer` that skips the query branch
        and returns only the fused document (text+image) features."""
        img = batch["image"]
        do_mlm = "_mlm" if mask_text else ""

        # --- document text ---
        text_ids = batch[f"text_ids{do_mlm}"]
        text_masks = batch[f"text_masks"]
        text_embeds = self.text_transformer.embeddings(input_ids=text_ids)
        device = text_embeds.device
        input_shape = text_masks.size()
        extend_text_masks = self.text_transformer.get_extended_attention_mask(text_masks, input_shape, device)
        for layer in self.text_transformer.encoder.layer:
            text_embeds = layer(text_embeds, extend_text_masks)[0]
        text_embeds = self.cross_modal_text_transform(text_embeds)

        # --- image ---
        img = batch["image"]
        image_embeds = self.vit_model(img)
        image_embeds = self.cross_modal_image_transform(image_embeds)
        image_masks = torch.ones((image_embeds.size(0), image_embeds.size(1)), dtype=torch.long, device=device)
        extend_image_masks = self.text_transformer.get_extended_attention_mask(image_masks, image_masks.size(), device)

        # Modality-type embeddings, then lock-step co-attention (as in infer()).
        text_embeds, image_embeds = (
            text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)),
            image_embeds
            + self.token_type_embeddings(
                torch.full_like(image_masks, image_token_type_idx)
            ),
        )

        x, y = text_embeds, image_embeds
        for text_layer, image_layer in zip(self.cross_modal_text_layers, self.cross_modal_image_layers):
            x1 = text_layer(x, y, extend_text_masks, extend_image_masks)
            y1 = image_layer(y, x, extend_image_masks, extend_text_masks)
            x, y = x1[0], y1[0]

        text_feats, image_feats = x, y
        cls_feats_text = self.cross_modal_text_pooler(x)
        if self.is_clip:
            cls_feats_image = self.cross_modal_image_pooler(y)
        else:
            avg_image_feats = self.avgpool(image_feats.transpose(1, 2)).view(image_feats.size(0), 1, -1)
            cls_feats_image = self.cross_modal_image_pooler(avg_image_feats)
        cls_feats = torch.cat([cls_feats_text, cls_feats_image], dim=-1)

        ret = {
            "text_feats": text_feats,
            "text_masks": text_masks,
            "text_cls": cls_feats_text,
            "cls_feats_text": cls_feats_text,
            "image_feats": image_feats,
            "image_masks": image_masks,
            "cls_feats_image": cls_feats_image,
            "cls_feats": cls_feats,
            "img_cls": cls_feats_image,
            "multi_embeds": torch.cat([image_feats, text_feats], dim=1),
            "multi_atts": torch.cat([image_masks, text_masks], dim=-1),
        }
        return ret

    def forward(self, batch):
        """Run the objectives listed in ``self.current_tasks``; with no tasks
        set, just return the raw ``infer`` features."""
        ret = dict()
        if len(self.current_tasks) == 0:
            ret.update(self.infer(batch))
            return ret

        # Masked Language Modeling
        if "mlm" in self.current_tasks:
            ret.update(objectives.compute_mlm(self, batch))

        # Image Text Matching
        if "itm" in self.current_tasks:
            ret.update(objectives.compute_itm(self, batch))

        # Visual Question Answering
        if "vqa" in self.current_tasks:
            ret.update(objectives.compute_vqa(self, batch))

        # Natural Language for Visual Reasoning 2
        if "nlvr2" in self.current_tasks:
            ret.update(objectives.compute_nlvr2(self, batch))

        # SNLI Visual Entailment
        if "snli" in self.current_tasks:
            ret.update(objectives.compute_snli(self, batch))

        # Image Retrieval and Text Retrieval
        if "irtr" in self.current_tasks:
            ret.update(objectives.compute_irtr(self, batch))

        return ret

    def training_step(self, batch, batch_idx):
        """Sum every '*_loss' entry produced by the active objectives."""
        # meter_utils.set_task(self)
        output = self(batch)
        total_loss = sum([v for k, v in output.items() if "loss" in k])
        return total_loss

    def training_epoch_end(self, outs):
        # meter_utils.epoch_wrapup(self)
        pass

    def validation_step(self, batch, batch_idx):
        """Forward pass only; metrics were handled by the (disabled) meter_utils."""
        # meter_utils.set_task(self)
        output = self(batch)

    def validation_epoch_end(self, outs):
        # meter_utils.epoch_wrapup(self)
        pass

    def test_step(self, batch, batch_idx):
        """Forward pass plus VQA answer collection when the VQA head is active."""
        # FIX: meter_utils is not imported (see commented import at file top);
        # calling meter_utils.set_task(self) here raised NameError.
        # meter_utils.set_task(self)
        output = self(batch)
        ret = dict()

        if self.hparams.config["loss_names"]["vqa"] > 0:
            ret.update(objectives.vqa_test_step(self, batch, output))

        return ret

    def test_epoch_end(self, outs):
        model_name = self.hparams.config["load_path"].split("/")[-1][:-5]

        if self.hparams.config["loss_names"]["vqa"] > 0:
            objectives.vqa_test_wrapup(outs, model_name)
        # meter_utils.epoch_wrapup(self)

    def configure_optimizers(self):
        # Optimizer setup lived in meter_utils; returning None tells Lightning
        # that no optimizer is configured here.
        pass
        # return meter_utils.set_schedule(self)
| 15,962 | 40.141753 | 134 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/dist_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
import torch
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
    """Size of the default process group; 1 when torch.distributed is unusable."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank() -> int:
    """Global rank of this process; 0 when torch.distributed is unusable."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine) process group.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    # Requires launch() to have populated the per-machine group.
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group,
        i.e. the number of processes per machine.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
    """True only on the global rank-0 process."""
    rank = get_rank()
    return rank == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    # No-op outside multi-process training, or with a single worker.
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a process group based on gloo backend, containing all the ranks
    The result is cached.
    """
    # nccl cannot gather CPU tensors, so serialization-based collectives need
    # a dedicated gloo group; with gloo already active, reuse the world group.
    if dist.get_backend() == "nccl":
        return dist.new_group(backend="gloo")
    else:
        return dist.group.WORLD
def _serialize_to_tensor(data, group):
    # Pickle `data` into a 1-D uint8 tensor on the device matching `group`'s
    # backend (CPU for gloo, CUDA for nccl) so it can be sent collectively.
    backend = dist.get_backend(group)
    assert backend in ["gloo", "nccl"]
    device = torch.device("cpu" if backend == "gloo" else "cuda")

    buffer = pickle.dumps(data)
    if len(buffer) > 1024 ** 3:
        # Payloads over 1 GiB are almost always a mistake — warn, don't fail.
        logger = logging.getLogger(__name__)
        logger.warning(
            "Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
                get_rank(), len(buffer) / (1024 ** 3), device
            )
        )
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to(device=device)
    return tensor
def _pad_to_largest_tensor(tensor, group):
    """
    Returns:
        list[int]: size of the tensor, on each rank
        Tensor: padded tensor that has the max size
    """
    world_size = dist.get_world_size(group=group)
    assert (
        world_size >= 1
    ), "comm.gather/all_gather must be called from ranks within the given group!"
    # Exchange per-rank byte counts first so every rank knows the max size.
    local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
    size_list = [
        torch.zeros([1], dtype=torch.int64, device=tensor.device)
        for _ in range(world_size)
    ]
    dist.all_gather(size_list, local_size, group=group)
    size_list = [int(size.item()) for size in size_list]

    max_size = max(size_list)

    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    if local_size != max_size:
        padding = torch.zeros(
            (max_size - local_size,), dtype=torch.uint8, device=tensor.device
        )
        tensor = torch.cat((tensor, padding), dim=0)
    return size_list, tensor
def all_gather(data, group=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.

    Returns:
        list[data]: list of data gathered from each rank
    """
    # Fast path: single process — nothing to exchange.
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group) == 1:
        return [data]

    # Pickle to a byte tensor, pad all ranks to the same length, then gather.
    tensor = _serialize_to_tensor(data, group)

    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    max_size = max(size_list)

    # receiving Tensor from all ranks
    tensor_list = [
        torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
        for _ in size_list
    ]
    dist.all_gather(tensor_list, tensor, group=group)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # Truncate each buffer to its original (pre-padding) size before unpickling.
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list
def gather(data, dst=0, group=None):
    """
    Run gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
        dst (int): destination rank
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.

    Returns:
        list[data]: on dst, a list of data gathered from each rank. Otherwise,
            an empty list.
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group=group) == 1:
        return [data]
    rank = dist.get_rank(group=group)

    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)

    # receiving Tensor from all ranks
    if rank == dst:
        max_size = max(size_list)
        tensor_list = [
            torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
            for _ in size_list
        ]
        dist.gather(tensor, tensor_list, dst=dst, group=group)

        data_list = []
        for size, tensor in zip(size_list, tensor_list):
            # Strip padding before unpickling each rank's payload.
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        return data_list
    else:
        # Non-destination ranks must still participate in the collective,
        # but pass no receive buffers and return nothing.
        dist.gather(tensor, [], dst=dst, group=group)
        return []
def shared_random_seed():
    """Return one random int agreed upon by every worker.

    Each process draws its own random number, the draws are exchanged via
    ``all_gather``, and every process keeps rank 0's draw. If workers need a
    shared RNG, they can seed it with this value. All workers must call this
    function, otherwise it will deadlock.
    """
    local_draw = np.random.randint(2 ** 31)
    return all_gather(local_draw)[0]
def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the reduced results.
    Args:
        input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
        average (bool): whether to do average or sum
    Returns:
        a dict with the same keys as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single process: nothing to reduce.
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # Stack into one tensor so a single collective call covers every key.
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
| 7,814 | 27.837638 | 100 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/objectives.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from .dist_utils import all_gather
def compute_mlm(pl_module, batch):
    """Masked-language-modeling objective: cross-entropy over the vocabulary."""
    infer = pl_module.infer(batch, mask_text=True, mask_image=False)
    logits = pl_module.mlm_score(infer["text_feats"])
    labels = infer["text_labels"]

    vocab_size = pl_module.hparams.config["vocab_size"]
    loss = F.cross_entropy(
        logits.view(-1, vocab_size),
        labels.view(-1),
        ignore_index=-100,  # unmasked positions carry label -100 and are skipped
    )

    ret = {
        "mlm_loss": loss,
        "mlm_logits": logits,
        "mlm_labels": labels,
        "mlm_ids": infer["text_ids"],
    }

    phase = "train" if pl_module.training else "val"
    tracked_loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"])
    tracked_acc = getattr(pl_module, f"{phase}_mlm_accuracy")(
        ret["mlm_logits"], ret["mlm_labels"]
    )
    pl_module.log(f"mlm/{phase}/loss", tracked_loss)
    pl_module.log(f"mlm/{phase}/accuracy", tracked_acc)

    return ret
def compute_itm(pl_module, batch):
    """Image-text matching objective.

    Roughly half of the pairs keep their true image and the rest get the
    pre-sampled false image; a binary head then predicts whether each
    image-text pair matches.
    """
    n_total = len(batch["text"])
    n_pos = n_total // 2
    n_neg = n_total - n_pos
    itm_labels = torch.cat([torch.ones(n_pos), torch.zeros(n_neg)]).to(
        pl_module.device
    )
    # shuffle so positives and negatives are interleaved across the batch
    itm_labels = itm_labels[torch.randperm(itm_labels.size(0))]

    itm_images = []
    for true_imgs, false_imgs in zip(batch["image"], batch["false_image_0"]):
        mixed = [
            t_img if itm_labels[idx] == 1 else f_img
            for idx, (t_img, f_img) in enumerate(zip(true_imgs, false_imgs))
        ]
        itm_images.append(torch.stack(mixed))

    batch = dict(batch)  # shallow copy so the caller's batch keeps its images
    batch["image"] = itm_images

    infer = pl_module.infer(batch, mask_text=False, mask_image=False)

    itm_logits = pl_module.itm_score(infer["cls_feats"])
    itm_loss = F.cross_entropy(itm_logits, itm_labels.long())

    ret = {
        "itm_loss": itm_loss,
        "itm_logits": itm_logits,
        "itm_labels": itm_labels,
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_itm_loss")(ret["itm_loss"])
    acc = getattr(pl_module, f"{phase}_itm_accuracy")(
        ret["itm_logits"], ret["itm_labels"]
    )
    pl_module.log(f"itm/{phase}/loss", loss)
    pl_module.log(f"itm/{phase}/accuracy", acc)

    return ret
def compute_snli(pl_module, batch):
    """SNLI-VE objective: entailment classification over fused features."""
    infer = pl_module.infer(
        batch, mask_text=False, mask_image=False,
    )
    snli_logits = pl_module.snli_classifier(infer["cls_feats"])

    snli_labels = torch.tensor(batch["labels"]).to(pl_module.device).long()
    snli_loss = F.cross_entropy(snli_logits, snli_labels.view(-1))

    ret = {
        "snli_loss": snli_loss,
        "snli_logits": snli_logits,
        "snli_labels": snli_labels,
    }

    phase = "train" if pl_module.training else "val"

    if phase == "train":
        loss = getattr(pl_module, f"{phase}_snli_loss")(ret["snli_loss"])
        acc = getattr(pl_module, f"{phase}_snli_accuracy")(
            ret["snli_logits"], ret["snli_labels"]
        )
        pl_module.log(f"snli/{phase}/loss", loss)
        pl_module.log(f"snli/{phase}/accuracy", acc)
    else:
        # Validation mixes dev and test tables; log each split separately.
        for split in ("dev", "test"):
            split_idx = [
                i for i, name in enumerate(batch["table_name"]) if split in name
            ]
            if not split_idx:
                continue
            split_logits = ret["snli_logits"][split_idx]
            split_labels = ret["snli_labels"][split_idx]
            split_loss = getattr(pl_module, f"{split}_snli_loss")(
                F.cross_entropy(split_logits, split_labels)
            )
            split_acc = getattr(pl_module, f"{split}_snli_accuracy")(
                split_logits, split_labels
            )
            pl_module.log(f"snli/{split}/loss", split_loss)
            pl_module.log(f"snli/{split}/accuracy", split_acc)

    return ret
def compute_vqa(pl_module, batch):
    """VQAv2 objective: multi-label BCE against soft answer scores."""
    infer = pl_module.infer(batch, mask_text=False, mask_image=False)
    vqa_logits = pl_module.vqa_classifier(infer["cls_feats"])

    # Build the soft target matrix: row i carries score s at answer index l.
    num_answers = pl_module.hparams.config["vqav2_label_size"]
    vqa_targets = torch.zeros(len(vqa_logits), num_answers).to(pl_module.device)

    vqa_labels = batch["vqa_labels"]
    vqa_scores = batch["vqa_scores"]
    for row, (answer_ids, answer_scores) in enumerate(zip(vqa_labels, vqa_scores)):
        for answer_id, answer_score in zip(answer_ids, answer_scores):
            vqa_targets[row, answer_id] = answer_score

    # Scale the mean BCE by the label count,
    # following https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
    vqa_loss = (
        F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets)
        * vqa_targets.shape[1]
    )

    ret = {
        "vqa_loss": vqa_loss,
        "vqa_logits": vqa_logits,
        "vqa_targets": vqa_targets,
        "vqa_labels": vqa_labels,
        "vqa_scores": vqa_scores,
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"])
    score = getattr(pl_module, f"{phase}_vqa_score")(
        ret["vqa_logits"], ret["vqa_targets"]
    )
    pl_module.log(f"vqa/{phase}/loss", loss)
    pl_module.log(f"vqa/{phase}/score", score)

    return ret
def compute_nlvr2(pl_module, batch):
    """NLVR2 objective: classify over the fused features of both paired images."""
    infer1 = pl_module.infer(
        batch, mask_text=False, mask_image=False, image_token_type_idx=1
    )
    infer2 = pl_module.infer(
        batch, mask_text=False, mask_image=False, image_token_type_idx=2
    )

    cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1)
    nlvr2_logits = pl_module.nlvr2_classifier(cls_feats)

    nlvr2_labels = torch.tensor(batch["answers"]).to(pl_module.device).long()
    nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels.view(-1))

    ret = {
        "nlvr2_loss": nlvr2_loss,
        "nlvr2_logits": nlvr2_logits,
        "nlvr2_labels": nlvr2_labels,
    }

    phase = "train" if pl_module.training else "val"

    if phase == "train":
        loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"])
        acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")(
            ret["nlvr2_logits"], ret["nlvr2_labels"]
        )
        pl_module.log(f"nlvr2/{phase}/loss", loss)
        pl_module.log(f"nlvr2/{phase}/accuracy", acc)
    else:
        # Validation mixes dev and test tables; log each split separately.
        for split in ("dev", "test"):
            split_idx = [
                i for i, name in enumerate(batch["table_name"]) if split in name
            ]
            if not split_idx:
                continue
            split_logits = ret["nlvr2_logits"][split_idx]
            split_labels = ret["nlvr2_labels"][split_idx]
            split_loss = getattr(pl_module, f"{split}_nlvr2_loss")(
                F.cross_entropy(split_logits, split_labels)
            )
            split_acc = getattr(pl_module, f"{split}_nlvr2_accuracy")(
                split_logits, split_labels
            )
            pl_module.log(f"nlvr2/{split}/loss", split_loss)
            pl_module.log(f"nlvr2/{split}/accuracy", split_acc)

    return ret
def compute_irtr(pl_module, batch):
    """Image-text retrieval objective.

    Pairs every image with its true caption plus `draw_false_text` sampled
    negatives and trains the rank head with cross-entropy; the positive
    caption always sits at slot 0.
    """
    _bs, _c, _h, _w = batch["image"][0].shape
    false_len = pl_module.hparams.config["draw_false_text"]

    def _with_positive_first(suffix):
        # (bs, false_len, L) negatives, positive caption prepended at slot 0
        negatives = torch.stack(
            [batch[f"false_text_{i}_{suffix}"] for i in range(false_len)], dim=1
        )
        return torch.cat([batch[f"text_{suffix}"].unsqueeze(1), negatives], dim=1)

    text_ids = _with_positive_first("ids")
    text_masks = _with_positive_first("masks")
    text_labels = _with_positive_first("labels")

    # Broadcast each image across its (false_len + 1) candidate captions.
    images = batch["image"][0].unsqueeze(1).expand(_bs, false_len + 1, _c, _h, _w)

    infer = pl_module.infer(
        {
            "image": [rearrange(images, "bs fs c h w -> (bs fs) c h w")],
            "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
            "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
            "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
        }
    )
    score = pl_module.rank_output(infer["cls_feats"])[:, 0]
    score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
    answer = torch.zeros(_bs).to(score).long()  # the true pair is at index 0
    irtr_loss = F.cross_entropy(score, answer)

    ret = {
        "irtr_loss": irtr_loss,
    }

    phase = "train" if pl_module.training else "val"
    irtr_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"])
    pl_module.log(f"irtr/{phase}/irtr_loss", irtr_loss)

    return ret
@torch.no_grad()
def compute_irtr_recall(pl_module):
    """Compute R@1/5/10 for image->text (tr_*) and text->image (ir_*) retrieval.

    Every image is scored against every validation caption with the full
    cross-modal head, so the cost grows with n_images * n_text_batches;
    the image set is sharded across ranks via DistributedSampler and the
    per-rank score rows are merged with all_gather at the end.

    Returns:
        tuple: (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) scalar tensors.
    """
    text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
    text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    text_loader = torch.utils.data.DataLoader(
        text_dset,
        batch_size=64,
        num_workers=pl_module.hparams.config["num_workers"],
        pin_memory=True,
        collate_fn=functools.partial(
            text_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )

    # Image loader uses batch_size=1 because each image is tiled against a
    # whole text batch below; shuffle=False keeps a deterministic order.
    image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
        image_only=True
    )
    image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    dist_sampler = DistributedSampler(image_dset, shuffle=False)
    image_loader = torch.utils.data.DataLoader(
        image_dset,
        batch_size=1,
        num_workers=pl_module.hparams.config["num_workers"],
        sampler=dist_sampler,
        pin_memory=True,
        collate_fn=functools.partial(
            image_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )

    #TODO: speed up the process by caching text/image features
    text_preload = list()
    for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
        text_preload.append(
            {
                "text_ids": _b["text_ids"].to(pl_module.device),
                "text_masks": _b["text_masks"].to(pl_module.device),
                "text_labels": _b["text_labels"].to(pl_module.device),
                "img_index": _b["img_index"],
            }
        )

    # Ground-truth image index for every caption, flattened across batches.
    tiids = list()
    for pre in text_preload:
        tiids += pre["img_index"]
    tiids = torch.tensor(tiids)

    image_preload = list()
    for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
        image_preload.append((_b['image'][0], _b["img_index"][0]))

    rank_scores = list()
    rank_iids = list()
    for img_batch in tqdm.tqdm(image_preload, desc="rank loop"):
        _im, _iid = img_batch

        img_batch_score = list()
        for txt_batch in text_preload:
            fblen = len(txt_batch["text_ids"])
            # Tile this single image once per caption in the text batch.
            im = _im.repeat(fblen, 1, 1, 1).to(device=txt_batch['text_ids'].device)

            with torch.cuda.amp.autocast():
                score = pl_module.rank_output(
                    pl_module.infer(
                        {
                            "text_ids": txt_batch["text_ids"],
                            "text_masks": txt_batch["text_masks"],
                            "text_labels": txt_batch["text_labels"],
                        },
                        img=im,
                    )["cls_feats"]
                )[:, 0]

            img_batch_score.append(score)
        img_batch_score = torch.cat(img_batch_score)
        rank_scores.append(img_batch_score.cpu().tolist())
        rank_iids.append(_iid)

    # Merge the per-rank shards; every rank ends up with the full matrix.
    torch.distributed.barrier()
    gather_rank_scores = all_gather(rank_scores)
    gather_rank_iids = all_gather(rank_iids)

    iids = torch.tensor(gather_rank_iids)
    iids = iids.view(-1)
    scores = torch.tensor(gather_rank_scores)
    scores = scores.view(len(iids), -1)  # (n_images, n_texts)

    # Image -> text retrieval: rank captions per image (top-k along dim 1).
    topk10 = scores.topk(10, dim=1)
    topk5 = scores.topk(5, dim=1)
    topk1 = scores.topk(1, dim=1)
    topk10_iids = tiids[topk10.indices]
    topk5_iids = tiids[topk5.indices]
    topk1_iids = tiids[topk1.indices]

    tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
    tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
    tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()

    # Text -> image retrieval: rank images per caption (top-k along dim 0).
    topk10 = scores.topk(10, dim=0)
    topk5 = scores.topk(5, dim=0)
    topk1 = scores.topk(1, dim=0)
    topk10_iids = iids[topk10.indices]
    topk5_iids = iids[topk5.indices]
    topk1_iids = iids[topk1.indices]

    ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
    ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
    ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()

    return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
def init_weights(module):
    """BERT-style init: N(0, 0.02) weights, zero biases, unit LayerNorm scale."""
    is_linear = isinstance(module, nn.Linear)
    if is_linear or isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=0.02)
    elif isinstance(module, nn.LayerNorm):
        module.weight.data.fill_(1.0)
        module.bias.data.zero_()
    if is_linear and module.bias is not None:
        module.bias.data.zero_()
def vqa_test_step(pl_module, batch, output):
    """Convert one test batch's logits into answer strings for submission.

    Picks the answer vocabulary from the datamodule: VQAv2 ("vqa_trainval" /
    "vqa") when registered, otherwise GQA ("gqa_test" / "gqa").  The returned
    "gqa" flag tells `vqa_test_wrapup` which submission schema to emit.

    Args:
        pl_module: module whose trainer.datamodule holds the id2answer maps.
        batch: input batch; only "qid" is consumed here.
        output: objective output containing "vqa_logits" of shape (B, answers).

    Returns:
        dict with question ids ("qids"), answer strings ("preds") and a
        "gqa" bool selecting the submission schema.
    """
    dm_dicts = pl_module.trainer.datamodule.dm_dicts
    try:
        id2answer = (
            dm_dicts["vqa_trainval"].id2answer
            if "vqa_trainval" in dm_dicts
            else dm_dicts["vqa"].id2answer
        )
        is_gqa = False
    except KeyError:  # no VQAv2 datamodule registered -> fall back to GQA
        id2answer = (
            dm_dicts["gqa_test"].id2answer
            if "gqa_test" in dm_dicts
            else dm_dicts["gqa"].id2answer
        )
        is_gqa = True

    vqa_logits = output["vqa_logits"]
    vqa_preds = [id2answer[pred.item()] for pred in vqa_logits.argmax(dim=-1)]
    return {"qids": batch["qid"], "preds": vqa_preds, "gqa": is_gqa}
def arc_test_step(pl_module, batch, output):
    """Identity pass-through: captioning outputs are already submission-ready."""
    return output
def vqa_test_wrapup(outs, model_name):
    """Merge per-rank VQA/GQA predictions into one submission JSON.

    Each rank writes its shard to `vqa_submit_<rank>.json`; after a barrier,
    rank 0 concatenates every shard into `result/vqa_submit_<model_name>.json`
    (using the GQA schema when the test steps flagged `gqa`), then every rank
    deletes its own shard file.
    """
    rank = torch.distributed.get_rank()
    qids, preds = list(), list()
    gqa = False
    for out in outs:
        qids += out["qids"]
        preds += out["preds"]
        gqa = out['gqa']

    rets = list()
    for qid, pred in zip(qids, preds):
        if gqa:
            # GQA evaluation server schema
            rets.append({"questionId": qid, "prediction": pred})
        else:
            # VQAv2 evaluation server schema
            rets.append({"question_id": qid, "answer": pred})
    with open(f"vqa_submit_{rank}.json", "w") as fp:
        json.dump(rets, fp, indent=4)

    # Wait for every rank to finish writing its shard before rank 0 merges.
    torch.distributed.barrier()

    if rank == 0:
        jsons = list()
        paths = list(glob.glob("vqa_submit_*.json"))
        for path in paths:
            with open(path, "r") as fp:
                jsons += json.load(fp)
        os.makedirs("result", exist_ok=True)
        with open(f"result/vqa_submit_{model_name}.json", "w") as fp:
            json.dump(jsons, fp, indent=4)

    # Second barrier: shards may only be removed after the merge completed.
    torch.distributed.barrier()
    os.remove(f"vqa_submit_{rank}.json")
def arc_test_wrapup(outs, caplen, model_name):
    """Merge per-rank COCO caption predictions into one submission JSON.

    Each rank writes its shard to `coco_cap_len<caplen>_<rank>.json`; after a
    barrier, rank 0 concatenates all shards (sorted by image_id) into
    `result/arc/coco_cap_<model_name>_len<caplen>.json`, then every rank
    removes its own shard file.
    """
    rank = torch.distributed.get_rank()
    iids, captions = list(), list()
    for out in outs:
        iids += out["iid"]
        captions += out["captions"]

    rets = list()
    for iid, caption in zip(iids, captions):
        rets.append({"image_id": iid, "caption": caption})
    with open(f"coco_cap_len{caplen}_{rank}.json", "w") as fp:
        json.dump(rets, fp, indent=4)

    # Wait for every rank to finish writing its shard before rank 0 merges.
    torch.distributed.barrier()

    if rank == 0:
        jsons = list()
        paths = list(glob.glob(f"coco_cap_len{caplen}_*.json"))
        for path in paths:
            with open(path, "r") as fp:
                jsons += json.load(fp)
        os.makedirs("result/arc", exist_ok=True)
        jsons = sorted(jsons, key=lambda x: x["image_id"])
        with open(f"result/arc/coco_cap_{model_name}_len{caplen}.json", "w") as fp:
            json.dump(jsons, fp, indent=4)

    # Second barrier: shards may only be removed after the merge completed.
    torch.distributed.barrier()
    os.remove(f"coco_cap_len{caplen}_{rank}.json")
| 17,360 | 33.514911 | 88 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/swin_helpers.py | """ Model creation / weight loading / state_dict helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import os
import math
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Callable, Optional, Tuple
import torch
import torch.nn as nn
from timm.models.features import FeatureListNet, FeatureDictNet, FeatureHookNet
from timm.models.hub import has_hf_hub, download_cached_file, load_state_dict_from_hf, load_state_dict_from_url
from timm.models.layers import Conv2dSame, Linear
def swin_adapt_position_encoding(model, before=384, patch_size=32, after=384,
                                 suffix='relative_position_bias_table'):
    """Resize Swin relative-position bias tables for a new input resolution.

    Each ``*relative_position_bias_table`` entry has shape
    ((2*w - 1)**2, num_heads) with w = resolution // patch_size.  The table is
    reshaped to a per-head 2-D grid, bicubically interpolated to the new grid
    span, and flattened back.  Cached ``attn_mask`` /
    ``relative_position_index`` buffers are dropped so the model rebuilds them
    for the new resolution.

    Bug fix: the `patch_size` argument used to be ignored (the downsample
    factor was hard-coded to 32); it is now honored, with the default value
    preserving the old behavior exactly.

    Args:
        model: state dict to adapt in place (also returned).
        before: image resolution the checkpoint was pretrained at.
        patch_size: total downsampling factor defining the window grid.
        after: target image resolution.
        suffix: key suffix identifying the bias tables.

    Returns:
        The adapted state dict.
    """
    if after == before:
        return model
    grid_before = before // patch_size
    grid_after = after // patch_size
    # relative offsets span [-(w-1), w-1] -> 2*w - 1 entries per axis
    span_before = 2 * grid_before - 1
    span_after = 2 * grid_after - 1

    keys = [k for k in model if k.endswith(suffix)]
    assert len(keys) > 0
    for key in keys:
        table = model[key]
        # ((2w-1)^2, heads) -> (heads, 2w-1, 2w-1) for spatial interpolation
        table = table.transpose(0, 1).view(-1, span_before, span_before)
        table = torch.nn.functional.interpolate(
            table.unsqueeze(0), size=(span_after, span_after), mode='bicubic')
        table = table.squeeze(0).permute((1, 2, 0))
        model[key] = table.contiguous().view(-1, table.size(-1))

    # Stale resolution-dependent buffers; the model recreates them on load.
    for key in [k for k in model if k.endswith('attn_mask')]:
        model.pop(key)
    for key in [k for k in model if k.endswith('relative_position_index')]:
        model.pop(key)
    return model
_logger = logging.getLogger(__name__)
def load_state_dict(checkpoint_path, use_ema=False):
    """Read a checkpoint file and return its (un-prefixed) state dict.

    Handles raw state dicts as well as wrapper dicts holding 'state_dict'
    (or 'state_dict_ema' when `use_ema` is set); DataParallel's 'module.'
    prefix is stripped from parameter names.

    Raises:
        FileNotFoundError: if `checkpoint_path` is empty or does not exist.
    """
    if not (checkpoint_path and os.path.isfile(checkpoint_path)):
        _logger.error("No checkpoint found at '{}'".format(checkpoint_path))
        raise FileNotFoundError()

    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    state_dict_key = 'state_dict'
    if isinstance(checkpoint, dict) and use_ema and 'state_dict_ema' in checkpoint:
        state_dict_key = 'state_dict_ema'
    if state_dict_key and state_dict_key in checkpoint:
        # strip DataParallel's `module.` prefix while copying the entries
        state_dict = OrderedDict(
            (k[7:] if k.startswith('module') else k, v)
            for k, v in checkpoint[state_dict_key].items()
        )
    else:
        state_dict = checkpoint
    _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
    return state_dict
def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True):
    """Load weights from a checkpoint file into `model`.

    `.npz` / `.npy` files are routed through the model's own
    `load_pretrained` hook (numpy checkpoints); anything else is read via
    `load_state_dict` and applied with `model.load_state_dict`.
    """
    ext = os.path.splitext(checkpoint_path)[-1].lower()
    if ext in ('.npz', '.npy'):
        # numpy checkpoint, only loadable through a model-specific hook
        if not hasattr(model, 'load_pretrained'):
            raise NotImplementedError('Model cannot load numpy checkpoint')
        model.load_pretrained(checkpoint_path)
        return
    state_dict = load_state_dict(checkpoint_path, use_ema)
    model.load_state_dict(state_dict, strict=strict)
def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True):
    """Restore model (and optionally optimizer / AMP scaler) state to resume training.

    Args:
        model: module to load weights into.
        checkpoint_path: path to a checkpoint; either a raw state dict or a
            dict with 'state_dict' plus optional 'optimizer' / scaler /
            'epoch' / 'version' entries.
        optimizer: optimizer whose state is restored when the checkpoint has it.
        loss_scaler: AMP loss scaler, restored via its own `state_dict_key`.
        log_info: emit info-level log messages while restoring.

    Returns:
        The epoch to resume from, or None if the checkpoint stored no epoch.

    Raises:
        FileNotFoundError: if `checkpoint_path` does not exist.
    """
    resume_epoch = None
    if os.path.isfile(checkpoint_path):
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            if log_info:
                _logger.info('Restoring model state from checkpoint...')
            # strip DataParallel's 'module.' prefix from parameter names
            new_state_dict = OrderedDict()
            for k, v in checkpoint['state_dict'].items():
                name = k[7:] if k.startswith('module') else k
                new_state_dict[name] = v
            model.load_state_dict(new_state_dict)

            if optimizer is not None and 'optimizer' in checkpoint:
                if log_info:
                    _logger.info('Restoring optimizer state from checkpoint...')
                optimizer.load_state_dict(checkpoint['optimizer'])

            if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
                if log_info:
                    _logger.info('Restoring AMP loss scaler state from checkpoint...')
                loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])

            if 'epoch' in checkpoint:
                resume_epoch = checkpoint['epoch']
                if 'version' in checkpoint and checkpoint['version'] > 1:
                    resume_epoch += 1  # start at the next epoch, old checkpoints incremented before save

            if log_info:
                _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
        else:
            # checkpoint is a bare state dict
            model.load_state_dict(checkpoint)
            if log_info:
                _logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
        return resume_epoch
    else:
        _logger.error("No checkpoint found at '{}'".format(checkpoint_path))
        raise FileNotFoundError()
def load_custom_pretrained(model, default_cfg=None, load_fn=None, progress=False, check_hash=False):
    r"""Load a custom (non ``.pth``) pretrained weight file into `model`.

    The file referenced by the cfg's 'url' is downloaded into the torch.hub
    cache directory (and reused on later calls), then handed either to the
    supplied `load_fn` or to the model's own `load_pretrained` member.

    Args:
        model: instantiated model to load weights into.
        default_cfg (dict): pretrained cfg; falls back to `model.default_cfg`.
        load_fn: stand-alone function `(model, cached_file)` that performs the
            load; when omitted, `model.load_pretrained` is used if it exists.
        progress (bool): show a download progress bar on stderr.
        check_hash (bool): verify the file against the hash embedded in its
            `filename-<sha256>.ext` name.
    """
    cfg = default_cfg or getattr(model, 'default_cfg', None) or {}
    if not cfg.get('url', None):
        _logger.warning("No pretrained weights exist for this model. Using random initialization.")
        return
    cached_file = download_cached_file(cfg['url'], check_hash=check_hash, progress=progress)

    if load_fn is not None:
        load_fn(model, cached_file)
    elif hasattr(model, 'load_pretrained'):
        model.load_pretrained(cached_file)
    else:
        _logger.warning("Valid function to load pretrained weights is not available, using random initialization.")
def adapt_input_conv(in_chans, conv_weight):
    """Adapt a pretrained first-conv weight to a different input channel count.

    1 channel: sum the RGB filters (per group of 3 for space2depth stems).
    N != 3 channels: tile the RGB filters to cover N and rescale by 3/N so
    activation magnitudes stay roughly comparable.

    Returns the adapted weight in the original dtype.
    """
    original_dtype = conv_weight.dtype
    conv_weight = conv_weight.float()  # half weights can't be summed on CPU
    out_ch, in_ch, kh, kw = conv_weight.shape
    if in_chans == 1:
        if in_ch > 3:
            # space2depth stems carry in_ch = 3*k; collapse each RGB triple
            assert in_ch % 3 == 0
            conv_weight = conv_weight.reshape(out_ch, in_ch // 3, 3, kh, kw)
            conv_weight = conv_weight.sum(dim=2, keepdim=False)
        else:
            conv_weight = conv_weight.sum(dim=1, keepdim=True)
    elif in_chans != 3:
        if in_ch != 3:
            raise NotImplementedError('Weight format not supported by conversion.')
        # NOTE this strategy should be better than random init, but there could be other combinations of
        # the original RGB input layer weights that'd work better for specific cases.
        repeat = int(math.ceil(in_chans / 3))
        conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
        conv_weight *= (3 / float(in_chans))
    return conv_weight.to(original_dtype)
def load_pretrained(model, img_size, default_cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False, resolution_before=384):
    """ Load pretrained checkpoint

    Args:
        model (nn.Module) : PyTorch model module
        img_size (int): target input resolution; Swin position tables are resized to it
        default_cfg (Optional[Dict]): default configuration for pretrained weights / target dataset
        num_classes (int): num_classes for model
        in_chans (int): in_chans for model
        filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args)
        strict (bool): strict load of checkpoint
        progress (bool): enable progress bar for weight download
        resolution_before (int): resolution the checkpoint was pretrained at
    """
    default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {}
    pretrained_url = default_cfg.get('url', None)
    hf_hub_id = default_cfg.get('hf_hub', None)
    if not pretrained_url and not hf_hub_id:
        _logger.warning("No pretrained weights exist for this model. Using random initialization.")
        return
    if hf_hub_id and has_hf_hub(necessary=not pretrained_url):
        # prefer the Hugging Face hub when a hub id is configured
        _logger.info(f'Loading pretrained weights from Hugging Face hub ({hf_hub_id})')
        state_dict = load_state_dict_from_hf(hf_hub_id)
    else:
        _logger.info(f'Loading pretrained weights from url ({pretrained_url})')
        state_dict = load_state_dict_from_url(pretrained_url, progress=progress, map_location='cpu')
        # resize relative-position bias tables in place for the target resolution
        # NOTE(review): assumes the checkpoint nests its weights under 'model';
        # the hf-hub path above skips this adaptation -- confirm intended
        swin_adapt_position_encoding(state_dict['model'], before=resolution_before, after=img_size)
    if filter_fn is not None:
        # for backwards compat with filter fn that take one arg, try one first, the two
        try:
            state_dict = filter_fn(state_dict)
        except TypeError:
            state_dict = filter_fn(state_dict, model)

    input_convs = default_cfg.get('first_conv', None)
    if input_convs is not None and in_chans != 3:
        if isinstance(input_convs, str):
            input_convs = (input_convs,)
        for input_conv_name in input_convs:
            weight_name = input_conv_name + '.weight'
            try:
                # adapt the stem conv weights to the requested channel count
                state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name])
                _logger.info(
                    f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
            except NotImplementedError as e:
                # fall back to random init for this layer; relax strict loading
                del state_dict[weight_name]
                strict = False
                _logger.warning(
                    f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')

    classifiers = default_cfg.get('classifier', None)
    label_offset = default_cfg.get('label_offset', 0)
    if classifiers is not None:
        if isinstance(classifiers, str):
            classifiers = (classifiers,)
        if num_classes != default_cfg['num_classes']:
            for classifier_name in classifiers:
                # completely discard fully connected if model num_classes doesn't match pretrained weights
                del state_dict[classifier_name + '.weight']
                del state_dict[classifier_name + '.bias']
            strict = False
        elif label_offset > 0:
            for classifier_name in classifiers:
                # special case for pretrained weights with an extra background class in pretrained weights
                classifier_weight = state_dict[classifier_name + '.weight']
                state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
                classifier_bias = state_dict[classifier_name + '.bias']
                state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]

    model.load_state_dict(state_dict, strict=strict)
def extract_layer(model, layer):
    """Walk a dotted path (e.g. 'blocks.0.conv') into a model and return it.

    Digit components index into sequences; other components are attribute
    lookups. A DataParallel-style 'module' wrapper is transparently unwrapped.
    Traversal stops early — returning the last reachable module — when a
    component is missing.
    """
    parts = layer.split('.')
    module = model
    if hasattr(model, 'module') and parts[0] != 'module':
        module = model.module  # unwrap DataParallel
    if not hasattr(model, 'module') and parts[0] == 'module':
        parts = parts[1:]
    for part in parts:
        if not hasattr(module, part):
            return module
        module = module[int(part)] if part.isdigit() else getattr(module, part)
    return module
def set_layer(model, layer, val):
    """Replace the sub-module at dotted path `layer` with `val`.

    Mirrors `extract_layer`'s traversal rules (digit components index, others
    are attributes, DataParallel 'module' wrappers are unwrapped); the final
    resolvable component is assigned via setattr on its parent.
    """
    parts = layer.split('.')
    module = model
    if hasattr(model, 'module') and parts[0] != 'module':
        module = model.module

    # Probe how deep the path actually resolves (missing components are
    # skipped without aborting, matching extract_layer's lenient walk).
    probe = module
    depth = 0
    for part in parts:
        if hasattr(probe, part):
            probe = probe[int(part)] if part.isdigit() else getattr(probe, part)
            depth += 1
    depth -= 1

    # Walk to the parent of the last resolvable component, then assign.
    for part in parts[:depth]:
        module = module[int(part)] if part.isdigit() else getattr(module, part)
    setattr(module, parts[depth], val)
def adapt_model_from_string(parent_module, model_string):
    """Build a pruned copy of `parent_module` from a serialized shape spec.

    `model_string` is a '***'-separated list of `name:[d0, d1, ...]` entries
    giving the target weight shape for each named layer. Conv2d, BatchNorm2d
    and Linear layers are re-created with the pruned channel counts and
    swapped into a deep copy of the parent; all other modules are untouched.

    Returns the adapted copy in eval mode (the parent is also set to eval).
    """
    separator = '***'
    state_dict = {}
    lst_shape = model_string.split(separator)
    for k in lst_shape:
        k = k.split(':')
        key = k[0]
        shape = k[1][1:-1].split(',')  # strip the surrounding '[' and ']'
        if shape[0] != '':
            state_dict[key] = [int(i) for i in shape]

    new_module = deepcopy(parent_module)
    for n, m in parent_module.named_modules():
        old_module = extract_layer(parent_module, n)
        if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame):
            if isinstance(old_module, Conv2dSame):
                conv = Conv2dSame
            else:
                conv = nn.Conv2d
            s = state_dict[n + '.weight']
            in_channels = s[1]
            out_channels = s[0]
            g = 1
            if old_module.groups > 1:
                # grouped (e.g. depthwise) conv: groups track the pruned width
                in_channels = out_channels
                g = in_channels
            new_conv = conv(
                in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size,
                bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation,
                groups=g, stride=old_module.stride)
            set_layer(new_module, n, new_conv)
        if isinstance(old_module, nn.BatchNorm2d):
            new_bn = nn.BatchNorm2d(
                num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
                affine=old_module.affine, track_running_stats=True)
            set_layer(new_module, n, new_bn)
        if isinstance(old_module, nn.Linear):
            # FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer?
            num_features = state_dict[n + '.weight'][1]
            new_fc = Linear(
                in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None)
            set_layer(new_module, n, new_fc)
            if hasattr(new_module, 'num_features'):
                new_module.num_features = num_features
    new_module.eval()
    parent_module.eval()
    return new_module
def adapt_model_from_file(parent_module, model_variant):
    """Load the pruning spec `pruned/<model_variant>.txt` shipped next to this
    module and apply it to `parent_module` via `adapt_model_from_string`."""
    spec_path = os.path.join(os.path.dirname(__file__), 'pruned', model_variant + '.txt')
    with open(spec_path, 'r') as spec_file:
        spec = spec_file.read().strip()
    return adapt_model_from_string(parent_module, spec)
def default_cfg_for_features(default_cfg):
    """Return a copy of `default_cfg` with classifier-related fields removed,
    since they have little relevance for a features-only backbone."""
    trimmed = deepcopy(default_cfg)
    # add default final pool size?
    for field in ('num_classes', 'crop_pct', 'classifier', 'global_pool'):
        trimmed.pop(field, None)
    return trimmed
def overlay_external_default_cfg(default_cfg, kwargs):
    """Pop 'external_default_cfg' out of `kwargs` (if present) and overlay it
    onto `default_cfg` in place, discarding stale download locations first."""
    external = kwargs.pop('external_default_cfg', None)
    if not external:
        return
    # url / hf_hub must come from the external cfg, never the stale default
    default_cfg.pop('url', None)
    default_cfg.pop('hf_hub', None)
    default_cfg.update(external)
def set_default_kwargs(kwargs, names, default_cfg):
    """Fill missing model-__init__ kwargs in place from the default cfg.

    'img_size' / 'in_chans' are derived from the cfg's combined
    input_size=(C, H, W) entry (for legacy reasons the model takes them as
    separate args); every other listed name is copied verbatim when the cfg
    defines a non-None value. Existing kwargs are never overwritten.
    """
    for name in names:
        if name == 'img_size':
            input_size = default_cfg.get('input_size', None)
            if input_size is not None:
                assert len(input_size) == 3
                kwargs.setdefault(name, input_size[-2:])
        elif name == 'in_chans':
            input_size = default_cfg.get('input_size', None)
            if input_size is not None:
                assert len(input_size) == 3
                kwargs.setdefault(name, input_size[0])
        elif default_cfg.get(name, None) is not None:
            kwargs.setdefault(name, default_cfg[name])
def filter_kwargs(kwargs, names):
    """Drop the listed keys from `kwargs` in place; no-op when either is empty."""
    if not kwargs or not names:
        return
    for name in names:
        kwargs.pop(name, None)
def update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter):
    """Reconcile `default_cfg` and model kwargs in place before construction.

    FIXME this sequence of overlay default_cfg, set default kwargs, filter
    kwargs could/should be replaced by an improved configuration mechanism.

    Args:
        default_cfg: input default_cfg (updated in-place)
        kwargs: keyword args passed to model build fn (updated in-place)
        kwargs_filter: keyword arg keys that must be removed before model __init__
    """
    # 1) overlay values from `external_default_cfg` if the caller passed one
    overlay_external_default_cfg(default_cfg, kwargs)
    # 2) seed kwargs with defaults derivable from the cfg
    default_kwarg_names = ('num_classes', 'global_pool', 'in_chans')
    if default_cfg.get('fixed_input_size', False):
        # fixed-input-size models take an img_size arg pinned by the cfg
        default_kwarg_names += ('img_size',)
    set_default_kwargs(kwargs, names=default_kwarg_names, default_cfg=default_cfg)
    # 3) strip kwargs the target model variant does not accept
    filter_kwargs(kwargs, names=kwargs_filter)
def swin_build_model_with_cfg(
        model_cls: Callable,
        variant: str,
        pretrained: bool,
        default_cfg: dict,
        model_cfg: Optional[Any] = None,
        feature_cfg: Optional[dict] = None,
        pretrained_strict: bool = True,
        pretrained_filter_fn: Optional[Callable] = None,
        pretrained_custom_load: bool = False,
        kwargs_filter: Optional[Tuple[str]] = None,
        **kwargs):
    """ Build model with specified default_cfg and optional model_cfg
    This helper fn aids in the construction of a model including:
      * handling default_cfg and associated pretained weight loading
      * passing through optional model_cfg for models with config based arch spec
      * features_only model adaptation
      * pruning config / model adaptation
    Args:
        model_cls (nn.Module): model class
        variant (str): model variant name
        pretrained (bool): load pretrained weights
        default_cfg (dict): model's default pretrained/task config
        model_cfg (Optional[Dict]): model's architecture config
        feature_cfg (Optional[Dict]: feature extraction adapter config
        pretrained_strict (bool): load pretrained weights strictly
        pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights
        pretrained_custom_load (bool): use custom load fn, to load numpy or other non PyTorch weights
        kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model
        **kwargs: model args passed through to model __init__
    """
    pruned = kwargs.pop('pruned', False)
    features = False
    feature_cfg = feature_cfg or {}
    # Work on a copy so the caller's default_cfg dict is never mutated.
    default_cfg = deepcopy(default_cfg) if default_cfg else {}
    update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter)
    default_cfg.setdefault('architecture', variant)
    # Setup for feature extraction wrapper done at end of this fn
    if kwargs.pop('features_only', False):
        features = True
        feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
        if 'out_indices' in kwargs:
            feature_cfg['out_indices'] = kwargs.pop('out_indices')
    # Build the model
    model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs)
    model.default_cfg = default_cfg
    if pruned:
        model = adapt_model_from_file(model, variant)
    # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
    num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
    if pretrained:
        if pretrained_custom_load:
            load_custom_pretrained(model)
        else:
            # NOTE(review): kwargs['img_size'] and kwargs['config']['resolution_before']
            # are indexed unconditionally here, so loading pretrained weights
            # raises KeyError unless both were supplied by the caller.
            load_pretrained(
                model,
                num_classes=num_classes_pretrained,
                in_chans=kwargs.get('in_chans', 3),
                filter_fn=pretrained_filter_fn,
                img_size=kwargs['img_size'],
                strict=pretrained_strict,
                resolution_before=kwargs['config']['resolution_before'])
    # Wrap the model in a feature extraction module if enabled
    if features:
        feature_cls = FeatureListNet
        if 'feature_cls' in feature_cfg:
            feature_cls = feature_cfg.pop('feature_cls')
            if isinstance(feature_cls, str):
                feature_cls = feature_cls.lower()
                if 'hook' in feature_cls:
                    feature_cls = FeatureHookNet
                else:
                    assert False, f'Unknown feature class {feature_cls}'
        model = feature_cls(model, **feature_cfg)
        model.default_cfg = default_cfg_for_features(default_cfg)  # add back default_cfg
    return model
def model_parameters(model, exclude_head=False):
    """Return the model's parameters, optionally dropping the final two
    (assumed to be the classifier head's weight and bias)."""
    params = model.parameters()
    if not exclude_head:
        return params
    # FIXME quick-and-dirty: relies on the head contributing the last two params
    return list(params)[:-2]
def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module:
    """Apply *fn(module=..., name=...)* over the module tree with dotted names.

    With depth_first=True the root (if included) is visited after its
    children; otherwise before. Returns the (possibly mutated) *module*.
    """
    if include_root and not depth_first:
        fn(module=module, name=name)
    for child_name, child in module.named_children():
        full_name = '.'.join((name, child_name)) if name else child_name
        # Children are always visited (include_root=True below the top level).
        named_apply(fn=fn, module=child, name=full_name, depth_first=depth_first, include_root=True)
    if include_root and depth_first:
        fn(module=module, name=name)
    return module
def named_modules(module: nn.Module, name='', depth_first=True, include_root=False):
    """Yield (dotted_name, module) pairs over the module tree.

    Mirrors named_apply's traversal order: depth_first=True emits the root
    (when included) after its children.
    """
    if include_root and not depth_first:
        yield name, module
    for child_name, child in module.named_children():
        full_name = '.'.join((name, child_name)) if name else child_name
        yield from named_modules(
            module=child, name=full_name, depth_first=depth_first, include_root=True)
    if include_root and depth_first:
        yield name, module
| 23,550 | 43.519849 | 153 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/heads.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.models.bert.modeling_bert import BertPredictionHeadTransform
class Pooler(nn.Module):
    """Pool a token sequence by projecting its first ([CLS]) token through
    a Linear layer followed by Tanh (BERT-style pooler)."""

    def __init__(self, hidden_size):
        super().__init__()
        # Attribute names kept for state_dict compatibility.
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # Only the first token of each sequence contributes to the pooled vector.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class ITMHead(nn.Module):
    """Image-text matching head: maps a hidden vector to 2 match/no-match logits."""

    def __init__(self, hidden_size):
        super().__init__()
        # Attribute name kept for state_dict compatibility.
        self.fc = nn.Linear(hidden_size, 2)

    def forward(self, x):
        return self.fc(x)
class MLMHead(nn.Module):
    """Masked-language-modeling head: BERT prediction-head transform followed
    by a decoder projecting to vocab_size logits with a separate bias.

    If `weight` is given, the decoder reuses it (weight tying with the
    input embedding matrix).
    """
    def __init__(self, config, weight=None):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # bias=False: the bias is a standalone Parameter so it survives weight tying.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        if weight is not None:
            self.decoder.weight = weight
    def forward(self, x):
        x = self.transform(x)
        x = self.decoder(x) + self.bias
        return x
| 1,257 | 27.590909 | 83 | py |
Mr.Right | Mr.Right-main/models/METER/meter/modules/__init__.py | from .meter_module import METERTransformerSS
| 45 | 22 | 44 | py |
Mr.Right | Mr.Right-main/models/METER/meter/utils/write_vg.py | import json
import pandas as pd
import pyarrow as pa
import random
import os
from tqdm import tqdm
from glob import glob
from collections import defaultdict
def path2rest(path, iid2captions):
    """Load one Visual Genome image and pack it with its region annotations.

    Returns [binary, phrases, widths, heights, xs, ys, image_id_str].
    """
    fname = path.split("/")[-1]
    iid = int(fname[:-4])  # strip the '.jpg' suffix
    with open(path, "rb") as fp:
        binary = fp.read()
    regions = iid2captions[iid]
    row = [binary, [r["phrase"] for r in regions]]
    for field in ("width", "height", "x", "y"):
        row.append([r[field] for r in regions])
    row.append(str(iid))
    return row
def make_arrow(root, dataset_root):
    """Convert Visual Genome region descriptions + images into a single
    pyarrow file at `{dataset_root}/vg.arrow`."""
    with open(f"{root}/annotations/region_descriptions.json", "r") as fp:
        captions = json.load(fp)
    # image_id -> list of region dicts (phrase, width, height, x, y, ...)
    iid2captions = defaultdict(list)
    for cap in tqdm(captions):
        cap = cap["regions"]
        for c in cap:
            iid2captions[c["image_id"]].append(c)
    paths = list(glob(f"{root}/images/VG_100K/*.jpg")) + list(
        glob(f"{root}/images/VG_100K_2/*.jpg")
    )
    random.shuffle(paths)
    # Keep only images that actually have region annotations.
    caption_paths = [
        path for path in paths if int(path.split("/")[-1][:-4]) in iid2captions
    ]
    if len(paths) == len(caption_paths):
        print("all images have caption annotations")
    else:
        print("not all images have caption annotations")
    print(
        len(paths), len(caption_paths), len(iid2captions),
    )
    # One row per image: binary + per-region annotation columns.
    bs = [path2rest(path, iid2captions) for path in tqdm(caption_paths)]
    dataframe = pd.DataFrame(
        bs, columns=["image", "caption", "width", "height", "x", "y", "image_id"],
    )
    table = pa.Table.from_pandas(dataframe)
    os.makedirs(dataset_root, exist_ok=True)
    with pa.OSFile(f"{dataset_root}/vg.arrow", "wb") as sink:
        with pa.RecordBatchFileWriter(sink, table.schema) as writer:
            writer.write_table(table)
| 1,928 | 25.424658 | 82 | py |
Mr.Right | Mr.Right-main/models/METER/meter/utils/glossary.py | import re
contractions = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
manual_map = {
"none": "0",
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
articles = ["a", "an", "the"]
period_strip = re.compile("(?!<=\d)(\.)(?!\d)")
comma_strip = re.compile("(\d)(\,)(\d)")
punct = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
]
def normalize_word(token):
    """Normalize an answer string for VQA-style evaluation.

    Strips punctuation (keeping characters glued inside tokens such as
    "1,000"), removes articles, maps number words to digits via manual_map,
    and expands known contraction misspellings.
    """
    _token = token
    for p in punct:
        # Remove the char entirely when it borders whitespace or the string
        # contains a digit,digit comma; otherwise replace it with a space.
        if (p + " " in token or " " + p in token) or (
            re.search(comma_strip, token) is not None
        ):
            _token = _token.replace(p, "")
        else:
            _token = _token.replace(p, " ")
    # BUG FIX: Pattern.sub()'s third positional argument is `count`, not
    # `flags`; passing re.UNICODE (== 32) silently capped the number of
    # substitutions at 32. Flags belong in re.compile(), so drop it here.
    token = period_strip.sub("", _token)
    _token = []
    for word in token.lower().split():
        # BUG FIX: use get(), not setdefault() — setdefault permanently
        # inserted every word ever seen into the shared module-level dict.
        word = manual_map.get(word, word)
        if word not in articles:
            _token.append(word)
    for i, word in enumerate(_token):
        if word in contractions:
            _token[i] = contractions[word]
    token = " ".join(_token)
    token = token.replace(",", "")
    return token
| 4,435 | 22.225131 | 54 | py |
Mr.Right | Mr.Right-main/models/METER/meter/utils/write_vqa.py | import json
import pandas as pd
import pyarrow as pa
import random
import os
from tqdm import tqdm
from glob import glob
from collections import defaultdict, Counter
from .glossary import normalize_word
def get_score(occurences):
    """VQA soft accuracy for an answer chosen by `occurences` annotators.

    0 -> 0.0, 1 -> 0.3, 2 -> 0.6, 3 -> 0.9, anything else -> 1.0.
    """
    score_table = {0: 0.0, 1: 0.3, 2: 0.6, 3: 0.9}
    return score_table.get(occurences, 1.0)
def path2rest(path, split, annotations, label2ans):
    """Pack one VQAv2 image with its questions/answers into an arrow row.

    Returns [binary, questions, answers, answer_labels, answer_scores,
    image_id, question_ids, split]. For test splits the answer columns are
    empty lists.
    """
    # Image id is the trailing number of e.g. 'COCO_train2014_000000123456.jpg'.
    iid = int(path.split("/")[-1].split("_")[-1][:-4])
    with open(path, "rb") as fp:
        binary = fp.read()
    # annotations[split][iid] maps question_id -> [question, {labels, scores}].
    _annot = annotations[split][iid]
    _annot = list(_annot.items())
    qids, qas = [a[0] for a in _annot], [a[1] for a in _annot]
    questions = [qa[0] for qa in qas]
    answers = [qa[1] for qa in qas] if "test" not in split else list(list())
    answer_labels = (
        [a["labels"] for a in answers] if "test" not in split else list(list())
    )
    answer_scores = (
        [a["scores"] for a in answers] if "test" not in split else list(list())
    )
    # Re-bind `answers` to the label strings via label2ans.
    answers = (
        [[label2ans[l] for l in al] for al in answer_labels]
        if "test" not in split
        else list(list())
    )
    return [binary, questions, answers, answer_labels, answer_scores, iid, qids, split]
def make_arrow(root, dataset_root):
    """Convert VQAv2 questions/annotations + COCO images into per-split
    pyarrow files, plus a trainable/rest split of the val set."""
    # --- Load question and annotation JSONs for all four splits. ---
    with open(f"{root}/v2_OpenEnded_mscoco_train2014_questions.json", "r") as fp:
        questions_train2014 = json.load(fp)["questions"]
    with open(f"{root}/v2_OpenEnded_mscoco_val2014_questions.json", "r") as fp:
        questions_val2014 = json.load(fp)["questions"]
    with open(f"{root}/v2_OpenEnded_mscoco_test2015_questions.json", "r") as fp:
        questions_test2015 = json.load(fp)["questions"]
    with open(f"{root}/v2_OpenEnded_mscoco_test-dev2015_questions.json", "r") as fp:
        questions_test_dev2015 = json.load(fp)["questions"]
    with open(f"{root}/v2_mscoco_train2014_annotations.json", "r") as fp:
        annotations_train2014 = json.load(fp)["annotations"]
    with open(f"{root}/v2_mscoco_val2014_annotations.json", "r") as fp:
        annotations_val2014 = json.load(fp)["annotations"]
    # annotations[split][image_id][question_id] = [question, ...answers appended below]
    annotations = dict()
    for split, questions in zip(
        ["train", "val", "test", "test-dev"],
        [
            questions_train2014,
            questions_val2014,
            questions_test2015,
            questions_test_dev2015,
        ],
    ):
        _annot = defaultdict(dict)
        for q in tqdm(questions):
            _annot[q["image_id"]][q["question_id"]] = [q["question"]]
        annotations[split] = _annot
    # --- Build answer vocabulary: answers occurring >= 9 times across train+val. ---
    all_major_answers = list()
    for split, annots in zip(
        ["train", "val"], [annotations_train2014, annotations_val2014],
    ):
        _annot = annotations[split]
        for q in tqdm(annots):
            all_major_answers.append(q["multiple_choice_answer"])
    all_major_answers = [normalize_word(word) for word in tqdm(all_major_answers)]
    counter = {k: v for k, v in Counter(all_major_answers).items() if v >= 9}
    ans2label = {k: i for i, k in enumerate(counter.keys())}
    label2ans = list(counter.keys())
    # --- Attach soft-scored answer labels to each question. ---
    for split, annots in zip(
        ["train", "val"], [annotations_train2014, annotations_val2014],
    ):
        _annot = annotations[split]
        for q in tqdm(annots):
            answers = q["answers"]
            answer_count = {}
            for answer in answers:
                answer_ = answer["answer"]
                answer_count[answer_] = answer_count.get(answer_, 0) + 1
            labels = []
            scores = []
            for answer in answer_count:
                if answer not in ans2label:
                    continue
                labels.append(ans2label[answer])
                score = get_score(answer_count[answer])
                scores.append(score)
            _annot[q["image_id"]][q["question_id"]].append(
                {"labels": labels, "scores": scores,}
            )
    # --- Drop questions whose every answer fell outside the vocabulary. ---
    for split in ["train", "val"]:
        filtered_annot = dict()
        for ik, iv in annotations[split].items():
            new_q = dict()
            for qk, qv in iv.items():
                if len(qv[1]["labels"]) != 0:
                    new_q[qk] = qv
            if len(new_q) != 0:
                filtered_annot[ik] = new_q
        annotations[split] = filtered_annot
    # --- Write one arrow file per split. ---
    for split in [
        "train",
        "val",
        "test",
        "test-dev",
    ]:
        annot = annotations[split]
        split_name = {
            "train": "train2014",
            "val": "val2014",
            "test": "test2015",
            "test-dev": "test2015",
        }[split]
        paths = list(glob(f"{root}/{split_name}/*.jpg"))
        random.shuffle(paths)
        annot_paths = [
            path
            for path in paths
            if int(path.split("/")[-1].split("_")[-1][:-4]) in annot
        ]
        if len(paths) == len(annot_paths):
            print("all images have caption annotations")
        else:
            print("not all images have caption annotations")
        print(
            len(paths), len(annot_paths), len(annot),
        )
        bs = [
            path2rest(path, split, annotations, label2ans) for path in tqdm(annot_paths)
        ]
        dataframe = pd.DataFrame(
            bs,
            columns=[
                "image",
                "questions",
                "answers",
                "answer_labels",
                "answer_scores",
                "image_id",
                "question_id",
                "split",
            ],
        )
        table = pa.Table.from_pandas(dataframe)
        os.makedirs(dataset_root, exist_ok=True)
        with pa.OSFile(f"{dataset_root}/vqav2_{split}.arrow", "wb") as sink:
            with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                writer.write_table(table)
    # --- Split val into a trainable part and a held-out 1000-image rest. ---
    table = pa.ipc.RecordBatchFileReader(
        pa.memory_map(f"{dataset_root}/vqav2_val.arrow", "r")
    ).read_all()
    pdtable = table.to_pandas()
    df1 = pdtable[:-1000]
    df2 = pdtable[-1000:]
    df1 = pa.Table.from_pandas(df1)
    df2 = pa.Table.from_pandas(df2)
    with pa.OSFile(f"{dataset_root}/vqav2_trainable_val.arrow", "wb") as sink:
        with pa.RecordBatchFileWriter(sink, df1.schema) as writer:
            writer.write_table(df1)
    with pa.OSFile(f"{dataset_root}/vqav2_rest_val.arrow", "wb") as sink:
        with pa.RecordBatchFileWriter(sink, df2.schema) as writer:
            writer.write_table(df2)
| 6,523 | 30.669903 | 88 | py |
Mr.Right | Mr.Right-main/models/METER/meter/utils/write_conceptual_caption.py | import json
import pandas as pd
import pyarrow as pa
import gc
import random
import os
from tqdm import tqdm
from glob import glob
def path2rest(path, iid2captions):
    """Pack one downloaded Conceptual Captions image into an arrow row.

    Returns [binary, captions, image_id, split]; the split is parsed from
    the 'images_<split>' directory two levels above the file.
    """
    split_dir, _, fname = path.split("/")[-3:]
    split = split_dir.split("_")[-1]  # e.g. 'images_train' -> 'train'
    with open(path, "rb") as fp:
        binary = fp.read()
    return [binary, iid2captions[fname], fname, split]
def make_arrow(root, dataset_root):
    """Convert downloaded Conceptual Captions images + annotation JSONs into
    arrow shards of at most 100k rows per split."""
    for split in ["val", "train"]:
        with open(f"{root}/{split}_annot.json", "r") as fp:
            captions = json.load(fp)
        # filename -> single-element caption list
        iid2captions = dict()
        for cap in tqdm(captions):
            iid = cap[0].split("/")[-1]
            iid2captions[iid] = [cap[1]]
        paths = list(glob(f"{root}/images_{split}/*/*"))
        random.shuffle(paths)
        # Keep only files that have an annotation entry.
        caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
        if len(paths) == len(caption_paths):
            print("all images have caption annotations")
        else:
            print("not all images have caption annotations")
        print(
            len(paths), len(caption_paths), len(iid2captions),
        )
        # Shard into chunks of 100k rows to bound memory per arrow file.
        sub_len = int(len(caption_paths) // 100000)
        subs = list(range(sub_len + 1))
        for sub in subs:
            sub_paths = caption_paths[sub * 100000 : (sub + 1) * 100000]
            bs = [path2rest(path, iid2captions) for path in tqdm(sub_paths)]
            dataframe = pd.DataFrame(
                bs, columns=["image", "caption", "image_id", "split"],
            )
            table = pa.Table.from_pandas(dataframe)
            os.makedirs(dataset_root, exist_ok=True)
            with pa.OSFile(
                f"{dataset_root}/conceptual_caption_{split}_{sub}.arrow", "wb"
            ) as sink:
                with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                    writer.write_table(table)
            # Free the shard's buffers before building the next one.
            del dataframe
            del table
            del bs
            gc.collect()
| 2,037 | 27.305556 | 87 | py |
Mr.Right | Mr.Right-main/models/METER/meter/utils/write_nlvr2.py | import json
import pandas as pd
import pyarrow as pa
import os
from tqdm import tqdm
from collections import defaultdict
def process(root, iden, row):
    """Load the two images of one NLVR2 example plus its sentences/labels.

    Returns [img0_binary, img1_binary, texts, labels, identifier].
    """
    texts = [r["sentence"] for r in row]
    labels = [r["label"] for r in row]
    split = iden.split("-")[0]
    # Train images live under per-directory subfolders; dev/test are flat.
    if iden.startswith("train"):
        directory = row[0]["directory"]
        path = f"{root}/images/train/{directory}/{iden}"
    else:
        path = f"{root}/{split}/{iden}"
    with open(f"{path}-img0.png", "rb") as fp:
        img0 = fp.read()
    with open(f"{path}-img1.png", "rb") as fp:
        img1 = fp.read()
    return [img0, img1, texts, labels, iden]
def make_arrow(root, dataset_root):
    """Convert the NLVR2 JSONL annotation files + image pairs into one arrow
    file per split (train/dev/test1 plus balanced/unbalanced variants)."""
    train_data = list(
        map(json.loads, open(f"{root}/nlvr2/data/train.json").readlines())
    )
    test1_data = list(
        map(json.loads, open(f"{root}/nlvr2/data/test1.json").readlines())
    )
    dev_data = list(map(json.loads, open(f"{root}/nlvr2/data/dev.json").readlines()))
    balanced_test1_data = list(
        map(
            json.loads,
            open(f"{root}/nlvr2/data/balanced/balanced_test1.json").readlines(),
        )
    )
    balanced_dev_data = list(
        map(
            json.loads,
            open(f"{root}/nlvr2/data/balanced/balanced_dev.json").readlines(),
        )
    )
    unbalanced_test1_data = list(
        map(
            json.loads,
            open(f"{root}/nlvr2/data/unbalanced/unbalanced_test1.json").readlines(),
        )
    )
    unbalanced_dev_data = list(
        map(
            json.loads,
            open(f"{root}/nlvr2/data/unbalanced/unbalanced_dev.json").readlines(),
        )
    )
    splits = [
        "train",
        "dev",
        "test1",
        "balanced_dev",
        "balanced_test1",
        "unbalanced_dev",
        "unbalanced_test1",
    ]
    datas = [
        train_data,
        dev_data,
        test1_data,
        balanced_dev_data,
        balanced_test1_data,
        unbalanced_dev_data,
        unbalanced_test1_data,
    ]
    annotations = dict()
    # Group rows by example identifier minus its trailing sentence index.
    for split, data in zip(splits, datas):
        _annot = defaultdict(list)
        for row in tqdm(data):
            _annot["-".join(row["identifier"].split("-")[:-1])].append(row)
        annotations[split] = _annot
    for split in splits:
        bs = [
            process(root, iden, row) for iden, row in tqdm(annotations[split].items())
        ]
        dataframe = pd.DataFrame(
            bs, columns=["image_0", "image_1", "questions", "answers", "identifier"],
        )
        table = pa.Table.from_pandas(dataframe)
        os.makedirs(dataset_root, exist_ok=True)
        with pa.OSFile(f"{dataset_root}/nlvr2_{split}.arrow", "wb") as sink:
            with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                writer.write_table(table)
| 2,818 | 25.101852 | 86 | py |
Mr.Right | Mr.Right-main/models/METER/meter/utils/write_sbu.py | import json
import pandas as pd
import pyarrow as pa
import gc
import random
import os
from tqdm import tqdm
from glob import glob
def path2rest(path, iid2captions):
    """Pack one downloaded SBU image into an arrow row.

    Returns [binary, captions, image_id, split]; the split is parsed from
    the 'images_<split>' directory two levels above the file.
    """
    split, _, name = path.split("/")[-3:]
    split = split.split("_")[-1]  # 'images_train' -> 'train'
    iid = name
    with open(path, "rb") as fp:
        binary = fp.read()
    captions = iid2captions[iid]
    return [
        binary,
        captions,
        iid,
        split,
    ]
def make_arrow(root, dataset_root):
    """Convert downloaded SBU Captions images + annot.json into arrow shards
    of at most 100k rows each."""
    with open(f"{root}/annot.json", "r") as fp:
        captions = json.load(fp)
    # filename -> single-element caption list
    iid2captions = dict()
    for cap in tqdm(captions):
        iid = cap[0].split("/")[-1]
        iid2captions[iid] = [cap[1]]
    paths = list(glob(f"{root}/images_train/*/*"))
    random.shuffle(paths)
    caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
    if len(paths) == len(caption_paths):
        print("all images have caption annotations")
    else:
        print("not all images have caption annotations")
    print(
        len(paths), len(caption_paths), len(iid2captions),
    )
    # Shard into 100k-row chunks to bound memory per arrow file.
    sub_len = int(len(caption_paths) // 100000)
    subs = list(range(sub_len + 1))
    for sub in subs:
        sub_paths = caption_paths[sub * 100000 : (sub + 1) * 100000]
        bs = [path2rest(path, iid2captions) for path in tqdm(sub_paths)]
        dataframe = pd.DataFrame(bs, columns=["image", "caption", "image_id", "split"],)
        table = pa.Table.from_pandas(dataframe)
        os.makedirs(dataset_root, exist_ok=True)
        with pa.OSFile(f"{dataset_root}/sbu_{sub}.arrow", "wb") as sink:
            with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                writer.write_table(table)
        # Free the shard's buffers before the next chunk.
        del dataframe
        del table
        del bs
        gc.collect()
| 1,785 | 25.656716 | 88 | py |
Mr.Right | Mr.Right-main/models/METER/meter/utils/write_f30k_karpathy.py | import json
import pandas as pd
import pyarrow as pa
import random
import os
from tqdm import tqdm
from glob import glob
from collections import defaultdict
def path2rest(path, iid2captions, iid2split):
    """Read one Flickr30K image; return [binary, captions, filename, split]."""
    fname = path.split("/")[-1]
    with open(path, "rb") as fp:
        binary = fp.read()
    return [binary, iid2captions[fname], fname, iid2split[fname]]
def make_arrow(root, dataset_root):
    """Convert Flickr30K images + Karpathy split annotations into one arrow
    file per split (train/val/test)."""
    with open(f"{root}/karpathy/dataset_flickr30k.json", "r") as fp:
        captions = json.load(fp)
    captions = captions["images"]
    # filename -> list of raw caption strings; filename -> split name
    iid2captions = defaultdict(list)
    iid2split = dict()
    for cap in tqdm(captions):
        filename = cap["filename"]
        iid2split[filename] = cap["split"]
        for c in cap["sentences"]:
            iid2captions[filename].append(c["raw"])
    paths = list(glob(f"{root}/flickr30k-images/*.jpg"))
    random.shuffle(paths)
    caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
    if len(paths) == len(caption_paths):
        print("all images have caption annotations")
    else:
        print("not all images have caption annotations")
    print(
        len(paths), len(caption_paths), len(iid2captions),
    )
    bs = [path2rest(path, iid2captions, iid2split) for path in tqdm(caption_paths)]
    for split in ["train", "val", "test"]:
        # Each row's last element is its split name (see path2rest).
        batches = [b for b in bs if b[-1] == split]
        dataframe = pd.DataFrame(
            batches, columns=["image", "caption", "image_id", "split"],
        )
        table = pa.Table.from_pandas(dataframe)
        os.makedirs(dataset_root, exist_ok=True)
        with pa.OSFile(
            f"{dataset_root}/f30k_caption_karpathy_{split}.arrow", "wb"
        ) as sink:
            with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                writer.write_table(table)
| 1,871 | 26.529412 | 83 | py |
Mr.Right | Mr.Right-main/models/METER/meter/utils/write_snli.py | import json
import pandas as pd
import pyarrow as pa
import os
from tqdm import tqdm
from collections import defaultdict
label2id = {'contradiction': 0, 'neutral': 1, 'entailment': 2}
def process(root, imgid, ann):
    """Load one SNLI-VE Flickr30K image and pair it with its sentence pairs
    and integer labels. Returns [binary, sentences, labels]."""
    with open(f"{root}/Flickr30K/images/{imgid}.jpg", "rb") as fp:
        img = fp.read()
    sentences = ann['sentences']
    labels = ann['labels']
    return [img, sentences, labels]
def make_arrow(root, dataset_root):
    """Convert SNLI-VE JSONL files + Flickr30K images into one arrow file per
    split, grouping all sentence pairs by image id."""
    train_data = list(
        map(json.loads, open(f"{root}/snli_ve_train.jsonl").readlines())
    )
    test_data = list(
        map(json.loads, open(f"{root}/snli_ve_test.jsonl").readlines())
    )
    dev_data = list(
        map(json.loads, open(f"{root}/snli_ve_dev.jsonl").readlines())
    )
    splits = [
        "train",
        "dev",
        "test",
    ]
    annotations = dict()
    annotations['train'] = train_data
    annotations['dev'] = dev_data
    annotations['test'] = test_data
    # annots[split][image_id] = {'sentences': [[premise, hypothesis], ...],
    #                            'labels': [label2id[gold_label], ...]}
    annots = dict()
    for split in splits:
        annots[split] = {}
        for line in annotations[split]:
            imgid = line['Flickr30K_ID']
            if not imgid in annots[split]:
                annots[split][imgid] = {}
                annots[split][imgid]['sentences'] = []
                annots[split][imgid]['labels'] = []
            annots[split][imgid]['sentences'].append( [line['sentence1'], line['sentence2']] )
            annots[split][imgid]['labels'].append( label2id[line['gold_label']] )
    for split in splits:
        bs = [process(root, imgid, annots[split][imgid]) for imgid in tqdm(annots[split])]
        dataframe = pd.DataFrame(
            bs, columns=["image", "sentences", "labels"]
        )
        table = pa.Table.from_pandas(dataframe)
        os.makedirs(dataset_root, exist_ok=True)
        with pa.OSFile(f"{dataset_root}/snli_{split}.arrow", "wb") as sink:
            with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                writer.write_table(table)
| 2,006 | 26.493151 | 94 | py |
Mr.Right | Mr.Right-main/models/METER/meter/utils/__init__.py | 0 | 0 | 0 | py | |
Mr.Right | Mr.Right-main/models/METER/meter/utils/write_coco_karpathy.py | import json
import os
import pandas as pd
import pyarrow as pa
import random
from tqdm import tqdm
from glob import glob
from collections import defaultdict
def path2rest(path, iid2captions, iid2split):
    """Read one COCO image; return [binary, captions, filename, split]."""
    name = path.split("/")[-1]
    with open(path, "rb") as fp:
        binary = fp.read()
    captions = iid2captions[name]
    split = iid2split[name]
    return [binary, captions, name, split]
def make_arrow(root, dataset_root):
    """Convert COCO images + Karpathy split annotations into one arrow file
    per split (train/val/restval/test)."""
    with open(f"{root}/karpathy/dataset_coco.json", "r") as fp:
        captions = json.load(fp)
    captions = captions["images"]
    # filename -> list of raw caption strings; filename -> split name
    iid2captions = defaultdict(list)
    iid2split = dict()
    for cap in tqdm(captions):
        filename = cap["filename"]
        iid2split[filename] = cap["split"]
        for c in cap["sentences"]:
            iid2captions[filename].append(c["raw"])
    paths = list(glob(f"{root}/train2014/*.jpg")) + list(glob(f"{root}/val2014/*.jpg"))
    random.shuffle(paths)
    caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
    if len(paths) == len(caption_paths):
        print("all images have caption annotations")
    else:
        print("not all images have caption annotations")
    print(
        len(paths), len(caption_paths), len(iid2captions),
    )
    bs = [path2rest(path, iid2captions, iid2split) for path in tqdm(caption_paths)]
    for split in ["train", "val", "restval", "test"]:
        # Each row's last element is its split name (see path2rest).
        batches = [b for b in bs if b[-1] == split]
        dataframe = pd.DataFrame(
            batches, columns=["image", "caption", "image_id", "split"],
        )
        table = pa.Table.from_pandas(dataframe)
        os.makedirs(dataset_root, exist_ok=True)
        with pa.OSFile(
            f"{dataset_root}/coco_caption_karpathy_{split}.arrow", "wb"
        ) as sink:
            with pa.RecordBatchFileWriter(sink, table.schema) as writer:
                writer.write_table(table)
| 1,904 | 28.765625 | 87 | py |
Mr.Right | Mr.Right-main/models/METER/meter/transforms/transform.py | from .utils import (
inception_normalize,
imagenet_normalize,
MinMaxResize,
)
from PIL import Image
from torchvision import transforms
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from .randaug import RandAugment
def pixelbert_transform(size=800):
    """Min/max resize (shorter side = `size`, longer capped at size*1333/800),
    then ToTensor and inception-style mean/std 0.5 normalization."""
    longer = int((1333 / 800) * size)
    return transforms.Compose(
        [
            MinMaxResize(shorter=size, longer=longer),
            transforms.ToTensor(),
            inception_normalize,
        ]
    )
def pixelbert_transform_randaug(size=800):
    """Same as pixelbert_transform but with RandAugment(2 ops, magnitude 9)
    prepended before the resize."""
    longer = int((1333 / 800) * size)
    trs = transforms.Compose(
        [
            MinMaxResize(shorter=size, longer=longer),
            transforms.ToTensor(),
            inception_normalize,
        ]
    )
    # insert(0, ...) makes RandAugment the first op in the pipeline.
    trs.transforms.insert(0, RandAugment(2, 9))
    return trs
def imagenet_transform(size=800):
    """Bicubic resize + center-crop to `size`, then ToTensor and ImageNet
    mean/std normalization."""
    return transforms.Compose(
        [
            Resize(size, interpolation=Image.BICUBIC),
            CenterCrop(size),
            transforms.ToTensor(),
            imagenet_normalize,
        ]
    )
def imagenet_transform_randaug(size=800):
    """Same as imagenet_transform but with RandAugment(2 ops, magnitude 9)
    prepended before the resize."""
    trs = transforms.Compose(
        [
            Resize(size, interpolation=Image.BICUBIC),
            CenterCrop(size),
            transforms.ToTensor(),
            imagenet_normalize,
        ]
    )
    # insert(0, ...) makes RandAugment the first op in the pipeline.
    trs.transforms.insert(0, RandAugment(2, 9))
    return trs
def vit_transform(size=800):
    """Bicubic resize + center-crop to `size`, then ToTensor and
    inception-style mean/std 0.5 normalization (ViT convention)."""
    return transforms.Compose(
        [
            Resize(size, interpolation=Image.BICUBIC),
            CenterCrop(size),
            transforms.ToTensor(),
            inception_normalize,
        ]
    )
def vit_transform_randaug(size=800):
    """Same as vit_transform but with RandAugment(2 ops, magnitude 9)
    prepended before the resize."""
    trs = transforms.Compose(
        [
            Resize(size, interpolation=Image.BICUBIC),
            CenterCrop(size),
            transforms.ToTensor(),
            inception_normalize,
        ]
    )
    # insert(0, ...) makes RandAugment the first op in the pipeline.
    trs.transforms.insert(0, RandAugment(2, 9))
    return trs
def clip_transform(size):
    """CLIP preprocessing: bicubic resize, center crop, RGB conversion,
    ToTensor and CLIP's published mean/std normalization."""
    return Compose([
        Resize(size, interpolation=Image.BICUBIC),
        CenterCrop(size),
        lambda image: image.convert("RGB"),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])
def clip_transform_randaug(size):
    """clip_transform with RandAugment prepended.

    After the three insert(0, ...) calls below, the pipeline runs:
    convert('RGB') -> RandAugment(2, 9) -> convert('RGBA') -> Resize ->
    CenterCrop -> convert('RGB') -> ToTensor -> Normalize.
    """
    trs = Compose([
        Resize(size, interpolation=Image.BICUBIC),
        CenterCrop(size),
        lambda image: image.convert("RGB"),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])
    # NOTE(review): the RGBA round-trip around RandAugment is presumably to
    # keep the PIL ops safe on palette images — confirm before changing.
    trs.transforms.insert(0, lambda image: image.convert('RGBA'))
    trs.transforms.insert(0, RandAugment(2, 9))
    trs.transforms.insert(0, lambda image: image.convert('RGB'))
    return trs
| 2,733 | 26.34 | 93 | py |
Mr.Right | Mr.Right-main/models/METER/meter/transforms/utils.py | from torchvision import transforms
from PIL import Image
class MinMaxResize:
    """Resize a PIL image so the shorter side reaches `shorter` while the
    longer side stays within `longer`, then round both sides down to
    multiples of 32 (bicubic resampling)."""

    def __init__(self, shorter=800, longer=1333):
        self.min = shorter
        self.max = longer

    def __call__(self, x):
        width, height = x.size
        # Scale so the shorter side hits self.min exactly.
        scale = self.min / min(width, height)
        if height < width:
            new_h, new_w = self.min, scale * width
        else:
            new_h, new_w = scale * height, self.min
        # If that overshoots the longer-side cap, shrink both sides to fit.
        longest = max(new_h, new_w)
        if longest > self.max:
            shrink = self.max / longest
            new_h = new_h * shrink
            new_w = new_w * shrink
        # Round to nearest int, then snap down to a multiple of 32.
        new_h, new_w = int(new_h + 0.5), int(new_w + 0.5)
        new_h, new_w = (new_h // 32) * 32, (new_w // 32) * 32
        return x.resize((new_w, new_h), resample=Image.BICUBIC)
class UnNormalize(object):
    """Invert torchvision Normalize in place: out[c] = in[c] * std[c] + mean[c]."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        Returns:
            Tensor: Normalized image.
        """
        # Channel-wise in-place inverse of t.sub_(m).div_(s).
        for channel, m, s in zip(tensor, self.mean, self.std):
            channel.mul_(s).add_(m)
        return tensor
# This is simple maximum entropy normalization performed in Inception paper
# (maps [0, 1] tensors to [-1, 1] via mean/std 0.5 per channel)
inception_normalize = transforms.Compose(
    [transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
# ViT uses simple non-biased inception normalization
# https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py#L132
# (inverse of inception_normalize; maps [-1, 1] back to [0, 1])
inception_unnormalize = transforms.Compose(
    [UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
# ImageNet normalize (standard ImageNet channel statistics)
imagenet_normalize = transforms.Compose(
    [transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
)
| 1,792 | 27.919355 | 98 | py |
Mr.Right | Mr.Right-main/models/METER/meter/transforms/randaug.py | # code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[0]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateXabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate vertically by fraction v of the height; random direction."""
    assert -0.45 <= v <= 0.45
    offset = -v if random.random() > 0.5 else v
    offset = offset * img.size[1]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, offset))
def TranslateYabs(img, v):  # absolute pixel offset, v >= 0
    """Translate vertically by v pixels; random direction."""
    assert 0 <= v
    offset = -v if random.random() > 0.5 else v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, offset))
def Rotate(img, v):  # [-30, 30]
    """Rotate by v degrees; direction randomized with prob 0.5."""
    assert -30 <= v <= 30
    angle = -v if random.random() > 0.5 else v
    return img.rotate(angle)
def AutoContrast(img, _):
    """Maximize image contrast; the magnitude argument is ignored."""
    return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
    """Invert (negate) all pixel values; the magnitude argument is ignored."""
    return PIL.ImageOps.invert(img)
def Equalize(img, _):
    """Equalize the image histogram; the magnitude argument is ignored."""
    return PIL.ImageOps.equalize(img)
def Flip(img, _):  # not from the paper
    """Mirror the image horizontally; the magnitude argument is ignored."""
    return PIL.ImageOps.mirror(img)
def Solarize(img, v):  # [0, 256]
    """Invert all pixel values above threshold v."""
    assert 0 <= v <= 256
    return PIL.ImageOps.solarize(img, v)
def SolarizeAdd(img, addition=0, threshold=128):
    """Add ``addition`` to every pixel (clipped to [0, 255]), then solarize at ``threshold``.

    Fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; use the
    explicit ``np.int64`` dtype so the widened add cannot wrap on uint8 input.
    """
    img_np = np.array(img).astype(np.int64)
    img_np = img_np + addition
    img_np = np.clip(img_np, 0, 255)
    img_np = img_np.astype(np.uint8)
    img = Image.fromarray(img_np)
    return PIL.ImageOps.solarize(img, threshold)
def Posterize(img, v):  # [4, 8]
    """Reduce the image to int(v) bits per channel, with a floor of 1 bit."""
    bits = max(1, int(v))
    return PIL.ImageOps.posterize(img, bits)
def Contrast(img, v):  # [0.1,1.9]
    """Scale image contrast by factor v (1.0 is the original image)."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Contrast(img)
    return enhancer.enhance(v)
def Color(img, v):  # [0.1,1.9]
    """Scale image color saturation by factor v (1.0 is the original image)."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Color(img)
    return enhancer.enhance(v)
def Brightness(img, v):  # [0.1,1.9]
    """Scale image brightness by factor v (1.0 is the original image)."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Brightness(img)
    return enhancer.enhance(v)
def Sharpness(img, v):  # [0.1,1.9]
    """Scale image sharpness by factor v (1.0 is the original image)."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Sharpness(img)
    return enhancer.enhance(v)
def Cutout(img, v):  # [0, 60] => percentage: [0, 0.2]
    """Erase one square patch whose side is fraction v of the image width."""
    assert 0.0 <= v <= 0.2
    if v <= 0.0:
        return img
    return CutoutAbs(img, v * img.size[0])
def CutoutAbs(img, v):  # v is the patch side length in pixels
    """Paint a gray v x v square at a random location, clamped to the image.

    Operates on a copy; the input image is left untouched.
    """
    if v < 0:
        return img
    w, h = img.size
    center_x = np.random.uniform(w)
    center_y = np.random.uniform(h)
    x0 = int(max(0, center_x - v / 2.0))
    y0 = int(max(0, center_y - v / 2.0))
    box = (x0, y0, min(w, x0 + v), min(h, y0 + v))
    patched = img.copy()
    # Fill with a fixed mid-gray RGB value.
    PIL.ImageDraw.Draw(patched).rectangle(box, (125, 123, 114))
    return patched
def SamplePairing(imgs):  # [0, 0.4]
    """Return an op that blends its input with a random image from ``imgs``."""
    def f(img1, v):
        idx = np.random.choice(len(imgs))
        partner = PIL.Image.fromarray(imgs[idx])
        return PIL.Image.blend(img1, partner, v)
    return f
def Identity(img, v):
    """No-op augmentation: return the image unchanged."""
    return img
def augment_list():  # 14 operations and their magnitude ranges
    """Return the RandAugment op pool as (op, min_magnitude, max_magnitude) tuples.

    Ranges follow the EfficientNet AutoAugment policy:
    https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
    (Invert and CutoutAbs are intentionally excluded from the pool.)
    """
    return [
        (AutoContrast, 0, 1),
        (Equalize, 0, 1),
        (Rotate, 0, 30),
        (Posterize, 0, 4),
        (Solarize, 0, 256),
        (SolarizeAdd, 0, 110),
        (Color, 0.1, 1.9),
        (Contrast, 0.1, 1.9),
        (Brightness, 0.1, 1.9),
        (Sharpness, 0.1, 1.9),
        (ShearX, 0.0, 0.3),
        (ShearY, 0.0, 0.3),
        (TranslateXabs, 0.0, 100),
        (TranslateYabs, 0.0, 100),
    ]
class Lighting(object):
    """Lighting noise (AlexNet-style PCA-based color noise).

    Adds a random linear combination of the color eigenvectors, scaled by the
    eigenvalues and per-call Gaussian coefficients of std ``alphastd``.
    """

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)

    def __call__(self, img):
        # alphastd == 0 disables the augmentation entirely.
        if self.alphastd == 0:
            return img
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        scaled = self.eigvec.type_as(img).clone()
        scaled = scaled.mul(alpha.view(1, 3).expand(3, 3))
        scaled = scaled.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = scaled.sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
    """
    Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
    Zeroes out one random square patch of side ``length`` in a CHW tensor,
    mutating the input in place.
    """

    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        height, width = img.size(1), img.size(2)
        center_y = np.random.randint(height)
        center_x = np.random.randint(width)
        half = self.length // 2
        # Clamp the patch so it never extends outside the image.
        top = np.clip(center_y - half, 0, height)
        bottom = np.clip(center_y + half, 0, height)
        left = np.clip(center_x - half, 0, width)
        right = np.clip(center_x + half, 0, width)
        mask = np.ones((height, width), np.float32)
        mask[top:bottom, left:right] = 0.0
        img *= torch.from_numpy(mask).expand_as(img)
        return img
class RandAugment:
    """Apply ``n`` ops sampled (with replacement) from ``augment_list`` at magnitude ``m``."""

    def __init__(self, n, m):
        self.n = n
        self.m = m  # magnitude, interpreted on a [0, 30] scale
        self.augment_list = augment_list()

    def __call__(self, img):
        for op, low, high in random.choices(self.augment_list, k=self.n):
            # Linearly map m in [0, 30] onto the op's [low, high] range.
            magnitude = (float(self.m) / 30) * float(high - low) + low
            img = op(img, magnitude)
        return img
| 6,990 | 24.892593 | 134 | py |
Mr.Right | Mr.Right-main/models/METER/meter/transforms/__init__.py | from .transform import (
pixelbert_transform,
pixelbert_transform_randaug,
vit_transform,
vit_transform_randaug,
imagenet_transform,
imagenet_transform_randaug,
clip_transform,
clip_transform_randaug,
)
# Registry mapping config-string keys to transform factories; the
# ``*_randaug`` variants are the same pipelines with RandAugment added.
_transforms = {
    "pixelbert": pixelbert_transform,
    "pixelbert_randaug": pixelbert_transform_randaug,
    "vit": vit_transform,
    "vit_randaug": vit_transform_randaug,
    "imagenet": imagenet_transform,
    "imagenet_randaug": imagenet_transform_randaug,
    "clip": clip_transform,
    "clip_randaug": clip_transform_randaug,
}
def keys_to_transforms(keys: list, size=224):
    """Instantiate the transform pipeline for each registry key, at image side ``size``."""
    pipelines = []
    for key in keys:
        pipelines.append(_transforms[key](size=size))
    return pipelines
| 678 | 26.16 | 56 | py |
Mr.Right | Mr.Right-main/models/ALBEF/models/xbert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
import transformers
# Suppress non-error log output from the transformers library at import time.
transformers.logging.set_verbosity_error()
logger = logging.get_logger(__name__)
# Names consumed by the transformers docstring-templating decorators.
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
# Published BERT checkpoints loadable by this architecture (reference only).
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "bert-base-uncased",
    "bert-large-uncased",
    "bert-base-cased",
    "bert-large-cased",
    "bert-base-multilingual-uncased",
    "bert-base-multilingual-cased",
    "bert-base-chinese",
    "bert-base-german-cased",
    "bert-large-uncased-whole-word-masking",
    "bert-large-cased-whole-word-masking",
    "bert-large-uncased-whole-word-masking-finetuned-squad",
    "bert-large-cased-whole-word-masking-finetuned-squad",
    "bert-base-cased-finetuned-mrpc",
    "bert-base-german-dbmdz-cased",
    "bert-base-german-dbmdz-uncased",
    "cl-tohoku/bert-base-japanese",
    "cl-tohoku/bert-base-japanese-whole-word-masking",
    "cl-tohoku/bert-base-japanese-char",
    "cl-tohoku/bert-base-japanese-char-whole-word-masking",
    "TurkuNLP/bert-base-finnish-cased-v1",
    "TurkuNLP/bert-base-finnish-uncased-v1",
    "wietsedv/bert-base-dutch-cased",
    # See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load TF checkpoints in a PyTorch model.

    Walks every variable in the TensorFlow checkpoint, maps its slash-separated
    name to an attribute path on ``model``, and copies the array in place.
    Optimizer-state variables are skipped. Returns ``model``.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            # TF scopes like "layer_3" encode an index; split it off so the
            # numeric part can be used to index into a ModuleList below.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            # Translate TF variable-name conventions to PyTorch attributes.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE: this `continue` advances the inner name walk, not
                    # the outer variable loop (matches upstream transformers).
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to nn.Linear.weight.
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.config = config
    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        # Derive the sequence layout from whichever of input_ids / inputs_embeds was given.
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            # Offset by past_key_values_length so cached decoding continues the position count.
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        # Learned absolute position embeddings; relative variants are handled in attention instead.
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    """Multi-head attention.

    When ``is_cross_attention`` is True, keys/values are projected from
    ``encoder_hidden_states`` (of width ``config.encoder_width``); otherwise
    from ``hidden_states`` itself. Also supports cached key/values for
    incremental decoding and optional attention-map/gradient hooks.
    """
    def __init__(self, config, is_cross_attention):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            # Cross-attention keys/values come from the (possibly wider) encoder.
            self.key = nn.Linear(config.encoder_width, self.all_head_size)
            self.value = nn.Linear(config.encoder_width, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        # When True, forward() stores the attention map and registers a
        # gradient hook (used for attention visualization / Grad-CAM style analysis).
        self.save_attention = False
    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients
    def get_attn_gradients(self):
        return self.attn_gradients
    def save_attention_map(self, attention_map):
        self.attention_map = attention_map
    def get_attention_map(self):
        return self.attention_map
    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Incremental decoding: append the new keys/values to the cache.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        if self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask
        context_layer = torch.matmul(attention_probs_dropped, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        outputs = outputs + (past_key_value,)
        return outputs
class BertSelfOutput(nn.Module):
    """Project attention output, then dropout + residual add + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection back to the attention input, then normalize.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Full attention block: BertSelfAttention followed by BertSelfOutput,
    with optional head pruning."""
    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.self = BertSelfAttention(config, is_cross_attention)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads from the projections in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # Residual projection over the attention context.
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class BertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size plus activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a registry key (str) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size, then
    dropout + residual add + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class BertLayer(nn.Module):
    """One transformer layer.

    Layers with index >= ``config.fusion_layer`` additionally carry a
    cross-attention sub-module; when ``encoder_hidden_states`` is a list,
    the cross-attention source is chosen round-robin by layer index.
    """
    def __init__(self, config, layer_num):
        super().__init__()
        self.config = config
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.has_cross_attention = (layer_num >= config.fusion_layer)
        if self.has_cross_attention:
            self.layer_num = layer_num
            self.crossattention = BertAttention(config, is_cross_attention=True)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:-1]
        present_key_value = self_attention_outputs[-1]
        if self.has_cross_attention:
            assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
            if type(encoder_hidden_states) == list:
                # Multiple encoder streams: pick one per layer, cycling from fusion_layer.
                cross_attention_outputs = self.crossattention(
                    attention_output,
                    attention_mask,
                    head_mask,
                    encoder_hidden_states[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
                    encoder_attention_mask[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
                    output_attentions=output_attentions,
                )
                attention_output = cross_attention_outputs[0]
                outputs = outputs + cross_attention_outputs[1:-1]
            else:
                cross_attention_outputs = self.crossattention(
                    attention_output,
                    attention_mask,
                    head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions=output_attentions,
                )
                attention_output = cross_attention_outputs[0]
                outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
        # Feed-forward applied in chunks along the sequence dim to bound memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        outputs = outputs + (present_key_value,)
        return outputs
    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BertEncoder(nn.Module):
    """Stack of BertLayer modules.

    ``mode`` selects which slice of the stack runs:
    'text'        -> layers [0, fusion_layer)
    'fusion'      -> layers [fusion_layer, num_hidden_layers)
    'multi_modal' -> all layers.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        mode='multi_modal',
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        # Pick the layer range to execute for this modality mode.
        if mode=='text':
            start_layer = 0
            output_layer = self.config.fusion_layer
        elif mode=='fusion':
            start_layer = self.config.fusion_layer
            output_layer = self.config.num_hidden_layers
        elif mode=='multi_modal':
            start_layer = 0
            output_layer = self.config.num_hidden_layers
        for i in range(start_layer, output_layer):
            layer_module = self.layer[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                # Gradient checkpointing cannot return a cache, so force it off.
                if use_cache:
                    logger.warn(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class BertPooler(nn.Module):
    """Pool the sequence by passing the first ([CLS]) token through dense + tanh."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here is simply taking the hidden state of the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm transform applied before the LM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # hidden_act may be a registry key (str) or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        activated = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(activated)
class BertLMPredictionHead(nn.Module):
    """Transform hidden states and decode them into vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class BertOnlyMLMHead(nn.Module):
    """MLM-only pre-training head: vocabulary logits from the sequence output."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """NSP-only pre-training head: binary is-next classification logits."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Joint pre-training heads: MLM vocabulary logits plus NSP logits."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        return (
            self.predictions(sequence_output),
            self.seq_relationship(pooled_output),
        )
class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = BertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Linear biases are zeroed regardless of the weight init above.
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
@dataclass
class BertForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BertForPreTraining`.
    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # Fields default to None so the model can omit pieces (e.g. loss when no
    # labels are given), per the docstring above.
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        # Pooler is optional so this module can also serve as a decoder/fusion stack.
        self.pooler = BertPooler(config) if add_pooling_layer else None

        self.init_weights()

    def get_input_embeddings(self):
        # Token-embedding table; used by resize_token_embeddings / weight tying.
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    # NOTE(review): these two docstring decorators describe the *forward* signature
    # but here they wrap ``get_extended_attention_mask``. Harmless at runtime (they
    # only rewrite __doc__), but they look misplaced -- confirm against upstream.
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.
            is_decoder (:obj:`bool`):
                Whether to additionally apply a causal (lower-triangular) mask.

        Returns:
            :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                # Lower-triangular causal mask: position i may attend to j <= i.
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)

                if causal_mask.shape[1] < attention_mask.shape[1]:
                    # Cached (past) positions are always visible: prepend an all-ones prefix.
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones(
                                (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
                            ),
                            causal_mask,
                        ],
                        axis=-1,
                    )

                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
        mode='multi_modal',
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # KV caching only makes sense in decoder mode.
        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        # Exactly one of input_ids / inputs_embeds / encoder_embeds must be given.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
                                                                                 device, is_decoder)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_hidden_states is not None:
            # A list of encoder states/masks is supported (per-layer cross-attention inputs).
            if type(encoder_hidden_states) == list:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

            if type(encoder_attention_mask) == list:
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if encoder_embeds is None:
            embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_key_values_length,
            )
        else:
            # Caller supplied already-embedded inputs; skip the embedding layer.
            embedding_output = encoder_embeds

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            mode=mode,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
@add_start_docstrings(
    """
    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
    """BERT with the joint MLM + NSP pretraining heads on top of the encoder."""

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()

    def get_output_embeddings(self):
        """Return the MLM decoder projection (tied to the input embeddings)."""
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForPreTraining
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_out = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output, pooled_output = encoder_out[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        # Total loss = MLM loss + NSP loss, computed only when both label sets exist.
        total_loss = None
        if labels is not None and next_sentence_label is not None:
            criterion = CrossEntropyLoss()
            masked_lm_loss = criterion(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = criterion(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss

        if not return_dict:
            head_outputs = (prediction_scores, seq_relationship_score) + encoder_out[2:]
            return head_outputs if total_loss is None else (total_loss,) + head_outputs

        return BertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=encoder_out.hidden_states,
            attentions=encoder_out.attentions,
        )
@add_start_docstrings(
    """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
    """Causal (left-to-right) LM head on top of :class:`BertModel` (no pooler)."""

    # Pooler weights are unused here; the decoder bias is re-tied on load.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        """Return the LM decoder projection (tied to the input embeddings)."""
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=True,
        reduction='mean',
        mode='multi_modal',
        soft_labels=None,
        alpha=0,
        return_logits=False,
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Teacher forcing: caching past keys/values is pointless while training.
            use_cache = False

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            is_decoder=is_decoder,
            mode=mode,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        if return_logits:
            # Distillation shortcut: next-token logits only, no loss computation.
            return prediction_scores[:, :-1, :].contiguous()

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss(reduction=reduction)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            # BUGFIX: only fold the flat per-token losses back into a per-sample
            # loss when the criterion kept them (reduction='none'). With the
            # default reduction='mean' the loss is a 0-dim tensor and the
            # previously-unconditional .view(batch, -1) raised a RuntimeError.
            if reduction == 'none':
                lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)

        if soft_labels is not None:
            # Knowledge-distillation term blended with the hard-label loss by alpha.
            # NOTE: this path expects labels to be provided too (it reuses the
            # shifted scores and label mask), with reduction='none'.
            loss_distill = -torch.sum(F.log_softmax(shifted_prediction_scores, dim=-1)*soft_labels,dim=-1)
            loss_distill = (loss_distill * (labels!=-100)).sum(1)
            lm_loss = (1-alpha)*lm_loss + alpha*loss_distill

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        """Assemble decoder inputs for one generation step (beam/greedy search)."""
        input_shape = input_ids.shape

        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past,
            "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
            "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
            "is_decoder": True,
        }

    def _reorder_cache(self, past, beam_idx):
        """Reorder cached key/value states to follow the selected beams."""
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):

    # Pooler weights are unused here; the decoder bias is re-tied on load.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        # LM decoder projection (tied to the input embeddings by the base class).
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
        mode='multi_modal',
        soft_labels=None,
        alpha=0,
        return_logits=False,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_embeds=encoder_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            is_decoder=is_decoder,
            mode=mode,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        if return_logits:
            # Shortcut used by distillation teachers: raw MLM logits, no loss.
            return prediction_scores

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if soft_labels is not None:
            # Soft-label (distillation) cross-entropy, averaged over non-ignored
            # positions, blended with the hard-label loss by alpha.
            # NOTE(review): this branch reads ``labels`` and ``masked_lm_loss``,
            # so it presumably expects labels to be provided as well -- confirm.
            loss_distill = -torch.sum(F.log_softmax(prediction_scores, dim=-1)*soft_labels,dim=-1)
            loss_distill = loss_distill[labels!=-100].mean()
            masked_lm_loss = (1-alpha)*masked_lm_loss + alpha*loss_distill

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        """Append a dummy PAD token so the MLM can be driven by generation."""
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]

        # add a dummy token
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top. """,
    BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config)
        # Binary classifier over the pooled [CLS] representation.
        self.cls = BertOnlyNSPHead(config)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertForNextSentencePrediction
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
            >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
            >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
            >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
            >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
            >>> logits = outputs.logits
            >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
        """
        # Legacy keyword support: `next_sentence_label` was renamed to `labels`.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]
        seq_relationship_scores = self.cls(pooled_output)

        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))

        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Full encoder with the pooling layer; the pooled [CLS] feeds the head.
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Linear head: one output per label (num_labels == 1 means regression).
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # outputs[1] is the pooled output produced by BertModel's pooler.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            # Legacy tuple output: (loss?, logits, *extra encoder outputs).
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Scores each (prompt, choice) pair with a single scalar; the softmax
        # over choices is taken implicitly by the cross-entropy loss.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, num_choices, seq) -> (batch * num_choices, seq) so
        # every choice is encoded as an independent sequence.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Pooled output per flattened choice -> scalar score -> regroup by batch.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
    # Checkpoints may carry pooler weights; this head does not use them.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Per-token classification uses the sequence output, so no pooler.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Per-token hidden states (batch, seq, hidden) -> per-token logits.
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                # Padding positions get ignore_index so they don't contribute.
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
    # Checkpoints may carry pooler weights; this head does not use them.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config, add_pooling_layer=False)
        # Projects each token's hidden state to (start_logit, end_logit),
        # i.e. num_labels is expected to be 2 here.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="bert-base-uncased",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # (batch, seq, 2) -> two (batch, seq) tensors of start/end logits.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Span loss is the mean of start- and end-position losses.
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 82,187 | 41.873239 | 213 | py |
Mr.Right | Mr.Right-main/models/ALBEF/models/vit.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import _cfg, PatchEmbed
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath
class Mlp(nn.Module):
    """Two-layer feed-forward block (Linear -> activation -> Linear) as used
    in Vision Transformer, MLP-Mixer and related networks.

    Dropout is applied after the activation and again after the second
    projection. Hidden/output widths default to the input width.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden = hidden_features or in_features
        out = out_features or in_features
        # Attribute names (fc1/act/fc2/drop) are kept for checkpoint compatibility.
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)
    def forward(self, x):
        """Apply fc1 -> act -> drop -> fc2 -> drop."""
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention with optional hooks that cache the attention
    map and its gradient (used for attention visualization / Grad-CAM).
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        per_head = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or per_head ** -0.5
        # Single projection producing q, k and v in one matmul.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_gradients = None
        self.attention_map = None
    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients
    def get_attn_gradients(self):
        return self.attn_gradients
    def save_attention_map(self, attention_map):
        self.attention_map = attention_map
    def get_attention_map(self):
        return self.attention_map
    def forward(self, x, register_hook=False):
        batch, tokens, channels = x.shape
        # Project, then split into (3, B, heads, N, C/heads).
        packed = self.qkv(x)
        packed = packed.reshape(batch, tokens, 3, self.num_heads, channels // self.num_heads)
        packed = packed.permute(2, 0, 3, 1, 4)
        q, k, v = packed[0], packed[1], packed[2]  # make torchscript happy (cannot use tensor as tuple)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        scores = self.attn_drop(scores.softmax(dim=-1))
        if register_hook:
            # Cache the attention map and hook its gradient for later retrieval.
            self.save_attention_map(scores)
            scores.register_hook(self.save_attn_gradients)
        out = (scores @ v).transpose(1, 2).reshape(batch, tokens, channels)
        return self.proj_drop(self.proj(out))
class Block(nn.Module):
    """Pre-norm Transformer encoder block:
    x = x + DropPath(Attn(LN(x))); x = x + DropPath(MLP(LN(x))).
    """
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth on both residual branches; identity when rate is 0.
        if drop_path > 0.:
            self.drop_path = DropPath(drop_path)
        else:
            self.drop_path = nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
    def forward(self, x, register_hook=False):
        """Residual attention then residual MLP. `register_hook` is forwarded
        to the attention module so it caches its attention map/gradients."""
        x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
        return x + self.drop_path(self.mlp(self.norm2(x)))
class VisionTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
    https://arxiv.org/abs/2010.11929
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
        """
        super().__init__()
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        # Splits the image into patches and linearly embeds them.
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        # Learned [CLS] token and learned positions for [CLS] + all patches.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal linear weights, zero biases, unit LayerNorm scale.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self):
        # Positions and the class token are excluded from weight decay.
        return {'pos_embed', 'cls_token'}
    def forward(self, x, register_blk=-1):
        """Encode images into token embeddings ([CLS] first).

        `register_blk` selects the block index (if any) whose attention
        map/gradients should be cached; -1 disables caching.
        """
        B = x.shape[0]
        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed[:,:x.size(1),:]
        x = self.pos_drop(x)
        for i,blk in enumerate(self.blocks):
            x = blk(x, register_blk==i)
        x = self.norm(x)
        return x
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    """Resize a checkpoint's position embedding to `visual_encoder`'s patch
    grid via bicubic interpolation.

    The leading extra tokens (class/dist tokens) are kept unchanged; only the
    patch position tokens are interpolated. If the grid sizes already match,
    the checkpoint tensor is returned unmodified.
    """
    embed_dim = pos_embed_checkpoint.shape[-1]
    target_patches = visual_encoder.patch_embed.num_patches
    n_extra = visual_encoder.pos_embed.shape[-2] - target_patches
    # Side length of the (assumed square) patch grid, source and target.
    src_side = int((pos_embed_checkpoint.shape[-2] - n_extra) ** 0.5)
    dst_side = int(target_patches ** 0.5)
    if src_side == dst_side:
        return pos_embed_checkpoint
    # class_token and dist_token are kept unchanged
    extra = pos_embed_checkpoint[:, :n_extra]
    grid = pos_embed_checkpoint[:, n_extra:]
    # (B, N, C) -> (B, C, H, W) so F.interpolate can resize spatially.
    grid = grid.reshape(-1, src_side, src_side, embed_dim).permute(0, 3, 1, 2)
    grid = torch.nn.functional.interpolate(
        grid, size=(dst_side, dst_side), mode='bicubic', align_corners=False)
    grid = grid.permute(0, 2, 3, 1).flatten(1, 2)
    print('reshape position embedding from %d to %d' % (src_side ** 2, dst_side ** 2))
    return torch.cat((extra, grid), dim=1)
| 8,558 | 41.162562 | 118 | py |
Mr.Right | Mr.Right-main/models/ALBEF/models/model_retrieval.py | from functools import partial
from models.ALBEF.models.vit import VisionTransformer
from models.ALBEF.models.xbert import BertConfig, BertModel
import torch
from torch import nn
import torch.nn.functional as F
class ALBEF(nn.Module):
    """ALBEF retrieval model: a ViT image encoder plus a BERT used both as a
    text encoder (mode='text') and as a multimodal fusion encoder
    (mode='fusion', cross-attending to image features).
    """
    def __init__(self,
                 text_encoder = None,
                 tokenizer = None,
                 config = None,
                 ):
        # text_encoder: name/path of the pretrained BERT weights.
        # config: dict with keys 'embed_dim', 'vision_width', 'image_res',
        #         'bert_config' (path to a BERT config json).
        super().__init__()
        self.tokenizer = tokenizer
        embed_dim = config['embed_dim']
        vision_width = config['vision_width']
        self.visual_encoder = VisionTransformer(
            img_size=config['image_res'], patch_size=16, embed_dim=768, depth=12, num_heads=12,
            mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6))
        self.bert_config = BertConfig.from_json_file(config['bert_config'])
        self.text_encoder = BertModel.from_pretrained(text_encoder, config=self.bert_config, add_pooling_layer=False)
        text_width = self.text_encoder.config.hidden_size
        # Project image/text [CLS] features into the shared embedding space.
        self.vision_proj = nn.Linear(vision_width, embed_dim)
        self.text_proj = nn.Linear(text_width, embed_dim)
    def forward(self, doc_image, doc_text, query):
        """Encode the query text, the document text, the document image, and
        their fusion; return a dict of token embeddings, L2-normalized [CLS]
        features, and attention masks for each modality.
        """
        output = dict()
        # Text-only encoding of the query.
        query_output = self.text_encoder(query["input_ids"],
                                         attention_mask=query["attention_mask"],
                                         return_dict=True,
                                         mode='text')
        query_embeds = query_output.last_hidden_state
        query_feat = F.normalize(self.text_proj(query_embeds[:,0,:]),dim=-1)
        # Image encoding; all image tokens are attended to (mask of ones).
        image_embeds = self.visual_encoder(doc_image)
        image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(doc_image.device)
        image_feat = F.normalize(self.vision_proj(image_embeds[:,0,:]),dim=-1)
        # Text-only encoding of the document text.
        text_output = self.text_encoder(doc_text["input_ids"], attention_mask = doc_text["attention_mask"],
                                        return_dict = True, mode = 'text')
        text_embeds = text_output.last_hidden_state
        text_feat = F.normalize(self.text_proj(text_embeds[:,0,:]),dim=-1)
        # Fusion: feed the text embeddings back in and cross-attend to the image.
        multi_output = self.text_encoder(encoder_embeds = text_embeds,
                                         attention_mask = doc_text["attention_mask"],
                                         encoder_hidden_states = image_embeds,
                                         encoder_attention_mask = image_atts,
                                         return_dict = True,
                                         mode = 'fusion',
                                        )
        multi_embeds = multi_output.last_hidden_state
        # NOTE(review): multi_atts is computed but doc_text's mask is what is
        # exported below as 'multi_atts' — presumably intentional; confirm.
        multi_atts = torch.ones(multi_embeds.size()[:-1],dtype=torch.long).to(doc_image.device)
        multi_feat = F.normalize(self.text_proj(multi_embeds[:,0,:]),dim=-1)
        output['query_embeds'] = query_embeds
        output['query_cls'] = query_feat
        output['query_atts'] = query["attention_mask"]
        output['doctext_embeds'] = text_embeds
        output['doctext_cls'] = text_feat
        output['img_embeds'] = image_embeds
        output['img_cls'] = image_feat
        output['multi_embeds'] = multi_embeds
        output['multi_cls'] = multi_feat
        output['multi_atts'] = doc_text["attention_mask"]
        return output
| 3,499 | 45.666667 | 129 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/vilt_utils.py | import torch
import random
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from models.ViLT.vilt.modules.dist_utils import all_gather
from models.ViLT.vilt.modules.objectives import compute_irtr_recall
from models.ViLT.vilt.gadgets.my_metrics import Accuracy, VQAScore, Scalar
def set_metrics(pl_module):
    """Attach metric trackers to `pl_module` for every enabled loss.

    For each loss name whose weight in ``loss_names`` is >= 1, creates the
    metric attributes the training objectives read, for both the "train" and
    "val" phases. nlvr2 uses train/dev/test naming instead of train/val.
    """
    for phase in ("train", "val"):
        for name, weight in pl_module.hparams.config["loss_names"].items():
            if weight < 1:
                continue  # loss disabled in config
            if name == "vqa":
                setattr(pl_module, f"{phase}_vqa_score", VQAScore())
                setattr(pl_module, f"{phase}_{name}_loss", Scalar())
            elif name == "nlvr2":
                if phase == "train":
                    setattr(pl_module, f"train_{name}_accuracy", Accuracy())
                    setattr(pl_module, f"train_{name}_loss", Scalar())
                else:
                    # The validation phase tracks both dev and test splits.
                    for split in ("dev", "test"):
                        setattr(pl_module, f"{split}_{name}_accuracy", Accuracy())
                        setattr(pl_module, f"{split}_{name}_loss", Scalar())
            elif name == "irtr":
                setattr(pl_module, f"{phase}_irtr_loss", Scalar())
            elif name in ("mppd", "mpfr"):
                setattr(pl_module, f"{phase}_{name}_loss", Scalar())
            elif name == "itm":
                # ITM additionally tracks the word-patch-alignment loss.
                setattr(pl_module, f"{phase}_{name}_accuracy", Accuracy())
                setattr(pl_module, f"{phase}_{name}_loss", Scalar())
                setattr(pl_module, f"{phase}_{name}_wpa_loss", Scalar())
            else:
                setattr(pl_module, f"{phase}_{name}_accuracy", Accuracy())
                setattr(pl_module, f"{phase}_{name}_loss", Scalar())
def epoch_wrapup(pl_module):
    """End-of-epoch bookkeeping: optionally compute retrieval recalls, then
    log every enabled loss's epoch-level metrics, reset them, and log the
    aggregate "the_metric" used for checkpoint selection.
    """
    phase = "train" if pl_module.training else "val"
    the_metric = 0
    # Retrieval recalls are expensive, so they run only at validation time
    # and only when explicitly enabled in the config.
    if pl_module.hparams.config["get_recall_metric"] and not pl_module.training:
        (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_irtr_recall(pl_module)
        print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step)
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r1", ir_r1, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r5", ir_r5, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r10", ir_r10, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r1", tr_r1, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r5", tr_r5, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r10", tr_r10, pl_module.global_step
        )
        # R@1 in both directions contributes to the aggregate metric.
        the_metric += ir_r1.item() + tr_r1.item()
    for loss_name, v in pl_module.hparams.config["loss_names"].items():
        if v < 1:
            continue
        value = 0
        # Each branch mirrors the attributes created in set_metrics:
        # compute the epoch value, log it, then reset the accumulator.
        if loss_name == "vqa":
            value = getattr(pl_module, f"{phase}_{loss_name}_score").compute()
            pl_module.log(f"{loss_name}/{phase}/score_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_score").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "nlvr2":
            # nlvr2 tracks train vs. dev+test splits instead of train/val.
            if phase == "train":
                value = getattr(pl_module, f"train_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/train/accuracy_epoch", value)
                getattr(pl_module, f"train_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/train/loss_epoch",
                    getattr(pl_module, f"train_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"train_{loss_name}_loss").reset()
            else:
                value = getattr(pl_module, f"dev_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/dev/accuracy_epoch", value)
                getattr(pl_module, f"dev_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/dev/loss_epoch",
                    getattr(pl_module, f"dev_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"dev_{loss_name}_loss").reset()
                # NOTE: 'value' is overwritten below, so only the test
                # accuracy feeds the aggregate metric in the val phase.
                value = getattr(pl_module, f"test_{loss_name}_accuracy").compute()
                pl_module.log(f"{loss_name}/test/accuracy_epoch", value)
                getattr(pl_module, f"test_{loss_name}_accuracy").reset()
                pl_module.log(
                    f"{loss_name}/test/loss_epoch",
                    getattr(pl_module, f"test_{loss_name}_loss").compute(),
                )
                getattr(pl_module, f"test_{loss_name}_loss").reset()
        elif loss_name == "irtr":
            pl_module.log(
                f"{loss_name}/{phase}/irtr_loss_epoch",
                getattr(pl_module, f"{phase}_irtr_loss").compute(),
            )
            getattr(pl_module, f"{phase}_irtr_loss").reset()
        elif loss_name == "mppd" or loss_name == "mpfr":
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "itm":
            value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
            pl_module.log(
                f"{loss_name}/{phase}/wpa_loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_wpa_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_wpa_loss").reset()
        else:
            value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        the_metric += value
    pl_module.log(f"{phase}/the_metric", the_metric)
def check_non_acc_grad(pl_module):
    """Return True when the token-type embedding accumulated no gradient in
    the last backward pass (either no grad tensor at all, or an all-zero one).
    """
    grad = pl_module.token_type_embeddings.weight.grad
    if grad is None:
        return True
    return (grad.sum() == 0).item()
def set_task(pl_module):
    """Record on `pl_module.current_tasks` the names of all active losses,
    i.e. those whose weight in ``loss_names`` is at least 1."""
    active = []
    for name, weight in pl_module.hparams.config["loss_names"].items():
        if weight >= 1:
            active.append(name)
    pl_module.current_tasks = active
    return
def set_schedule(pl_module):
    """Build the optimizer and LR scheduler for a ViLT LightningModule.

    Parameters are split into four groups — {with, without} weight decay
    crossed with {backbone, downstream head} — the heads getting ``lr_mult``
    times the base learning rate. The chosen optimizer is wrapped in a
    warmup + (cosine or polynomial) decay schedule stepped per optimizer step.

    Returns:
        ([optimizer], [scheduler_dict]) as expected by PyTorch Lightning's
        ``configure_optimizers``.
    """
    lr = pl_module.hparams.config["learning_rate"]
    wd = pl_module.hparams.config["weight_decay"]
    # Parameters whose name contains one of these substrings get no weight
    # decay (biases and normalization parameters).
    no_decay = [
        "bias",
        "LayerNorm.bias",
        "LayerNorm.weight",
        "norm.bias",
        "norm.weight",
        "norm1.bias",
        "norm1.weight",
        "norm2.bias",
        "norm2.weight",
    ]
    # Freshly initialized task heads train with a larger learning rate.
    head_names = ["vqa_classifier", "nlvr2_classifier"]
    lr_mult = pl_module.hparams.config["lr_mult"]
    end_lr = pl_module.hparams.config["end_lr"]
    decay_power = pl_module.hparams.config["decay_power"]
    optim_type = pl_module.hparams.config["optim_type"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if not any(nd in n for nd in no_decay)
                and not any(bb in n for bb in head_names)
            ],
            "weight_decay": wd,
            "lr": lr,
        },
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if any(nd in n for nd in no_decay)
                and not any(bb in n for bb in head_names)
            ],
            "weight_decay": 0.0,
            "lr": lr,
        },
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if not any(nd in n for nd in no_decay)
                and any(bb in n for bb in head_names)
            ],
            "weight_decay": wd,
            "lr": lr * lr_mult,
        },
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if any(nd in n for nd in no_decay) and any(bb in n for bb in head_names)
            ],
            "weight_decay": 0.0,
            "lr": lr * lr_mult,
        },
    ]
    if optim_type == "adamw":
        optimizer = AdamW(
            optimizer_grouped_parameters, lr=lr, eps=1e-8, betas=(0.9, 0.98)
        )
    elif optim_type == "adam":
        optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=lr)
    elif optim_type == "sgd":
        optimizer = torch.optim.SGD(optimizer_grouped_parameters, lr=lr, momentum=0.9)
    # BUGFIX: PyTorch Lightning >= 1.5 uses -1 (not None) to mean
    # "max_steps not set"; treat both the same so the step count is derived
    # from the dataloader length instead of -1 leaking into the scheduler.
    if pl_module.trainer.max_steps is None or pl_module.trainer.max_steps == -1:
        max_steps = (
            len(pl_module.trainer.datamodule.train_dataloader())
            * pl_module.trainer.max_epochs
            // pl_module.trainer.accumulate_grad_batches
        )
    else:
        max_steps = pl_module.trainer.max_steps
    # A float warmup is interpreted as a fraction of the total steps.
    warmup_steps = pl_module.hparams.config["warmup_steps"]
    if isinstance(pl_module.hparams.config["warmup_steps"], float):
        warmup_steps = int(max_steps * warmup_steps)
    if decay_power == "cosine":
        scheduler = get_cosine_schedule_with_warmup(
            optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=max_steps,
        )
    else:
        scheduler = get_polynomial_decay_schedule_with_warmup(
            optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=max_steps,
            lr_end=end_lr,
            power=decay_power,
        )
    # Step the schedule every optimizer step (not every epoch).
    sched = {"scheduler": scheduler, "interval": "step"}
    return (
        [optimizer],
        [sched],
    )
| 10,650 | 37.451264 | 88 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/dist_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
import torch
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
    """Number of processes in the default process group; 1 when distributed
    training is unavailable or not initialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank() -> int:
    """Global rank of the current process; 0 when distributed training is
    unavailable or not initialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine) process group.
        0 when distributed training is unavailable or not initialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    # The per-machine group must have been set up by launch().
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group,
        i.e. the number of processes per machine.
        1 when distributed training is unavailable or not initialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
    """True exactly for the rank-0 process (always True when distributed
    training is unavailable or not initialized)."""
    if not (dist.is_available() and dist.is_initialized()):
        return True
    return dist.get_rank() == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    # No-op unless a multi-process group is actually running.
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a process group based on gloo backend, containing all the ranks
    The result is cached.
    """
    # NCCL cannot gather CPU tensors, so pickled-object collectives go
    # through a dedicated gloo group when the main backend is nccl.
    if dist.get_backend() != "nccl":
        return dist.group.WORLD
    return dist.new_group(backend="gloo")
def _serialize_to_tensor(data, group):
    """Pickle `data` and wrap the bytes in a ByteTensor on the device that
    matches the group's backend (CPU for gloo, CUDA for nccl)."""
    backend = dist.get_backend(group)
    assert backend in ["gloo", "nccl"]
    device = torch.device("cpu" if backend == "gloo" else "cuda")
    payload = pickle.dumps(data)
    if len(payload) > 1024 ** 3:
        # Warn about very large (> 1 GB) payloads: gathering them is slow.
        logger = logging.getLogger(__name__)
        logger.warning(
            "Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
                get_rank(), len(payload) / (1024 ** 3), device
            )
        )
    storage = torch.ByteStorage.from_buffer(payload)
    return torch.ByteTensor(storage).to(device=device)
def _pad_to_largest_tensor(tensor, group):
    """Exchange tensor sizes across the group and zero-pad the local tensor
    up to the largest one, since all_gather requires equal shapes.

    Returns:
        list[int]: size of the tensor, on each rank
        Tensor: padded tensor that has the max size
    """
    world_size = dist.get_world_size(group=group)
    assert (
        world_size >= 1
    ), "comm.gather/all_gather must be called from ranks within the given group!"
    my_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
    gathered_sizes = [
        torch.zeros([1], dtype=torch.int64, device=tensor.device)
        for _ in range(world_size)
    ]
    dist.all_gather(gathered_sizes, my_size, group=group)
    sizes = [int(s.item()) for s in gathered_sizes]
    biggest = max(sizes)
    # torch all_gather cannot collect tensors of different shapes, so every
    # rank pads its byte tensor up to the largest observed size.
    if my_size != biggest:
        padding = torch.zeros(
            (biggest - my_size,), dtype=torch.uint8, device=tensor.device
        )
        tensor = torch.cat((tensor, padding), dim=0)
    return sizes, tensor
def all_gather(data, group=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.
    Returns:
        list[data]: list of data gathered from each rank
    """
    # Fast paths: single-process runs skip serialization entirely.
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group) == 1:
        return [data]

    tensor = _serialize_to_tensor(data, group)

    # Pad to a common length so all_gather sees uniform shapes.
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    max_size = max(size_list)

    # receiving Tensor from all ranks
    tensor_list = [
        torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
        for _ in size_list
    ]
    dist.all_gather(tensor_list, tensor, group=group)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # Trim each rank's padding back to its true byte count before unpickling.
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list
def gather(data, dst=0, group=None):
    """
    Run gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
        dst (int): destination rank
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.
    Returns:
        list[data]: on dst, a list of data gathered from each rank. Otherwise,
            an empty list.
    """
    # Fast paths: single-process runs skip serialization entirely.
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group=group) == 1:
        return [data]
    rank = dist.get_rank(group=group)

    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)

    # receiving Tensor from all ranks
    if rank == dst:
        max_size = max(size_list)
        tensor_list = [
            torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
            for _ in size_list
        ]
        dist.gather(tensor, tensor_list, dst=dst, group=group)

        data_list = []
        for size, tensor in zip(size_list, tensor_list):
            # Trim padding to each rank's true byte count before unpickling.
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        return data_list
    else:
        # Non-destination ranks pass an empty gather_list and receive nothing.
        dist.gather(tensor, [], dst=dst, group=group)
        return []
def shared_random_seed():
    """
    Returns:
        int: a random number that is the same across all workers.
        If workers need a shared RNG, they can use this shared seed to
        create one.
    All workers must call this function, otherwise it will deadlock.
    """
    local_draw = np.random.randint(2 ** 31)
    # Exchange every worker's draw and keep rank 0's value so that all
    # workers agree on a single seed.
    return all_gather(local_draw)[0]
def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the reduced results.
    Args:
        input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
        average (bool): whether to do average or sum
    Returns:
        a dict with the same keys as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single process: nothing to reduce; return the input unchanged.
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # Stack into one tensor so a single reduce covers all entries.
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
| 7,814 | 27.837638 | 100 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/objectives.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from models.ViLT.vilt.modules.dist_utils import all_gather
def cost_matrix_cosine(x, y, eps=1e-5):
    """Batched pairwise cosine distance.

    Args:
        x: tensor of shape [B, L_x, D].
        y: tensor of shape [B, L_y, D].
        eps: small constant guarding the L2 normalization.

    Returns:
        [B, L_x, L_y] tensor where entry (b, i, j) = 1 - cos(x[b, i], y[b, j]).
    """
    assert x.dim() == y.dim()
    assert x.size(0) == y.size(0) and x.size(2) == y.size(2)
    x_unit = F.normalize(x, p=2, dim=-1, eps=eps)
    y_unit = F.normalize(y, p=2, dim=-1, eps=eps)
    similarity = torch.bmm(x_unit, y_unit.transpose(1, 2))
    return 1 - similarity
def trace(x):
    """Batched matrix trace: sum of the main diagonal of each [n, n] matrix.

    Args:
        x: tensor of shape [b, n, n].

    Returns:
        Tensor of shape [b] with one trace per matrix.
    """
    b, m, n = x.size()
    assert m == n
    diagonal = torch.diagonal(x, dim1=-2, dim2=-1)  # [b, n]
    return diagonal.sum(dim=-1)
@torch.no_grad()
def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k):
    """ [B, M, N], [B], [B, M], [B], [B, N], [B, M, N]

    IPOT-style iterative computation of a transport plan T between two
    padded sequences given cost matrix C; runs without gradients.
    Padded positions are masked out of every update.
    """
    b, m, n = C.size()
    # sigma: per-position scaling over x, uniform 1/len at start.
    sigma = torch.ones(b, m, dtype=C.dtype, device=C.device) / x_len.unsqueeze(1)
    # T: transport plan, stored transposed as [b, n, m].
    T = torch.ones(b, n, m, dtype=C.dtype, device=C.device)
    A = torch.exp(-C.transpose(1, 2) / beta)

    # mask padded positions
    sigma.masked_fill_(x_pad, 0)
    joint_pad = joint_pad.transpose(1, 2)
    T.masked_fill_(joint_pad, 0)
    A.masked_fill_(joint_pad, 0)

    # broadcastable lengths
    x_len = x_len.unsqueeze(1).unsqueeze(2)
    y_len = y_len.unsqueeze(1).unsqueeze(2)

    # mask to zero out padding in delta and sigma
    # (a large additive constant in the denominator drives padded entries to ~0)
    x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1)
    y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1)

    for _ in range(iteration):
        Q = A * T  # bs * n * m
        sigma = sigma.view(b, m, 1)
        for _ in range(k):
            # Alternating row/column scaling updates (Sinkhorn-like inner loop).
            delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask)
            sigma = 1 / (x_len * delta.matmul(Q) + x_mask)
        T = delta.view(b, n, 1) * Q * sigma
    T.masked_fill_(joint_pad, 0)
    return T
def optimal_transport_dist(
    txt_emb, img_emb, txt_pad, img_pad, beta=0.5, iteration=50, k=1
):
    """ [B, M, D], [B, N, D], [B, M], [B, N]

    Optimal-transport distance between text and image token embeddings:
    builds a cosine cost matrix, solves for a transport plan with ipot()
    (gradients detached), and returns trace(C @ T) per batch element.
    """
    cost = cost_matrix_cosine(txt_emb, img_emb)
    # mask the padded inputs
    joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
    cost.masked_fill_(joint_pad, 0)

    # Effective (unpadded) sequence lengths per example.
    txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
    img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)

    T = ipot(
        cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, beta, iteration, k
    )
    distance = trace(cost.matmul(T.detach()))
    return distance
def compute_mlm(pl_module, batch):
    """Masked language modeling objective.

    Runs the model with text masking on, scores masked positions against the
    vocabulary, and computes cross-entropy (labels of -100 are ignored).
    Logs loss and accuracy under "mlm/<phase>/...".

    Returns:
        dict with mlm_loss, mlm_logits, mlm_labels and the input text ids.
    """
    infer = pl_module.infer(batch, mask_text=True, mask_image=False)
    mlm_logits = pl_module.mlm_score(infer["text_feats"])
    mlm_labels = infer["text_labels"]

    mlm_loss = F.cross_entropy(
        mlm_logits.view(-1, pl_module.hparams.config["vocab_size"]),
        mlm_labels.view(-1),
        ignore_index=-100,
    )

    ret = {
        "mlm_loss": mlm_loss,
        "mlm_logits": mlm_logits,
        "mlm_labels": mlm_labels,
        "mlm_ids": infer["text_ids"],
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"])
    acc = getattr(pl_module, f"{phase}_mlm_accuracy")(
        ret["mlm_logits"], ret["mlm_labels"]
    )
    pl_module.log(f"mlm/{phase}/loss", loss)
    pl_module.log(f"mlm/{phase}/accuracy", acc)

    return ret
def compute_mpp(pl_module, batch):
    """Masked patch prediction objective.

    Runs the model with image masking on and classifies each masked patch
    with three 256-way heads (the 768-dim output is split into 3 x 256
    logit groups); labels of -100 are ignored. Logs loss and accuracy
    under "mpp/<phase>/...".
    """
    infer = pl_module.infer(batch, mask_text=False, mask_image=True)
    mpp_logits = pl_module.mpp_score(infer["image_feats"])
    # Split the 768-way output into three independent 256-way classifiers.
    mpp_logits = torch.stack(
        [
            mpp_logits[:, :, 0:256],
            mpp_logits[:, :, 256:512],
            mpp_logits[:, :, 512:768],
        ],
        dim=2,
    )
    mpp_labels = infer["image_labels"]

    mpp_loss = F.cross_entropy(
        mpp_logits.view(-1, 256),
        mpp_labels.view(-1),
        ignore_index=-100,
    )

    ret = {
        "mpp_loss": mpp_loss,
        "mpp_logits": mpp_logits,
        "mpp_labels": mpp_labels,
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_mpp_loss")(ret["mpp_loss"])
    acc = getattr(pl_module, f"{phase}_mpp_accuracy")(
        ret["mpp_logits"], ret["mpp_labels"]
    )
    pl_module.log(f"mpp/{phase}/loss", loss)
    pl_module.log(f"mpp/{phase}/accuracy", acc)

    return ret
def compute_mppd(pl_module, batch):
    """Masked patch prediction (regression variant, MSE against mppd targets).

    Only positions whose classification labels are not all -100 contribute
    to the loss. Logs loss under "mppd/<phase>/loss".
    """
    infer = pl_module.infer(batch, mask_text=False, mask_image=True)
    mppd_logits = pl_module.mppd_score(infer["image_feats"])
    mppd_labels = infer["image_labels_mppd"]
    # Keep only examples that actually have masked (trainable) patches.
    filter_to_train = infer["image_labels"].float().mean(dim=-1) != -100
    labels = mppd_labels[filter_to_train]
    logits = mppd_logits[filter_to_train]
    mppd_loss = F.mse_loss(logits, labels)

    ret = {
        "mppd_loss": mppd_loss,
        "mppd_logits": mppd_logits,
        "mppd_labels": mppd_labels,
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_mppd_loss")(ret["mppd_loss"])
    pl_module.log(f"mppd/{phase}/loss", loss)

    return ret
def compute_mpfr(pl_module, batch):
    """Masked patch feature regression (MSE against mpfr targets).

    Mirrors compute_mppd but regresses onto "image_labels_mpfr" targets.
    Logs loss under "mpfr/<phase>/loss".
    """
    infer = pl_module.infer(batch, mask_text=False, mask_image=True)
    mpfr_logits = pl_module.mpfr_score(infer["image_feats"])
    mpfr_labels = infer["image_labels_mpfr"]
    # Keep only examples that actually have masked (trainable) patches.
    filter_to_train = infer["image_labels"].float().mean(dim=-1) != -100
    labels = mpfr_labels[filter_to_train]
    logits = mpfr_logits[filter_to_train]
    mpfr_loss = F.mse_loss(logits, labels)

    ret = {
        "mpfr_loss": mpfr_loss,
        "mpfr_logits": mpfr_logits,
        "mpfr_labels": mpfr_labels,
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_mpfr_loss")(ret["mpfr_loss"])
    pl_module.log(f"mpfr/{phase}/loss", loss)

    return ret
def compute_itm_wpa(pl_module, batch):
    """Image-text matching with word-patch alignment (WPA).

    Half of the batch keeps its true image, the other half is swapped for a
    "false" image; a binary ITM head classifies matched vs. mismatched pairs.
    Additionally an optimal-transport distance between text and patch
    features yields the WPA loss (weighted by 0.1). Logs itm loss, wpa loss
    and accuracy under "itm/<phase>/...".
    """
    # Randomly assign half the batch as positive (true image) pairs.
    pos_len = len(batch["text"]) // 2
    neg_len = len(batch["text"]) - pos_len
    itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)]).to(
        pl_module.device
    )
    itm_labels = itm_labels[torch.randperm(itm_labels.size(0))]

    # For label 0 examples, substitute the drawn false image.
    itm_images = [
        torch.stack(
            [
                ti if itm_labels[i] == 1 else fi
                for i, (ti, fi) in enumerate(zip(bti, bfi))
            ]
        )
        for bti, bfi in zip(batch["image"], batch["false_image_0"])
    ]

    batch = {k: v for k, v in batch.items()}
    batch["image"] = itm_images

    infer = pl_module.infer(batch, mask_text=False, mask_image=False)

    # The OT computation is numerically sensitive, so force fp32.
    with torch.cuda.amp.autocast(enabled=False):
        txt_emb, img_emb = infer["text_feats"], infer["image_feats"]
        txt_mask, img_mask = infer["text_masks"].bool(), infer["image_masks"].bool()
        # Exclude special positions: last text token ([SEP]) and [CLS] tokens.
        for i, _len in enumerate(txt_mask.sum(dim=1)):
            txt_mask[i, _len - 1] = False
        txt_mask[:, 0] = False
        img_mask[:, 0] = False
        if "deit" in pl_module.hparams.config["vit"]:
            # DeiT has an extra distillation token to skip.
            img_mask[:, 1] = False
        txt_pad, img_pad = ~txt_mask, ~img_mask

        cost = cost_matrix_cosine(txt_emb.float(), img_emb.float())
        joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
        cost.masked_fill_(joint_pad, 0)

        txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(
            dtype=cost.dtype
        )
        img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(
            dtype=cost.dtype
        )

        T = ipot(
            cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, 0.5, 50, 1
        )
        distance = trace(cost.matmul(T.detach()))

    # WPA margin-style loss: small distance for matched pairs, large for mismatched.
    dist_pos = distance.masked_select(itm_labels == 1)
    dist_neg = distance.masked_select(itm_labels == 0)
    ot_loss = (dist_pos.sum() - dist_neg.sum()) / (dist_pos.size(0) + dist_neg.size(0))

    itm_logits = pl_module.itm_score(infer["cls_feats"])
    itm_loss = F.cross_entropy(itm_logits, itm_labels.long())

    ret = {
        "itm_loss": itm_loss,
        "itm_wpa_loss": 0.1 * ot_loss,
        "itm_logits": itm_logits,
        "itm_labels": itm_labels,
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_itm_loss")(ret["itm_loss"])
    wpa_loss = getattr(pl_module, f"{phase}_itm_wpa_loss")(ret["itm_wpa_loss"])
    acc = getattr(pl_module, f"{phase}_itm_accuracy")(
        ret["itm_logits"], ret["itm_labels"]
    )
    pl_module.log(f"itm/{phase}/loss", loss)
    pl_module.log(f"itm/{phase}/wpa_loss", wpa_loss)
    pl_module.log(f"itm/{phase}/accuracy", acc)

    return ret
def compute_imgcls(pl_module, batch):
    """Image classification objective: classify the pooled CLS features
    against batch["label"] with cross-entropy. Logs loss and accuracy under
    "imgcls/<phase>/..."."""
    infer = pl_module.infer(batch, mask_text=False, mask_image=False)
    imgcls_logits = pl_module.img_classifier(infer["cls_feats"])
    imgcls_labels = batch["label"]
    imgcls_labels = torch.tensor(imgcls_labels).to(pl_module.device).long()
    imgcls_loss = F.cross_entropy(imgcls_logits, imgcls_labels)

    ret = {
        "imgcls_loss": imgcls_loss,
        "imgcls_logits": imgcls_logits,
        "imgcls_labels": imgcls_labels,
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_imgcls_loss")(ret["imgcls_loss"])
    acc = getattr(pl_module, f"{phase}_imgcls_accuracy")(
        ret["imgcls_logits"], ret["imgcls_labels"]
    )
    pl_module.log(f"imgcls/{phase}/loss", loss)
    pl_module.log(f"imgcls/{phase}/accuracy", acc)

    return ret
def compute_vqa(pl_module, batch):
    """VQAv2 objective: multi-label BCE over answer candidates.

    Builds a soft-target matrix from per-question (label, score) pairs and
    uses binary cross-entropy with logits, scaled by the label-space size
    (per the reference implementation linked below). Logs loss and score
    under "vqa/<phase>/...".
    """
    infer = pl_module.infer(batch, mask_text=False, mask_image=False)
    vqa_logits = pl_module.vqa_classifier(infer["cls_feats"])
    vqa_targets = torch.zeros(
        len(vqa_logits), pl_module.hparams.config["vqav2_label_size"]
    ).to(pl_module.device)

    vqa_labels = batch["vqa_labels"]
    vqa_scores = batch["vqa_scores"]

    # Scatter each question's answer scores into its target row.
    for i, (_label, _score) in enumerate(zip(vqa_labels, vqa_scores)):
        for l, s in zip(_label, _score):
            vqa_targets[i, l] = s

    vqa_loss = (
        F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets)
        * vqa_targets.shape[1]
    )  # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19

    ret = {
        "vqa_loss": vqa_loss,
        "vqa_logits": vqa_logits,
        "vqa_targets": vqa_targets,
        "vqa_labels": vqa_labels,
        "vqa_scores": vqa_scores,
    }

    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"])
    score = getattr(pl_module, f"{phase}_vqa_score")(
        ret["vqa_logits"], ret["vqa_targets"]
    )
    pl_module.log(f"vqa/{phase}/loss", loss)
    pl_module.log(f"vqa/{phase}/score", score)

    return ret
def compute_nlvr2(pl_module, batch):
    """NLVR2 objective: binary classification over an image pair.

    Runs two forward passes (one per image, distinguished by the image
    token-type index), concatenates their CLS features and classifies.
    During training, logs loss/accuracy; during evaluation, splits metrics
    into dev/test subsets based on batch["table_name"].
    """
    infer1 = pl_module.infer(
        batch, mask_text=False, mask_image=False, image_token_type_idx=1
    )
    infer2 = pl_module.infer(
        batch, mask_text=False, mask_image=False, image_token_type_idx=2
    )

    cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1)
    nlvr2_logits = pl_module.nlvr2_classifier(cls_feats)

    nlvr2_labels = batch["answers"]
    nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long()
    nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels)

    ret = {
        "nlvr2_loss": nlvr2_loss,
        "nlvr2_logits": nlvr2_logits,
        "nlvr2_labels": nlvr2_labels,
    }

    phase = "train" if pl_module.training else "val"

    if phase == "train":
        loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"])
        acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")(
            ret["nlvr2_logits"], ret["nlvr2_labels"]
        )
        pl_module.log(f"nlvr2/{phase}/loss", loss)
        pl_module.log(f"nlvr2/{phase}/accuracy", acc)
    else:
        # Evaluation batches can mix dev and test examples; split by table name.
        dev_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n]
        test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n]

        if dev_batches:
            dev_loss = getattr(pl_module, f"dev_nlvr2_loss")(
                F.cross_entropy(
                    ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
                )
            )
            dev_acc = getattr(pl_module, f"dev_nlvr2_accuracy")(
                ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
            )
            pl_module.log(f"nlvr2/dev/loss", dev_loss)
            pl_module.log(f"nlvr2/dev/accuracy", dev_acc)
        if test_batches:
            test_loss = getattr(pl_module, f"test_nlvr2_loss")(
                F.cross_entropy(
                    ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
                )
            )
            test_acc = getattr(pl_module, f"test_nlvr2_accuracy")(
                ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
            )
            pl_module.log(f"nlvr2/test/loss", test_loss)
            pl_module.log(f"nlvr2/test/accuracy", test_acc)

    return ret
def compute_irtr(pl_module, batch):
    """Image-retrieval / text-retrieval training objective.

    For each image, the true caption and `draw_false_text` distractor
    captions are scored jointly; cross-entropy forces the true caption
    (index 0) to rank first. Logs loss under "irtr/<phase>/irtr_loss".
    """
    is_training_phase = pl_module.training

    _bs, _c, _h, _w = batch["image"][0].shape
    false_len = pl_module.hparams.config["draw_false_text"]
    # Gather the distractor texts alongside the true text (slot 0).
    text_ids = torch.stack(
        [batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
    )
    text_masks = torch.stack(
        [batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
    )
    text_labels = torch.stack(
        [batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
    )

    text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
    text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
    text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
    # Repeat each image once per candidate text, then flatten batch x candidates.
    images = batch["image"][0].unsqueeze(1).expand(_bs, false_len + 1, _c, _h, _w)

    infer = pl_module.infer(
        {
            "image": [rearrange(images, "bs fs c h w -> (bs fs) c h w")],
            "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
            "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
            "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
        }
    )
    score = pl_module.rank_output(infer["cls_feats"])[:, 0]
    score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
    # The correct caption always sits at candidate index 0.
    answer = torch.zeros(_bs).to(score).long()
    irtr_loss = F.cross_entropy(score, answer)

    ret = {
        "irtr_loss": irtr_loss,
    }

    phase = "train" if pl_module.training else "val"
    irtr_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"])

    pl_module.log(f"irtr/{phase}/irtr_loss", irtr_loss)

    return ret
@torch.no_grad()
def compute_irtr_recall(pl_module):
    """Evaluate retrieval recall (R@1/5/10) on the no-false validation set.

    Precomputes text batches and per-image visual embeddings, scores every
    image against every text with the rank head, gathers scores across
    ranks, and derives text-retrieval (tr) and image-retrieval (ir) recall.

    Returns:
        tuple: (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10).
    """
    text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
    text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    text_loader = torch.utils.data.DataLoader(
        text_dset,
        batch_size=64,
        num_workers=pl_module.hparams.config["num_workers"],
        pin_memory=True,
        collate_fn=functools.partial(
            text_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )

    image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
        image_only=True
    )
    image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    # Images are sharded across ranks; scores are all-gathered at the end.
    dist_sampler = DistributedSampler(image_dset, shuffle=False)
    image_loader = torch.utils.data.DataLoader(
        image_dset,
        batch_size=1,
        num_workers=pl_module.hparams.config["num_workers"],
        sampler=dist_sampler,
        pin_memory=True,
        collate_fn=functools.partial(
            image_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )

    # Cache all text batches on-device once.
    text_preload = list()
    for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
        text_preload.append(
            {
                "text_ids": _b["text_ids"].to(pl_module.device),
                "text_masks": _b["text_masks"].to(pl_module.device),
                "text_labels": _b["text_labels"].to(pl_module.device),
                "img_index": _b["img_index"],
            }
        )

    tiids = list()
    for pre in text_preload:
        tiids += pre["img_index"]
    tiids = torch.tensor(tiids)

    # Cache visual embeddings (one image per batch) for this rank's shard.
    image_preload = list()
    for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
        (ie, im, _, _) = pl_module.transformer.visual_embed(
            _b["image"][0].to(pl_module.device),
            max_image_len=pl_module.hparams.config["max_image_len"],
            mask_it=False,
        )
        image_preload.append((ie, im, _b["img_index"][0]))

    rank_scores = list()
    rank_iids = list()

    # Score every (image, text) pair with the rank head.
    for img_batch in tqdm.tqdm(image_preload, desc="rank loop"):
        _ie, _im, _iid = img_batch
        _, l, c = _ie.shape

        img_batch_score = list()
        for txt_batch in text_preload:
            fblen = len(txt_batch["text_ids"])
            # Broadcast this image's embedding across the whole text batch.
            ie = _ie.expand(fblen, l, c)
            im = _im.expand(fblen, l)

            with torch.cuda.amp.autocast():
                score = pl_module.rank_output(
                    pl_module.infer(
                        {
                            "text_ids": txt_batch["text_ids"],
                            "text_masks": txt_batch["text_masks"],
                            "text_labels": txt_batch["text_labels"],
                        },
                        image_embeds=ie,
                        image_masks=im,
                    )["cls_feats"]
                )[:, 0]

            img_batch_score.append(score)

        img_batch_score = torch.cat(img_batch_score)
        rank_scores.append(img_batch_score.cpu().tolist())
        rank_iids.append(_iid)

    # Combine every rank's shard into the full score matrix.
    torch.distributed.barrier()
    gather_rank_scores = all_gather(rank_scores)
    gather_rank_iids = all_gather(rank_iids)

    iids = torch.tensor(gather_rank_iids)
    iids = iids.view(-1)
    scores = torch.tensor(gather_rank_scores)
    scores = scores.view(len(iids), -1)

    # Text retrieval: for each image (row), do its own captions appear in top-k?
    topk10 = scores.topk(10, dim=1)
    topk5 = scores.topk(5, dim=1)
    topk1 = scores.topk(1, dim=1)
    topk10_iids = tiids[topk10.indices]
    topk5_iids = tiids[topk5.indices]
    topk1_iids = tiids[topk1.indices]

    tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
    tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
    tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()

    # Image retrieval: for each text (column), does its image appear in top-k?
    topk10 = scores.topk(10, dim=0)
    topk5 = scores.topk(5, dim=0)
    topk1 = scores.topk(1, dim=0)
    topk10_iids = iids[topk10.indices]
    topk5_iids = iids[topk5.indices]
    topk1_iids = iids[topk1.indices]

    ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
    ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
    ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()

    return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
def init_weights(module):
    """BERT-style module initialization.

    Linear/Embedding weights get normal(0, 0.02); LayerNorm gets unit
    weight and zero bias; Linear biases are zeroed.
    """
    is_linear = isinstance(module, nn.Linear)
    if is_linear or isinstance(module, nn.Embedding):
        nn.init.normal_(module.weight, mean=0.0, std=0.02)
    elif isinstance(module, nn.LayerNorm):
        nn.init.zeros_(module.bias)
        nn.init.ones_(module.weight)

    if is_linear and module.bias is not None:
        nn.init.zeros_(module.bias)
def vqa_test_step(pl_module, batch, output):
    """Convert VQA logits for one test batch into answer strings.

    Looks up the id->answer vocabulary from the datamodule ("vqa_trainval"
    when present, otherwise "vqa"), takes the argmax answer id per example,
    and pairs it with the question ids.

    Returns:
        dict with "qids" (question ids) and "preds" (predicted answer strings).
    """
    dm_dicts = pl_module.trainer.datamodule.dm_dicts
    id2answer = (
        dm_dicts["vqa_trainval"].id2answer
        if "vqa_trainval" in dm_dicts
        else dm_dicts["vqa"].id2answer
    )
    vqa_logits = output["vqa_logits"]
    vqa_preds = vqa_logits.argmax(dim=-1)
    # Map predicted label ids to their answer strings.
    vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
    # NOTE: the original also read batch["text"] into an unused local;
    # that dead read has been removed.
    qids = batch["qid"]
    return {"qids": qids, "preds": vqa_preds}
def arc_test_step(pl_module, batch, output):
    """Identity test step for the ARC task: forward the model output as-is."""
    return output
def vqa_test_wrapup(outs, model_name):
    """Merge per-rank VQA predictions into one submission JSON.

    Each rank writes its own vqa_submit_<rank>.json; after a barrier, rank 0
    concatenates all shards into result/vqa_submit_<model_name>.json. The
    per-rank temporary files are removed at the end.
    """
    rank = torch.distributed.get_rank()
    qids, preds = list(), list()
    for out in outs:
        qids += out["qids"]
        preds += out["preds"]

    rets = list()
    for qid, pred in zip(qids, preds):
        rets.append({"question_id": qid, "answer": pred})
    with open(f"vqa_submit_{rank}.json", "w") as fp:
        json.dump(rets, fp, indent=4)

    # Wait until every rank has written its shard before merging.
    torch.distributed.barrier()

    if rank == 0:
        jsons = list()
        paths = list(glob.glob("vqa_submit_*.json"))
        for path in paths:
            with open(path, "r") as fp:
                jsons += json.load(fp)
        os.makedirs("result", exist_ok=True)
        with open(f"result/vqa_submit_{model_name}.json", "w") as fp:
            json.dump(jsons, fp, indent=4)

    # Wait for rank 0's merge before deleting the temporary shard files.
    torch.distributed.barrier()
    os.remove(f"vqa_submit_{rank}.json")
def arc_test_wrapup(outs, caplen, model_name):
    """Merge per-rank captioning outputs into one COCO-style caption JSON.

    Mirrors vqa_test_wrapup: each rank dumps coco_cap_len<caplen>_<rank>.json,
    rank 0 merges them (sorted by image_id) into result/arc/, and the
    temporary per-rank files are removed afterwards.
    """
    rank = torch.distributed.get_rank()
    iids, captions = list(), list()
    for out in outs:
        iids += out["iid"]
        captions += out["captions"]

    rets = list()
    for iid, caption in zip(iids, captions):
        rets.append({"image_id": iid, "caption": caption})
    with open(f"coco_cap_len{caplen}_{rank}.json", "w") as fp:
        json.dump(rets, fp, indent=4)

    # Wait until every rank has written its shard before merging.
    torch.distributed.barrier()

    if rank == 0:
        jsons = list()
        paths = list(glob.glob(f"coco_cap_len{caplen}_*.json"))
        for path in paths:
            with open(path, "r") as fp:
                jsons += json.load(fp)
        os.makedirs("result/arc", exist_ok=True)
        jsons = sorted(jsons, key=lambda x: x["image_id"])
        with open(f"result/arc/coco_cap_{model_name}_len{caplen}.json", "w") as fp:
            json.dump(jsons, fp, indent=4)

    # Wait for rank 0's merge before deleting the temporary shard files.
    torch.distributed.barrier()
    os.remove(f"coco_cap_len{caplen}_{rank}.json")
| 22,098 | 32.842266 | 88 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/vilt_module.py | import torch
# NOTE(review): autograd anomaly detection is globally enabled here; it adds
# substantial overhead to every backward pass and is normally a debug-only
# flag — confirm this is intentional before training at scale.
torch.autograd.set_detect_anomaly(True)
import torch.nn as nn
import pytorch_lightning as pl
import models.ViLT.vilt.modules.vision_transformer as vit
import pdb
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings
from models.ViLT.vilt.modules import heads, objectives
# from models.ViLT.vilt.modules import vilt_utils
class ViLTransformerSS(pl.LightningModule):
    """ViLT vision-and-language transformer wrapped as a LightningModule.

    Text tokens are embedded with BERT embeddings, images are patch-embedded
    by a ViT backbone, and both are fused by a single transformer encoder.
    Task-specific heads (MLM, ITM, MPP, VQA, NLVR2, IRTR) are attached based
    on config["loss_names"].
    """

    def __init__(self, config):
        super().__init__()
        self.save_hyperparameters()

        # Config for the BERT-style text embedding layer (and MLM head).
        self.bert_config = BertConfig(
            vocab_size=config["vocab_size"],
            hidden_size=config["hidden_size"],
            num_hidden_layers=config["num_layers"],
            num_attention_heads=config["num_heads"],
            intermediate_size=config["hidden_size"] * config["mlp_ratio"],
            max_position_embeddings=config["max_text_len"],
            hidden_dropout_prob=config["drop_rate"],
            attention_probs_dropout_prob=config["drop_rate"],
        )

        self.text_embeddings = BertEmbeddings(self.bert_config)
        self.text_embeddings.apply(objectives.init_weights)

        # Modality indicator embeddings: index 0 = text, index 1 = image.
        self.token_type_embeddings = nn.Embedding(2, config["hidden_size"])
        self.token_type_embeddings.apply(objectives.init_weights)

        # Load ImageNet-pretrained ViT weights only when no task checkpoint
        # is going to be loaded below.
        if self.hparams.config["checkpoint"] == "":
            self.transformer = getattr(vit, self.hparams.config["vit"])(
                pretrained=True, config=self.hparams.config
            )
        else:
            self.transformer = getattr(vit, self.hparams.config["vit"])(
                pretrained=False, config=self.hparams.config
            )

        self.pooler = heads.Pooler(config["hidden_size"])
        self.pooler.apply(objectives.init_weights)

        if config["loss_names"]["mlm"] > 0:
            self.mlm_score = heads.MLMHead(self.bert_config)
            self.mlm_score.apply(objectives.init_weights)

        if config["loss_names"]["itm"] > 0:
            self.itm_score = heads.ITMHead(config["hidden_size"])
            self.itm_score.apply(objectives.init_weights)

        if config["loss_names"]["mpp"] > 0:
            self.mpp_score = heads.MPPHead(self.bert_config)
            self.mpp_score.apply(objectives.init_weights)

        # ===================== Downstream ===================== #
        hs = self.hparams.config["hidden_size"]

        if self.hparams.config["loss_names"]["vqa"] > 0:
            vs = self.hparams.config["vqav2_label_size"]
            self.vqa_classifier = nn.Sequential(
                nn.Linear(hs, hs * 2),
                nn.LayerNorm(hs * 2),
                nn.GELU(),
                nn.Linear(hs * 2, vs),
            )
            self.vqa_classifier.apply(objectives.init_weights)

        if self.hparams.config["loss_names"]["nlvr2"] > 0:
            self.nlvr2_classifier = nn.Sequential(
                nn.Linear(hs * 2, hs * 2),
                nn.LayerNorm(hs * 2),
                nn.GELU(),
                nn.Linear(hs * 2, 2),
            )
            self.nlvr2_classifier.apply(objectives.init_weights)
            # NLVR2 feeds two images per example, so extend the token-type
            # table to 3 entries; the new image slot is seeded from the
            # existing image embedding.
            emb_data = self.token_type_embeddings.weight.data
            self.token_type_embeddings = nn.Embedding(3, hs)
            self.token_type_embeddings.apply(objectives.init_weights)
            self.token_type_embeddings.weight.data[0, :] = emb_data[0, :]
            self.token_type_embeddings.weight.data[1, :] = emb_data[1, :]
            self.token_type_embeddings.weight.data[2, :] = emb_data[1, :]

        if self.hparams.config["loss_names"]["irtr"] > 0:
            # Initialize the rank head from the "match" row of the ITM head,
            # then freeze the ITM head itself.
            self.rank_output = nn.Linear(hs, 1)
            self.rank_output.weight.data = self.itm_score.fc.weight.data[1:, :]
            self.rank_output.bias.data = self.itm_score.fc.bias.data[1:]
            self.margin = 0.2
            for p in self.itm_score.parameters():
                p.requires_grad = False

        # vilt_utils.set_metrics(self)
        self.current_tasks = list()

        # ===================== load downstream (test_only) ======================

        if self.hparams.config["checkpoint"] != "":
            ckpt = torch.load(self.hparams.config["checkpoint"], map_location="cpu")
            state_dict = ckpt['state_dict']
            # interpolate position embedding: the checkpoint's text position
            # table may be longer or shorter than the configured max_text_len,
            # so truncate it or append freshly-initialized rows to match.
            org_embeds = state_dict['text_embeddings.position_embeddings.weight']
            org_tokens_len = org_embeds.shape[0]
            new_tokens_len = self.text_embeddings.position_embeddings.weight.shape[0]
            if org_tokens_len > new_tokens_len:
                state_dict['text_embeddings.position_embeddings.weight'] = org_embeds[:new_tokens_len,:]
            else:
                extra_tokens = new_tokens_len - org_tokens_len
                extra_tokens_embeds = nn.Embedding(extra_tokens, self.hparams.config["hidden_size"])
                extra_tokens_embeds_weight = extra_tokens_embeds.weight
                state_dict['text_embeddings.position_embeddings.weight'] = torch.cat([org_embeds,extra_tokens_embeds_weight],dim=0)
            state_dict['text_embeddings.position_ids'] = self.text_embeddings.position_ids
            msg = self.load_state_dict(state_dict, strict=False)
            print(msg)

    def infer(
        self,
        batch,
        mask_text=False,
        mask_image=False,
        image_token_type_idx=1,
        image_embeds=None,
        image_masks=None,
    ):
        """Run a fused text+image forward pass.

        Also encodes batch["query_ids"] separately (text-only) through the
        same transformer blocks. Returns a dict of fused features, the
        pooled CLS representation, query features and the attention masks.
        When image_embeds/image_masks are given, the visual-embedding step
        is skipped.
        """
        imgkey = "image"

        do_mlm = "_mlm" if mask_text else ""
        text_ids = batch[f"text_ids{do_mlm}"]
        # text_labels = batch[f"text_labels{do_mlm}"]
        text_masks = batch[f"text_masks"]
        text_embeds = self.text_embeddings(text_ids)

        # Encode the query text on its own (no image tokens).
        query_ids = batch[f"query_ids"]
        query_embeds = self.text_embeddings(query_ids)
        query_masks = batch[f"query_masks"]
        # Debug probe for NaNs in the query embeddings (result unused).
        check = torch.isnan(query_embeds)
        # print(-1,torch.isnan(check).any())
        # print((check == True).nonzero(as_tuple=True))
        for i, blk in enumerate(self.transformer.blocks):
            query_embeds, _ = blk(query_embeds, mask=query_masks)
        query_embeds = self.transformer.norm(query_embeds)

        if image_embeds is None and image_masks is None:
            img = batch[imgkey]
            (
                image_embeds,
                image_masks,
                patch_index,
                image_labels,
            ) = self.transformer.visual_embed(
                img,
                max_image_len=self.hparams.config["max_image_len"],
                mask_it=mask_image,
            )
        else:
            # Caller supplied precomputed visual embeddings.
            patch_index, image_labels = (
                None,
                None,
            )

        # Add modality indicator embeddings before fusing.
        text_embeds, image_embeds = (
            text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)),
            image_embeds + self.token_type_embeddings(torch.full_like(image_masks, image_token_type_idx)),
        )

        # co_embeds = text_embeds
        # co_masks = text_masks
        co_embeds = torch.cat([text_embeds, image_embeds], dim=1)
        co_masks = torch.cat([text_masks, image_masks], dim=1)

        x = co_embeds

        for i, blk in enumerate(self.transformer.blocks):
            x, _attn = blk(x, mask=co_masks)

        x = self.transformer.norm(x)
        # Split fused sequence back into text and image segments.
        text_feats, image_feats = (
            x[:, : text_embeds.shape[1]],
            x[:, text_embeds.shape[1] :],
        )
        # text_feats, image_feats = x, None
        cls_feats = self.pooler(x)

        ret = {
            "query_embeds": query_embeds,
            "query_atts": query_masks,
            "query_cls": query_embeds[:, 0, :],
            "text_feats": text_feats,
            "image_feats": image_feats,
            "multi_cls": cls_feats,
            "raw_cls_feats": x[:, 0],
            "image_labels": image_labels,
            "image_atts": image_masks,
            "multi_embeds": x,
            "multi_atts": co_masks,
            # "text_labels": text_labels,
            # "text_ids": text_ids,
            "text_masks": text_masks,
            "patch_index": patch_index,
        }

        return ret

    @torch.no_grad()
    def output_multi(self, batch, image_token_type_idx=1):
        """Inference-only fused forward (no query branch, no masking).

        Same fusion path as infer(): embed text and image, add token-type
        embeddings, run the transformer, and return fused features plus the
        pooled CLS representation.
        """
        imgkey = "image"
        text_ids = batch["text_ids"]
        text_masks = batch["text_masks"]
        text_embeds = self.text_embeddings(text_ids)
        img = batch[imgkey]
        (
            image_embeds,
            image_masks,
            patch_index,
            image_labels,
        ) = self.transformer.visual_embed(
            img,
            max_image_len=self.hparams.config["max_image_len"],
            mask_it=False,
        )
        # Add modality indicator embeddings before fusing.
        text_embeds, image_embeds = (
            text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)),
            image_embeds
            + self.token_type_embeddings(
                torch.full_like(image_masks, image_token_type_idx)
            ),
        )
        # co_embeds = text_embeds
        # co_masks = text_masks
        co_embeds = torch.cat([text_embeds, image_embeds], dim=1)
        co_masks = torch.cat([text_masks, image_masks], dim=1)

        x = co_embeds

        for i, blk in enumerate(self.transformer.blocks):
            x, _attn = blk(x, mask=co_masks)

        x = self.transformer.norm(x)
        # Split fused sequence back into text and image segments.
        text_feats, image_feats = (
            x[:, : text_embeds.shape[1]],
            x[:, text_embeds.shape[1] :],
        )
        # text_feats, image_feats = x, None
        cls_feats = self.pooler(x)

        ret = {
            "text_feats": text_feats,
            "image_feats": image_feats,
            "multi_cls": cls_feats,
            "raw_cls_feats": x[:, 0],
            "image_labels": image_labels,
            "image_atts": image_masks,
            "multi_embeds": x,
            "multi_atts": co_masks,
            # "text_labels": text_labels,
            # "text_ids": text_ids,
            "text_masks": text_masks,
            "patch_index": patch_index,
        }

        return ret

    def forward(self, batch):
        """Dispatch to the objectives listed in self.current_tasks; with no
        tasks configured, just return the infer() outputs."""
        ret = dict()
        if len(self.current_tasks) == 0:
            ret.update(self.infer(batch))
            return ret

        # Masked Language Modeling
        if "mlm" in self.current_tasks:
            ret.update(objectives.compute_mlm(self, batch))

        # Masked Patch Prediction
        if "mpp" in self.current_tasks:
            ret.update(objectives.compute_mpp(self, batch))

        # Image Text Matching
        if "itm" in self.current_tasks:
            ret.update(objectives.compute_itm_wpa(self, batch))

        # Visual Question Answering
        if "vqa" in self.current_tasks:
            ret.update(objectives.compute_vqa(self, batch))

        # Natural Language for Visual Reasoning 2
        if "nlvr2" in self.current_tasks:
            ret.update(objectives.compute_nlvr2(self, batch))

        # Image Retrieval and Text Retrieval
        if "irtr" in self.current_tasks:
            ret.update(objectives.compute_irtr(self, batch))

        return ret

    def training_step(self, batch, batch_idx):
        """Sum every "*loss*" entry produced by the active objectives."""
        # vilt_utils.set_task(self)
        output = self(batch)
        total_loss = sum([v for k, v in output.items() if "loss" in k])

        return total_loss

    def training_epoch_end(self, outs):
        # Epoch-end metric aggregation is currently disabled.
        pass
        # vilt_utils.epoch_wrapup(self)

    def validation_step(self, batch, batch_idx):
        # Runs the objectives for their logging side effects only.
        # vilt_utils.set_task(self)
        output = self(batch)

    def validation_epoch_end(self, outs):
        # Epoch-end metric aggregation is currently disabled.
        pass
        # vilt_utils.epoch_wrapup(self)

    def test_step(self, batch, batch_idx):
        """Run the objectives; for VQA, also convert logits to answers."""
        # vilt_utils.set_task(self)
        output = self(batch)
        ret = dict()

        if self.hparams.config["loss_names"]["vqa"] > 0:
            ret.update(objectives.vqa_test_step(self, batch, output))

        return ret

    def test_epoch_end(self, outs):
        """For VQA, merge the per-rank predictions into a submission file."""
        # Model name is the checkpoint filename without its ".ckpt" suffix.
        model_name = self.hparams.config["checkpoint"].split("/")[-1][:-5]

        if self.hparams.config["loss_names"]["vqa"] > 0:
            objectives.vqa_test_wrapup(outs, model_name)
        # vilt_utils.epoch_wrapup(self)

    def configure_optimizers(self):
        # Optimizer/schedule configuration is currently disabled.
        pass
        # return vilt_utils.set_schedule(self)
| 10,172 | 28.148997 | 119 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/vision_transformer.py | """ Vision Transformer (ViT) in PyTorch
A PyTorch implementation of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929
The official jax code is released and available at https://github.com/google-research/vision_transformer
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
_logger = logging.getLogger(__name__)
def download_clip(
    url: str = "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
    root: str = os.path.expanduser("~/.cache/clip"),
):
    """Download the CLIP checkpoint at ``url`` into ``root``; return its path.

    The expected SHA256 digest is taken from the second-to-last URL path
    component. An existing file with a matching digest is reused; a mismatched
    existing file is re-downloaded.

    Raises:
        RuntimeError: if the target exists but is not a regular file, or the
            downloaded file's SHA256 does not match the expected digest.
    """

    def _sha256(path):
        # Read via a context manager so the file handle is always closed
        # (the original left handles open via `open(path).read()`).
        with open(path, "rb") as f:
            return hashlib.sha256(f.read()).hexdigest()

    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        if _sha256(download_target) == expected_sha256:
            return download_target
        warnings.warn(
            f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
        )

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    if _sha256(download_target) != expected_sha256:
        # Message fixed: previously read "does not not match".
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match"
        )

    return download_target
class UnNormalize(object):
    """Invert a per-channel torchvision ``Normalize``, in place.

    Each channel is multiplied by its std and shifted by its mean; the same
    tensor is returned for convenience.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        for channel, m, s in zip(tensor, self.mean, self.std):
            channel.mul_(s).add_(m)
        return tensor
# Undo the "inception-style" normalization (mean = std = 0.5 per channel).
_INCEPTION_MEAN = [0.5, 0.5, 0.5]
_INCEPTION_STD = [0.5, 0.5, 0.5]
inception_unnormalize = transforms.Compose(
    [UnNormalize(mean=_INCEPTION_MEAN, std=_INCEPTION_STD)]
)
def _cfg(url="", **kwargs):
    """Build a default model config dict; ``kwargs`` override any default key."""
    cfg = {
        "url": url,
        "num_classes": 1000,
        "input_size": (3, 224, 224),
        "pool_size": None,
        "crop_pct": 0.9,
        "interpolation": "bicubic",
        "mean": IMAGENET_DEFAULT_MEAN,
        "std": IMAGENET_DEFAULT_STD,
        "first_conv": "patch_embed.proj",
        "classifier": "head",
    }
    cfg.update(kwargs)
    return cfg
# Registry of per-variant default configurations (pretrained-weight URL,
# input size, normalization statistics, crop settings) consumed by
# _create_vision_transformer via default_cfgs[variant].
default_cfgs = {
    # patch models (my experiments)
    "vit_small_patch16_224": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth",
    ),
    # patch models (weights ported from official Google JAX impl)
    "vit_base_patch16_224": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth",
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
    ),
    "vit_base_patch32_224": _cfg(
        url="",  # no official model weights for this combo, only for in21k
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
    ),
    "vit_base_patch16_384": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth",
        input_size=(3, 384, 384),
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
        crop_pct=1.0,
    ),
    "vit_base_patch32_384": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p32_384-830016f5.pth",
        input_size=(3, 384, 384),
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
        crop_pct=1.0,
    ),
    "vit_large_patch16_224": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth",
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
    ),
    "vit_large_patch32_224": _cfg(
        url="",  # no official model weights for this combo, only for in21k
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
    ),
    "vit_large_patch16_384": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth",
        input_size=(3, 384, 384),
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
        crop_pct=1.0,
    ),
    "vit_large_patch32_384": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth",
        input_size=(3, 384, 384),
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
        crop_pct=1.0,
    ),
    # patch models, imagenet21k (weights ported from official Google JAX impl)
    "vit_base_patch16_224_in21k": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth",
        num_classes=21843,
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
    ),
    "vit_base_patch32_224_in21k": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth",
        num_classes=21843,
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
    ),
    "vit_large_patch16_224_in21k": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth",
        num_classes=21843,
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
    ),
    "vit_large_patch32_224_in21k": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth",
        num_classes=21843,
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
    ),
    "vit_huge_patch14_224_in21k": _cfg(
        url="",  # FIXME I have weights for this but > 2GB limit for github release binaries
        num_classes=21843,
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
    ),
    # hybrid models (weights ported from official Google JAX impl)
    "vit_base_resnet50_224_in21k": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth",
        num_classes=21843,
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
        crop_pct=0.9,
        first_conv="patch_embed.backbone.stem.conv",
    ),
    "vit_base_resnet50_384": _cfg(
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth",
        input_size=(3, 384, 384),
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
        crop_pct=1.0,
        first_conv="patch_embed.backbone.stem.conv",
    ),
    # hybrid models (my experiments) — no hosted weights, empty configs
    "vit_small_resnet26d_224": _cfg(),
    "vit_small_resnet50d_s3_224": _cfg(),
    "vit_base_resnet26d_224": _cfg(),
    "vit_base_resnet50d_224": _cfg(),
    # deit models (FB weights)
    "vit_deit_tiny_patch16_224": _cfg(
        url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth"
    ),
    "vit_deit_small_patch16_224": _cfg(
        url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth"
    ),
    "vit_deit_base_patch16_224": _cfg(
        url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
    ),
    "vit_deit_base_patch16_384": _cfg(
        url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
        input_size=(3, 384, 384),
        crop_pct=1.0,
    ),
    "vit_deit_tiny_distilled_patch16_224": _cfg(
        url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth"
    ),
    "vit_deit_small_distilled_patch16_224": _cfg(
        url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth"
    ),
    "vit_deit_base_distilled_patch16_224": _cfg(
        url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
    ),
    "vit_deit_base_distilled_patch16_384": _cfg(
        url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
        input_size=(3, 384, 384),
        crop_pct=1.0,
    ),
}
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> act -> dropout -> Linear -> dropout."""

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        # Unspecified widths collapse to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention with an optional boolean key mask.

    Returns both the projected output and the attention map so callers can
    inspect attention weights.
    """

    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, mask=None):
        batch, tokens, dim = x.shape
        heads = self.num_heads
        # Single projection to q/k/v, then split heads: (3, B, heads, N, head_dim).
        qkv = self.qkv(x).reshape(batch, tokens, 3, heads, dim // heads)
        qkv = qkv.permute(2, 0, 3, 1, 4)
        # Indexing (not tuple unpacking) keeps torchscript happy.
        q, k, v = qkv[0], qkv[1], qkv[2]

        scores = (q @ k.transpose(-2, -1)) * self.scale
        if mask is not None:
            # Masked-out keys get -inf so softmax assigns them zero weight.
            scores = scores.masked_fill(~mask.bool()[:, None, None, :], float("-inf"))
        attn = self.attn_drop(scores.softmax(dim=-1))

        out = (attn @ v).transpose(1, 2).reshape(batch, tokens, dim)
        out = self.proj_drop(self.proj(out))
        return out, attn
class Block(nn.Module):
    """Pre-norm transformer encoder layer: attention and MLP, each residual."""

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        # Stochastic depth on the residual branches; identity when the rate is zero.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * mlp_ratio),
            act_layer=act_layer,
            drop=drop,
        )

    def forward(self, x, mask=None):
        attn_out, attn_map = self.attn(self.norm1(x), mask=mask)
        x = x + self.drop_path(attn_out)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x, attn_map
class PatchEmbed(nn.Module):
    """Image-to-patch embedding via a strided convolution."""

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        no_patch_embed_bias=False,
    ):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        # Number of non-overlapping patches covering the nominal input size.
        self.num_patches = (img_size[1] // patch_size[1]) * (
            img_size[0] // patch_size[0]
        )
        self.proj = nn.Conv2d(
            in_chans,
            embed_dim,
            kernel_size=patch_size,
            stride=patch_size,
            bias=not no_patch_embed_bias,
        )

    def forward(self, x):
        # FIXME look at relaxing size constraints
        return self.proj(x)
class VisionTransformer(nn.Module):
    """ Vision Transformer (ViLT variant).

    A PyTorch impl of `An Image is Worth 16x16 Words: Transformers for Image
    Recognition at Scale` - https://arxiv.org/abs/2010.11929, extended with
    ViLT-specific helpers: masked-patch-prediction label generation
    (``mask_tokens``) and variable-resolution patch embedding with per-sample
    token selection (``visual_embed``).
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        representation_size=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=None,
        add_norm_before_transformer=False,
        no_patch_embed_bias=False,
        config=None,
    ):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): accepted for API compatibility;
                NOTE(review): not used anywhere in this __init__.
            drop_rate (float): dropout rate (overridden by config["drop_rate"]
                when ``config`` is given)
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
            add_norm_before_transformer (bool): apply a LayerNorm to embedded
                patches before the transformer blocks
            no_patch_embed_bias (bool): accepted for API compatibility;
                NOTE(review): not forwarded to PatchEmbed below.
            config (dict): optional ViLT config; only "drop_rate" is read here
        """
        super().__init__()
        # The config dict, when provided, wins over the explicit drop_rate arg.
        drop_rate = drop_rate if config is None else config["drop_rate"]
        self.num_classes = num_classes
        self.num_features = (
            self.embed_dim
        ) = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        self.add_norm_before_transformer = add_norm_before_transformer
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches
        self.patch_size = patch_size
        # Side length of the patch grid at the nominal img_size (assumes square input).
        self.patch_dim = img_size // patch_size
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # num_patches + 1 positions: one extra slot for the class token.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        if add_norm_before_transformer:
            self.pre_norm = norm_layer(embed_dim)
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, depth)
        ]  # stochastic depth decay rule
        self.blocks = nn.ModuleList(
            [
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )
        self.norm = norm_layer(embed_dim)
        trunc_normal_(self.pos_embed, std=0.02)
        trunc_normal_(self.cls_token, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for Linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names excluded from weight decay by optimizer setup.
        return {"pos_embed", "cls_token"}

    def mask_tokens(self, orig_image, feats):
        """
        Prepare masked tokens inputs/labels for masked patch prediction: 80% MASK, 10% random, 10% original.

        NOTE(review): only the 80% MASK replacement is implemented below; the
        10% random / 10% original branches are absent. Also relies on
        ``self.mask_token``, which is not defined in this class — presumably
        attached by the surrounding ViLT model; verify before calling.
        """
        # Undo the (-1, 1) normalization back to (0, 1) pixel values.
        img_unnorm = orig_image * 0.5 + 0.5
        _, _, ph, pw = self.patch_embed.proj.weight.shape
        with torch.no_grad():
            # Per-patch, per-channel mean pixel value via a grouped averaging conv.
            img_unnorm_patch = F.conv2d(
                img_unnorm,
                weight=torch.ones(3, 1, ph, pw).to(img_unnorm) / (ph * pw),
                bias=None,
                stride=(ph, pw),
                padding=0,
                groups=3,
            )
        # Quantize mean colors to 0..255 ints -> (B, num_patches, 3) labels.
        labels = (
            ((img_unnorm_patch * 255).long().flatten(start_dim=2, end_dim=3))
            .permute(0, 2, 1)
            .contiguous()
        )
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
        probability_matrix = torch.full(labels.shape[:-1], 0.15)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens
        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = (
            torch.bernoulli(torch.full(labels.shape[:-1], 0.8)).bool() & masked_indices
        )
        feats[indices_replaced] = self.mask_token.to(feats)
        return feats, labels

    def visual_embed(self, _x, max_image_len=200, mask_it=False):
        """Embed image batch ``_x`` into a padded patch-token sequence.

        Supports variable-resolution batches: non-zero pixels define a validity
        mask, position embeddings are bilinearly resized per sample, and at
        most ``max_image_len`` patch tokens are kept per image (padding with
        resampled invalid patches when an image has fewer valid ones).

        Returns:
            (tokens, token_mask, (patch_index, (H, W)), mpp_labels-or-None)
        """
        _, _, ph, pw = self.patch_embed.proj.weight.shape
        x = self.patch_embed(_x)
        # A patch is "valid" iff the corresponding input region is not all-zero
        # (zero regions come from batch padding of differently-sized images).
        x_mask = (_x.sum(dim=1) != 0).float()[:, None, :, :]
        x_mask = F.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long()
        x_h = x_mask[:, 0].sum(dim=1)[:, 0]
        x_w = x_mask[:, 0].sum(dim=2)[:, 0]
        B, C, H, W = x.shape
        # Spatial part of the learned position embedding (skip the cls slot).
        spatial_pos = (
            self.pos_embed[:, 1:, :]
            .transpose(1, 2)
            .view(1, C, self.patch_dim, self.patch_dim)
        )
        # Resize the positional grid to each sample's valid (h, w), pad to (H, W).
        pos_embed = torch.cat(
            [
                F.pad(
                    F.interpolate(
                        spatial_pos, size=(h, w), mode="bilinear", align_corners=True,
                    ),
                    (0, W - w, 0, H - h),
                )
                for h, w in zip(x_h, x_w)
            ],
            dim=0,
        )
        pos_embed = pos_embed.flatten(2).transpose(1, 2)
        x = x.flatten(2).transpose(1, 2)
        # (row, col) grid coordinate of every flattened patch position.
        patch_index = (
            torch.stack(
                torch.meshgrid(
                    torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1])
                ),
                dim=-1,
            )[None, None, :, :, :]
            .expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1)
            .flatten(1, 3)
        )
        x_mask = x_mask.flatten(1)
        if mask_it:
            x, label = self.mask_tokens(_x, x)
        if (
            max_image_len < 0
            or max_image_len is None
            or not isinstance(max_image_len, int)
        ):
            # suppose aug is 800 x 1333, then, maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and be shrinked)
            # (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get.
            # if self.patch_size = 32, 25 * 41 = 1025
            # if res is 384 x 640, 12 * 20 = 240
            eff = x_h * x_w
            max_image_len = eff.max()
        else:
            eff = x_h * x_w
            max_image_len = min(eff.max(), max_image_len)
        # Split flattened positions into valid / invalid, grouped per sample.
        valid_idx = x_mask.nonzero(as_tuple=False)
        non_valid_idx = (1 - x_mask).nonzero(as_tuple=False)
        unique_rows = valid_idx[:, 0].unique()
        valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows]
        non_valid_row_idx = [
            non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows
        ]
        valid_nums = [v.size(0) for v in valid_row_idx]
        non_valid_nums = [v.size(0) for v in non_valid_row_idx]
        pad_nums = [max_image_len - v for v in valid_nums]
        select = list()
        for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)):
            if p <= 0:
                # Too many valid patches: subsample max_image_len of them.
                valid_choice = torch.multinomial(torch.ones(v).float(), max_image_len)
                select.append(valid_row_idx[i][valid_choice])
            else:
                # Too few: keep all valid patches, pad with random invalid ones.
                pad_choice = torch.multinomial(
                    torch.ones(nv).float(), p, replacement=True
                )
                select.append(
                    torch.cat(
                        [valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0,
                    )
                )
        select = torch.cat(select, dim=0)
        x = x[select[:, 0], select[:, 1]].view(B, -1, C)
        x_mask = x_mask[select[:, 0], select[:, 1]].view(B, -1)
        patch_index = patch_index[select[:, 0], select[:, 1]].view(B, -1, 2)
        pos_embed = pos_embed[select[:, 0], select[:, 1]].view(B, -1, C)
        if mask_it:
            label = label[select[:, 0], select[:, 1]].view(B, -1, 3)
            # Padded (invalid) positions never contribute to the MPP loss; the
            # extra -100 row accounts for the cls token prepended below.
            label[x_mask == 0] = -100
            label = torch.cat(
                [torch.full((label.shape[0], 1, 3), -100).to(label), label,], dim=1,
            )
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        pos_embed = torch.cat(
            (self.pos_embed[:, 0, :][:, None, :].expand(B, -1, -1), pos_embed), dim=1
        )
        x = x + pos_embed
        x = self.pos_drop(x)
        if self.add_norm_before_transformer:
            x = self.pre_norm(x)
        # The mask entry for the cls token is always 1 (always attended).
        x_mask = torch.cat([torch.ones(x_mask.shape[0], 1).to(x_mask), x_mask], dim=1)
        if mask_it:
            return x, x_mask, (patch_index, (H, W)), label
        else:
            return x, x_mask, (patch_index, (H, W)), None

    def forward_features(self, _x, max_image_len=144, mask_it=False):
        """Visual embedding plus all transformer blocks; returns (x, mask, mpp_labels)."""
        x, x_mask, patch_index, label = self.visual_embed(
            _x, max_image_len=max_image_len, mask_it=mask_it
        )
        for blk in self.blocks:
            x, _ = blk(x, mask=x_mask)
        x = self.norm(x)
        return x, x_mask, label

    def forward(self, x, max_image_len=-1):
        # NOTE(review): `self.head` is never defined in this class, so this
        # classification forward would raise AttributeError if called; within
        # ViLT only forward_features/visual_embed appear to be used — confirm.
        x, _, _ = self.forward_features(x, max_image_len=max_image_len)
        x = x[:, 0]
        x = self.head(x)
        return x
class DistilledVisionTransformer(VisionTransformer):
    """ Vision Transformer with distillation token.

    Paper: `Training data-efficient image transformers & distillation through attention` -
    https://arxiv.org/abs/2012.12877

    This impl of distilled ViT is taken from https://github.com/facebookresearch/deit.
    Mirrors ``VisionTransformer.visual_embed`` but carries an extra
    distillation token, so the position embedding has two leading token slots.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
        num_patches = self.patch_embed.num_patches
        # num_patches + 2 positions: class token and distillation token.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
        trunc_normal_(self.dist_token, std=0.02)
        trunc_normal_(self.pos_embed, std=0.02)

    def visual_embed(self, _x, max_image_len=200, mask_it=False):
        """Same contract as ``VisionTransformer.visual_embed``; the returned
        token sequence starts with [cls, dist] instead of just [cls]."""
        _, _, ph, pw = self.patch_embed.proj.weight.shape
        x = self.patch_embed(_x)
        # Validity mask: non-zero input regions mark real (non-padding) pixels.
        x_mask = (_x.sum(dim=1) != 0).float()[:, None, :, :]
        x_mask = F.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long()
        x_h = x_mask[:, 0].sum(dim=1)[:, 0]
        x_w = x_mask[:, 0].sum(dim=2)[:, 0]
        B, C, H, W = x.shape
        # Spatial positions start at index 2 (after the cls and dist slots).
        spatial_pos = (
            self.pos_embed[:, 2:, :]
            .transpose(1, 2)
            .view(1, C, self.patch_dim, self.patch_dim)
        )
        # Resize the positional grid to each sample's valid (h, w), pad to (H, W).
        pos_embed = torch.cat(
            [
                F.pad(
                    F.interpolate(
                        spatial_pos, size=(h, w), mode="bilinear", align_corners=True,
                    ),
                    (0, W - w, 0, H - h),
                )
                for h, w in zip(x_h, x_w)
            ],
            dim=0,
        )
        pos_embed = pos_embed.flatten(2).transpose(1, 2)
        x = x.flatten(2).transpose(1, 2)
        # (row, col) grid coordinate of every flattened patch position.
        patch_index = (
            torch.stack(
                torch.meshgrid(
                    torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1])
                ),
                dim=-1,
            )[None, None, :, :, :]
            .expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1)
            .flatten(1, 3)
        )
        x_mask = x_mask.flatten(1)
        if mask_it:
            x, label = self.mask_tokens(_x, x)
        if (
            max_image_len < 0
            or max_image_len is None
            or not isinstance(max_image_len, int)
        ):
            # suppose aug is 800 x 1333, then, maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and be shrinked)
            # (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get.
            # if self.patch_size = 32, 25 * 41 = 1025
            # if res is 384 x 640, 12 * 20 = 240
            eff = x_h * x_w
            max_image_len = eff.max()
        else:
            eff = x_h * x_w
            max_image_len = min(eff.max(), max_image_len)
        # Split flattened positions into valid / invalid, grouped per sample.
        valid_idx = x_mask.nonzero(as_tuple=False)
        non_valid_idx = (1 - x_mask).nonzero(as_tuple=False)
        unique_rows = valid_idx[:, 0].unique()
        valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows]
        non_valid_row_idx = [
            non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows
        ]
        valid_nums = [v.size(0) for v in valid_row_idx]
        non_valid_nums = [v.size(0) for v in non_valid_row_idx]
        pad_nums = [max_image_len - v for v in valid_nums]
        select = list()
        for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)):
            if p <= 0:
                # Too many valid patches: subsample max_image_len of them.
                valid_choice = torch.multinomial(torch.ones(v).float(), max_image_len)
                select.append(valid_row_idx[i][valid_choice])
            else:
                # Too few: keep all valid patches, pad with random invalid ones.
                pad_choice = torch.multinomial(
                    torch.ones(nv).float(), p, replacement=True
                )
                select.append(
                    torch.cat(
                        [valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0,
                    )
                )
        select = torch.cat(select, dim=0)
        x = x[select[:, 0], select[:, 1]].view(B, -1, C)
        x_mask = x_mask[select[:, 0], select[:, 1]].view(B, -1)
        patch_index = patch_index[select[:, 0], select[:, 1]].view(B, -1, 2)
        pos_embed = pos_embed[select[:, 0], select[:, 1]].view(B, -1, C)
        if mask_it:
            label = label[select[:, 0], select[:, 1]].view(B, -1, 3)
            # Padded positions never contribute to the MPP loss.
            label[x_mask == 0] = -100
            label = torch.cat(
                [torch.full((label.shape[0], 1, 3), -100).to(label), label,], dim=1,
            )
        cls_tokens = self.cls_token.expand(B, -1, -1)
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)
        pos_embed = torch.cat(
            (self.pos_embed[:, :2, :].expand(B, -1, -1), pos_embed), dim=1
        )
        x = x + pos_embed
        x = self.pos_drop(x)
        if self.add_norm_before_transformer:
            x = self.pre_norm(x)
        # Mask entries for the two special tokens are always 1 (always attended).
        x_mask = torch.cat([torch.ones(x_mask.shape[0], 2).to(x_mask), x_mask], dim=1)
        if mask_it:
            return x, x_mask, (patch_index, (H, W)), label
        else:
            return x, x_mask, (patch_index, (H, W)), None

    def forward_features(self, _x, max_image_len=144, mask_it=False):
        """Visual embedding plus all transformer blocks; returns (x, mask, mpp_labels)."""
        x, x_mask, patch_index, label = self.visual_embed(
            _x, max_image_len=max_image_len, mask_it=mask_it
        )
        for blk in self.blocks:
            x, _ = blk(x, mask=x_mask)
        x = self.norm(x)
        return x, x_mask, label

    def forward(self, x, max_image_len=-1):
        # NOTE(review): `self.head` is not defined here or in the parent class;
        # this classification forward would raise if called — confirm unused.
        x, _, _ = self.forward_features(x, max_image_len=max_image_len)
        x = x[:, 0]
        x = self.head(x)
        return x
def resize_pos_embed(posemb, posemb_new):
    """Rescale a (1, 1 + H*W, C) position embedding to a new token count.

    The leading class token is carried over unchanged; the spatial grid is
    bilinearly interpolated from its old square size to the new one. Adapted
    from the official JAX checkpoint loader:
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224

    Args:
        posemb: pretrained embedding of shape (1, 1 + gs_old**2, C).
        posemb_new: target embedding (only its shape is used),
            shape (1, 1 + gs_new**2, C).

    Returns:
        Tensor of shape ``posemb_new.shape``.
    """
    logger = logging.getLogger(__name__)
    logger.info("Resized position embedding: %s to %s", posemb.shape, posemb_new.shape)
    # The original guarded this split with a dead `if True:` whose `else`
    # branch (no class token) was unreachable; the token-split path is kept.
    posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
    ntok_new = posemb_new.shape[1] - 1
    gs_old = int(math.sqrt(len(posemb_grid)))
    gs_new = int(math.sqrt(ntok_new))
    logger.info("Position embedding grid-size from %s to %s", gs_old, gs_new)
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode="bilinear")
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
    return torch.cat([posemb_tok, posemb_grid], dim=1)
def checkpoint_filter_fn(state_dict, model):
    """Adapt a pretrained checkpoint to ``model``.

    Converts pre-conv patch-embedding weights to conv shape and resizes the
    position embedding when the checkpoint's grid differs from the model's.
    """
    if "model" in state_dict:
        # deit checkpoints nest the weights under a "model" key
        state_dict = state_dict["model"]
    converted = {}
    for name, tensor in state_dict.items():
        if "patch_embed.proj.weight" in name and len(tensor.shape) < 4:
            # Older checkpoints stored the patchify projection as a matrix;
            # reshape it into the conv kernel layout.
            out_ch, _, kh, kw = model.patch_embed.proj.weight.shape
            tensor = tensor.reshape(out_ch, -1, kh, kw)
        elif name == "pos_embed" and tensor.shape != model.pos_embed.shape:
            tensor = resize_pos_embed(tensor, model.pos_embed)
        converted[name] = tensor
    return converted
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate a (possibly distilled) ViT for a registered ``variant``.

    Pops ``num_classes`` / ``img_size`` / ``representation_size`` overrides out
    of ``kwargs`` (falling back to the variant's default config) and optionally
    loads pretrained weights filtered through ``checkpoint_filter_fn``.
    """
    default_cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", default_cfg["num_classes"])
    img_size = kwargs.pop("img_size", default_cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != default_cfg["num_classes"]:
        # Remove representation layer if fine-tuning. This may not always be the desired action,
        # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = default_cfg

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
    """Custom 'small' ViT: depth 8, 8 heads, mlp_ratio 3, no qkv bias."""
    kw = dict(
        patch_size=16,
        embed_dim=768,
        depth=8,
        num_heads=8,
        mlp_ratio=3.0,
        qkv_bias=False,
        norm_layer=nn.LayerNorm,
        **kwargs,
    )
    if pretrained:
        # NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
        kw.setdefault("qk_scale", 768 ** -0.5)
    return _create_vision_transformer(
        "vit_small_patch16_224", pretrained=pretrained, **kw
    )
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
    """ViT-B/16 (https://arxiv.org/abs/2010.11929);
    ImageNet-1k weights fine-tuned from in21k @ 224x224."""
    return _create_vision_transformer(
        "vit_base_patch16_224",
        pretrained=pretrained,
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        **kwargs,
    )
@register_model
def vit_base_patch32_224(pretrained=False, **kwargs):
    """ViT-B/32 (https://arxiv.org/abs/2010.11929); no pretrained weights."""
    return _create_vision_transformer(
        "vit_base_patch32_224",
        pretrained=pretrained,
        patch_size=32,
        embed_dim=768,
        depth=12,
        num_heads=12,
        **kwargs,
    )
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
    """ViT-B/16 (https://arxiv.org/abs/2010.11929);
    ImageNet-1k weights fine-tuned from in21k @ 384x384."""
    return _create_vision_transformer(
        "vit_base_patch16_384",
        pretrained=pretrained,
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        **kwargs,
    )
@register_model
def vit_base_patch32_384(pretrained=False, **kwargs):
    """ViT-B/32 (https://arxiv.org/abs/2010.11929);
    ImageNet-1k weights fine-tuned from in21k @ 384x384."""
    return _create_vision_transformer(
        "vit_base_patch32_384",
        pretrained=pretrained,
        patch_size=32,
        embed_dim=768,
        depth=12,
        num_heads=12,
        **kwargs,
    )
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
    """ViT-L/16 (https://arxiv.org/abs/2010.11929);
    ImageNet-1k weights fine-tuned from in21k @ 224x224."""
    return _create_vision_transformer(
        "vit_large_patch16_224",
        pretrained=pretrained,
        patch_size=16,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        **kwargs,
    )
@register_model
def vit_large_patch32_224(pretrained=False, **kwargs):
    """ViT-L/32 (https://arxiv.org/abs/2010.11929); no pretrained weights."""
    return _create_vision_transformer(
        "vit_large_patch32_224",
        pretrained=pretrained,
        patch_size=32,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        **kwargs,
    )
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
    """ViT-L/16 (https://arxiv.org/abs/2010.11929);
    ImageNet-1k weights fine-tuned from in21k @ 384x384."""
    return _create_vision_transformer(
        "vit_large_patch16_384",
        pretrained=pretrained,
        patch_size=16,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        **kwargs,
    )
@register_model
def vit_large_patch32_384(pretrained=False, **kwargs):
    """ViT-L/32 (https://arxiv.org/abs/2010.11929);
    ImageNet-1k weights fine-tuned from in21k @ 384x384."""
    return _create_vision_transformer(
        "vit_large_patch32_384",
        pretrained=pretrained,
        patch_size=32,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        **kwargs,
    )
@register_model
def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
    """ViT-B/16 (https://arxiv.org/abs/2010.11929);
    ImageNet-21k weights @ 224x224, with a pre-logits representation layer."""
    return _create_vision_transformer(
        "vit_base_patch16_224_in21k",
        pretrained=pretrained,
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        representation_size=768,
        **kwargs,
    )
@register_model
def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
    """ViT-B/32 (https://arxiv.org/abs/2010.11929);
    ImageNet-21k weights @ 224x224, with a pre-logits representation layer."""
    return _create_vision_transformer(
        "vit_base_patch32_224_in21k",
        pretrained=pretrained,
        patch_size=32,
        embed_dim=768,
        depth=12,
        num_heads=12,
        representation_size=768,
        **kwargs,
    )
@register_model
def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
    """ViT-L/16 (https://arxiv.org/abs/2010.11929);
    ImageNet-21k weights @ 224x224, with a pre-logits representation layer."""
    return _create_vision_transformer(
        "vit_large_patch16_224_in21k",
        pretrained=pretrained,
        patch_size=16,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        representation_size=1024,
        **kwargs,
    )
@register_model
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
    """ViT-L/32 (https://arxiv.org/abs/2010.11929);
    ImageNet-21k weights @ 224x224, with a pre-logits representation layer."""
    return _create_vision_transformer(
        "vit_large_patch32_224_in21k",
        pretrained=pretrained,
        patch_size=32,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        representation_size=1024,
        **kwargs,
    )
@register_model
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
    """ViT-H/14 (https://arxiv.org/abs/2010.11929); ImageNet-21k @ 224x224.

    NOTE: converted weights not currently available, too large for github
    release hosting.
    """
    return _create_vision_transformer(
        "vit_huge_patch14_224_in21k",
        pretrained=pretrained,
        patch_size=14,
        embed_dim=1280,
        depth=32,
        num_heads=16,
        representation_size=1280,
        **kwargs,
    )
@register_model
def vit_base_resnet50_224_in21k(pretrained=False, **kwargs):
    """R50+ViT-B/16 hybrid, ImageNet-21k weights @ 224x224.

    Paper: https://arxiv.org/abs/2010.11929
    Weights: https://github.com/google-research/vision_transformer
    """
    # ResNetV2 stem: no pre-activation, StdConv + GroupNorm, 3 stages, headless.
    stem = ResNetV2(
        layers=(3, 4, 9), num_classes=0, global_pool="",
        in_chans=kwargs.get("in_chans", 3), preact=False,
        stem_type="same", conv_layer=StdConv2dSame)
    cfg = dict(embed_dim=768, depth=12, num_heads=12, hybrid_backbone=stem,
               representation_size=768, **kwargs)
    return _create_vision_transformer(
        "vit_base_resnet50_224_in21k", pretrained=pretrained, **cfg)


@register_model
def vit_base_resnet50_384(pretrained=False, **kwargs):
    """R50+ViT-B/16 hybrid, ImageNet-1k weights fine-tuned from 21k @ 384x384.

    Paper: https://arxiv.org/abs/2010.11929
    Weights: https://github.com/google-research/vision_transformer
    """
    # Same headless ResNetV2 stem as the in21k variant.
    stem = ResNetV2(
        layers=(3, 4, 9), num_classes=0, global_pool="",
        in_chans=kwargs.get("in_chans", 3), preact=False,
        stem_type="same", conv_layer=StdConv2dSame)
    cfg = dict(embed_dim=768, depth=12, num_heads=12, hybrid_backbone=stem,
               **kwargs)
    return _create_vision_transformer(
        "vit_base_resnet50_384", pretrained=pretrained, **cfg)
@register_model
def vit_small_resnet26d_224(pretrained=False, **kwargs):
    """Custom ViT-small hybrid with a ResNet26D stride-32 stem. No pretrained weights."""
    stem = resnet26d(
        pretrained=pretrained, in_chans=kwargs.get("in_chans", 3),
        features_only=True, out_indices=[4])
    cfg = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3,
               hybrid_backbone=stem, **kwargs)
    return _create_vision_transformer(
        "vit_small_resnet26d_224", pretrained=pretrained, **cfg)


@register_model
def vit_small_resnet50d_s3_224(pretrained=False, **kwargs):
    """Custom ViT-small hybrid with a 3-stage ResNet50D (stride 16) stem. No pretrained weights."""
    stem = resnet50d(
        pretrained=pretrained, in_chans=kwargs.get("in_chans", 3),
        features_only=True, out_indices=[3])
    cfg = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3,
               hybrid_backbone=stem, **kwargs)
    return _create_vision_transformer(
        "vit_small_resnet50d_s3_224", pretrained=pretrained, **cfg)


@register_model
def vit_base_resnet26d_224(pretrained=False, **kwargs):
    """Custom ViT-base hybrid with a ResNet26D stride-32 stem. No pretrained weights."""
    stem = resnet26d(
        pretrained=pretrained, in_chans=kwargs.get("in_chans", 3),
        features_only=True, out_indices=[4])
    cfg = dict(embed_dim=768, depth=12, num_heads=12, hybrid_backbone=stem,
               **kwargs)
    return _create_vision_transformer(
        "vit_base_resnet26d_224", pretrained=pretrained, **cfg)


@register_model
def vit_base_resnet50d_224(pretrained=False, **kwargs):
    """Custom ViT-base hybrid with a ResNet50D stride-32 stem. No pretrained weights."""
    stem = resnet50d(
        pretrained=pretrained, in_chans=kwargs.get("in_chans", 3),
        features_only=True, out_indices=[4])
    cfg = dict(embed_dim=768, depth=12, num_heads=12, hybrid_backbone=stem,
               **kwargs)
    return _create_vision_transformer(
        "vit_base_resnet50d_224", pretrained=pretrained, **cfg)
@register_model
def vit_deit_tiny_patch16_224(pretrained=False, **kwargs):
    """DeiT-tiny @ 224x224 (https://arxiv.org/abs/2012.12877).

    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    cfg = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
    return _create_vision_transformer(
        "vit_deit_tiny_patch16_224", pretrained=pretrained, **cfg)


@register_model
def vit_deit_small_patch16_224(pretrained=False, **kwargs):
    """DeiT-small @ 224x224 (https://arxiv.org/abs/2012.12877).

    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    cfg = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
    return _create_vision_transformer(
        "vit_deit_small_patch16_224", pretrained=pretrained, **cfg)


@register_model
def vit_deit_base_patch16_224(pretrained=False, **kwargs):
    """DeiT-base @ 224x224 (https://arxiv.org/abs/2012.12877).

    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    cfg = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer(
        "vit_deit_base_patch16_224", pretrained=pretrained, **cfg)


@register_model
def vit_deit_base_patch16_384(pretrained=False, **kwargs):
    """DeiT-base @ 384x384 (https://arxiv.org/abs/2012.12877).

    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    cfg = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer(
        "vit_deit_base_patch16_384", pretrained=pretrained, **cfg)
@register_model
def vit_deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
    """DeiT-tiny distilled @ 224x224 (https://arxiv.org/abs/2012.12877).

    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    cfg = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
    return _create_vision_transformer(
        "vit_deit_tiny_distilled_patch16_224",
        pretrained=pretrained, distilled=True, **cfg)


@register_model
def vit_deit_small_distilled_patch16_224(pretrained=False, **kwargs):
    """DeiT-small distilled @ 224x224 (https://arxiv.org/abs/2012.12877).

    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    cfg = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
    return _create_vision_transformer(
        "vit_deit_small_distilled_patch16_224",
        pretrained=pretrained, distilled=True, **cfg)


@register_model
def vit_deit_base_distilled_patch16_224(pretrained=False, **kwargs):
    """DeiT-base distilled @ 224x224 (https://arxiv.org/abs/2012.12877).

    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    cfg = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer(
        "vit_deit_base_distilled_patch16_224",
        pretrained=pretrained, distilled=True, **cfg)


@register_model
def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs):
    """DeiT-base distilled @ 384x384 (https://arxiv.org/abs/2012.12877).

    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    cfg = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer(
        "vit_deit_base_distilled_patch16_384",
        pretrained=pretrained, distilled=True, **cfg)
| 49,034 | 34.558376 | 155 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/heads.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.models.bert.modeling_bert import BertPredictionHeadTransform
class Pooler(nn.Module):
    """BERT-style pooler: project the first ([CLS]) token and apply tanh."""

    def __init__(self, hidden_size):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # Pool by taking only the first token of the sequence.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class ITMHead(nn.Module):
    """Binary image-text matching classifier (match / no-match logits)."""

    def __init__(self, hidden_size):
        super().__init__()
        self.fc = nn.Linear(hidden_size, 2)

    def forward(self, x):
        return self.fc(x)
class MLMHead(nn.Module):
    """Masked-language-modeling head: BERT transform + vocab decoder.

    The decoder carries its own bias parameter so the weight matrix can be
    tied to the input embeddings without sharing the bias.
    """

    def __init__(self, config, weight=None):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Optionally tie the decoder weights to an external embedding matrix.
        if weight is not None:
            self.decoder.weight = weight

    def forward(self, x):
        hidden = self.transform(x)
        return self.decoder(hidden) + self.bias
class MPPHead(nn.Module):
    """Masked-patch-prediction head: BERT transform + 256*3-way projection
    (one 256-class distribution per RGB channel)."""

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, 256 * 3)

    def forward(self, x):
        hidden = self.transform(x)
        return self.decoder(hidden)
| 1,569 | 27.035714 | 83 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/modules/__init__.py | from .vilt_module import ViLTransformerSS
| 42 | 20.5 | 41 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/transforms/utils.py | from torchvision import transforms
from PIL import Image
class MinMaxResize:
    """Resize so the short side reaches `shorter` while capping the long side
    at `longer`, then snap both dimensions down to multiples of 32."""

    def __init__(self, shorter=800, longer=1333):
        self.min = shorter
        self.max = longer

    def __call__(self, x):
        w, h = x.size
        # Scale so the shorter side equals self.min.
        ratio = self.min / min(w, h)
        if h < w:
            new_h, new_w = self.min, ratio * w
        else:
            new_h, new_w = ratio * h, self.min
        # Shrink further if the longer side would exceed self.max.
        if max(new_h, new_w) > self.max:
            shrink = self.max / max(new_h, new_w)
            new_h *= shrink
            new_w *= shrink
        # Round to nearest int, then floor to a multiple of 32.
        new_h, new_w = int(new_h + 0.5), int(new_w + 0.5)
        new_h, new_w = new_h // 32 * 32, new_w // 32 * 32
        return x.resize((new_w, new_h), resample=Image.BICUBIC)
class UnNormalize(object):
    """Invert torchvision-style normalization in place: t = t * std + mean."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Normalized image tensor of size (C, H, W).
        Returns:
            Tensor: The same tensor, de-normalized in place.
        """
        # Per-channel inverse of t.sub_(m).div_(s).
        for channel, m, s in zip(tensor, self.mean, self.std):
            channel.mul_(s).add_(m)
        return tensor
# This is simple maximum entropy normalization performed in Inception paper
inception_normalize = transforms.Compose(
    [transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
# ViT uses simple non-biased inception normalization
# https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py#L132
# Inverse of inception_normalize. NOTE: UnNormalize mutates its input tensor
# in place (uses mul_/add_), so the input is modified as a side effect.
inception_unnormalize = transforms.Compose(
    [UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
| 1,645 | 27.877193 | 98 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/transforms/randaug.py | # code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def _rand_sign(v):
    # Flip the sign half of the time so each transform goes in a random direction.
    return -v if random.random() > 0.5 else v


def ShearX(img, v):  # [-0.3, 0.3]
    """Shear horizontally by factor v (direction randomized)."""
    assert -0.3 <= v <= 0.3
    return img.transform(img.size, PIL.Image.AFFINE, (1, _rand_sign(v), 0, 0, 1, 0))


def ShearY(img, v):  # [-0.3, 0.3]
    """Shear vertically by factor v (direction randomized)."""
    assert -0.3 <= v <= 0.3
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, _rand_sign(v), 1, 0))


def TranslateX(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate horizontally by fraction v of the width (direction randomized)."""
    assert -0.45 <= v <= 0.45
    offset = _rand_sign(v) * img.size[0]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, offset, 0, 1, 0))


def TranslateXabs(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate horizontally by v pixels (direction randomized)."""
    assert 0 <= v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, _rand_sign(v), 0, 1, 0))


def TranslateY(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate vertically by fraction v of the height (direction randomized)."""
    assert -0.45 <= v <= 0.45
    offset = _rand_sign(v) * img.size[1]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, offset))


def TranslateYabs(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate vertically by v pixels (direction randomized)."""
    assert 0 <= v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, _rand_sign(v)))


def Rotate(img, v):  # [-30, 30]
    """Rotate by v degrees (direction randomized)."""
    assert -30 <= v <= 30
    return img.rotate(_rand_sign(v))
def AutoContrast(img, _):
    """Maximize image contrast; the magnitude argument is ignored."""
    return PIL.ImageOps.autocontrast(img)


def Invert(img, _):
    """Invert all pixel values; the magnitude argument is ignored."""
    return PIL.ImageOps.invert(img)


def Equalize(img, _):
    """Equalize the image histogram; the magnitude argument is ignored."""
    return PIL.ImageOps.equalize(img)


def Flip(img, _):  # not from the paper
    """Mirror horizontally; the magnitude argument is ignored."""
    return PIL.ImageOps.mirror(img)


def Solarize(img, v):  # [0, 256]
    """Invert all pixel values at or above threshold v."""
    assert 0 <= v <= 256
    return PIL.ImageOps.solarize(img, v)


def SolarizeAdd(img, addition=0, threshold=128):
    """Add `addition` to every pixel (clipped to [0, 255]), then solarize.

    Fix: the original used `np.int`, an alias removed in NumPy 1.24; use a
    concrete dtype so the cast cannot raise AttributeError on modern NumPy.
    """
    img_np = np.array(img).astype(np.int64)
    img_np = np.clip(img_np + addition, 0, 255).astype(np.uint8)
    img = Image.fromarray(img_np)
    return PIL.ImageOps.solarize(img, threshold)
def Posterize(img, v):  # [4, 8]
    """Reduce to int(v) bits per channel (minimum 1)."""
    bits = max(1, int(v))
    return PIL.ImageOps.posterize(img, bits)


def Contrast(img, v):  # [0.1,1.9]
    """Adjust contrast by factor v."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Contrast(img).enhance(v)


def Color(img, v):  # [0.1,1.9]
    """Adjust color saturation by factor v."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Color(img).enhance(v)


def Brightness(img, v):  # [0.1,1.9]
    """Adjust brightness by factor v."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Brightness(img).enhance(v)


def Sharpness(img, v):  # [0.1,1.9]
    """Adjust sharpness by factor v."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Sharpness(img).enhance(v)


def Cutout(img, v):  # [0, 60] => percentage: [0, 0.2]
    """Erase a random square whose side is fraction v of the image width."""
    assert 0.0 <= v <= 0.2
    if v <= 0.0:
        return img
    return CutoutAbs(img, v * img.size[0])
def CutoutAbs(img, v):  # [0, 60] => percentage: [0, 0.2]
    """Erase a v-by-v square at a random center, filled with a fixed gray.

    Returns a copy; the input image is left untouched.
    """
    if v < 0:
        return img
    w, h = img.size
    cx = np.random.uniform(w)
    cy = np.random.uniform(h)
    x0 = int(max(0, cx - v / 2.0))
    y0 = int(max(0, cy - v / 2.0))
    box = (x0, y0, min(w, x0 + v), min(h, y0 + v))
    patched = img.copy()
    # Fixed gray fill (ImageNet-ish mean color).
    PIL.ImageDraw.Draw(patched).rectangle(box, (125, 123, 114))
    return patched
def SamplePairing(imgs):  # [0, 0.4]
    """Return an op that blends its input with a random image from `imgs`."""

    def blend_op(img1, v):
        idx = np.random.choice(len(imgs))
        other = PIL.Image.fromarray(imgs[idx])
        return PIL.Image.blend(img1, other, v)

    return blend_op


def Identity(img, v):
    """No-op augmentation: return the image unchanged."""
    return img
def augment_list():
    """Return the RandAugment op pool as (fn, min_magnitude, max_magnitude).

    Based on the TF TPU EfficientNet policy:
    https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
    (Invert and CutoutAbs are intentionally excluded.)
    """
    return [
        (AutoContrast, 0, 1),
        (Equalize, 0, 1),
        (Rotate, 0, 30),
        (Posterize, 0, 4),
        (Solarize, 0, 256),
        (SolarizeAdd, 0, 110),
        (Color, 0.1, 1.9),
        (Contrast, 0.1, 1.9),
        (Brightness, 0.1, 1.9),
        (Sharpness, 0.1, 1.9),
        (ShearX, 0.0, 0.3),
        (ShearY, 0.0, 0.3),
        (TranslateXabs, 0.0, 100),
        (TranslateYabs, 0.0, 100),
    ]
class Lighting(object):
    """AlexNet-style PCA ("fancy") lighting noise.

    Perturbs each pixel along the RGB principal components (eigvec) with
    random magnitudes drawn from N(0, alphastd) scaled by eigval.
    """

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)

    def __call__(self, img):
        # Disabled when alphastd == 0: return the input unchanged.
        if self.alphastd == 0:
            return img
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        scaled = self.eigvec.type_as(img).clone()
        scaled = scaled.mul(alpha.view(1, 3).expand(3, 3))
        scaled = scaled.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = scaled.sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
    """Zero out a random `length` x `length` square of a (C, H, W) tensor in place.

    Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
    """

    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        h, w = img.size(1), img.size(2)
        mask = np.ones((h, w), np.float32)
        # Random square center; the square is clipped at the image borders.
        cy = np.random.randint(h)
        cx = np.random.randint(w)
        half = self.length // 2
        y1, y2 = np.clip(cy - half, 0, h), np.clip(cy + half, 0, h)
        x1, x2 = np.clip(cx - half, 0, w), np.clip(cx + half, 0, w)
        mask[y1:y2, x1:x2] = 0.0
        cut = torch.from_numpy(mask).expand_as(img)
        img *= cut
        return img
class RandAugment:
    """Apply `n` ops sampled (with replacement) from the RandAugment pool,
    each at the magnitude implied by `m`."""

    def __init__(self, n, m):
        self.n = n
        self.m = m  # magnitude in [0, 30]
        self.augment_list = augment_list()

    def __call__(self, img):
        for op, lo, hi in random.choices(self.augment_list, k=self.n):
            # Linearly interpolate the op's magnitude range by m/30.
            magnitude = (float(self.m) / 30) * float(hi - lo) + lo
            img = op(img, magnitude)
        return img
| 6,990 | 24.892593 | 134 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/transforms/pixelbert.py | from .utils import (
inception_normalize,
MinMaxResize,
)
from torchvision import transforms
from .randaug import RandAugment
def pixelbert_transform(size=800):
    """PixelBERT eval transform: min/max resize (longer side scaled by
    1333/800) followed by inception normalization."""
    longer = int((1333 / 800) * size)
    return transforms.Compose(
        [
            MinMaxResize(shorter=size, longer=longer),
            transforms.ToTensor(),
            inception_normalize,
        ]
    )


def pixelbert_transform_randaug(size=800):
    """Training variant of `pixelbert_transform` with RandAugment(2, 9)
    prepended to the pipeline."""
    longer = int((1333 / 800) * size)
    pipeline = transforms.Compose(
        [
            MinMaxResize(shorter=size, longer=longer),
            transforms.ToTensor(),
            inception_normalize,
        ]
    )
    pipeline.transforms.insert(0, RandAugment(2, 9))
    return pipeline
| 714 | 22.064516 | 54 | py |
Mr.Right | Mr.Right-main/models/ViLT/vilt/transforms/__init__.py | from .pixelbert import (
pixelbert_transform,
pixelbert_transform_randaug,
)
# Registry of available pipelines, keyed by transform name.
_transforms = {
    "pixelbert": pixelbert_transform,
    "pixelbert_randaug": pixelbert_transform_randaug,
}


def keys_to_transforms(keys: list, size=224):
    """Instantiate the transform pipeline registered under each key."""
    pipelines = []
    for key in keys:
        pipelines.append(_transforms[key](size=size))
    return pipelines
| 301 | 20.571429 | 56 | py |
Mr.Right | Mr.Right-main/data/utils.py | import re
import cv2
import numpy as np
# ref: https://github.com/salesforce/ALBEF
def pre_caption(caption, max_words):
    """Normalize a caption: lowercase, strip punctuation, collapse whitespace,
    and truncate to at most `max_words` space-separated words.

    (ref: https://github.com/salesforce/ALBEF)
    """
    # Lowercase and drop punctuation, then normalize separators and the
    # <person> placeholder.
    text = re.sub(r"([,.'!?\"()*#:;~])", '', caption.lower())
    text = text.replace('-', ' ').replace('/', ' ').replace('<person>', 'person')
    # Collapse runs of whitespace into single spaces.
    text = re.sub(r"\s{2,}", ' ', text)
    text = text.rstrip('\n').strip(' ')
    # Truncate to the word budget.
    words = text.split(' ')
    if len(words) > max_words:
        text = ' '.join(words[:max_words])
    return text
## aug functions
def identity_func(img):
    # No-op: returns the input image unchanged (used as a null augmentation).
    return img
def autocontrast_func(img, cutoff=0):
    '''
    same output as PIL.ImageOps.autocontrast
    '''
    n_bins = 256
    def tune_channel(ch):
        # Total number of pixels in this channel.
        n = ch.size
        # Number of pixels to ignore at each end of the histogram (percent).
        cut = cutoff * n // 100
        if cut == 0:
            high, low = ch.max(), ch.min()
        else:
            hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
            # Lowest intensity remaining after discarding `cut` darkest pixels.
            low = np.argwhere(np.cumsum(hist) > cut)
            low = 0 if low.shape[0] == 0 else low[0]
            # Highest intensity remaining after discarding `cut` brightest pixels.
            high = np.argwhere(np.cumsum(hist[::-1]) > cut)
            high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
        if high <= low:
            # Degenerate range: identity lookup table.
            table = np.arange(n_bins)
        else:
            # Linearly remap [low, high] onto the full intensity range.
            scale = (n_bins - 1) / (high - low)
            offset = -low * scale
            table = np.arange(n_bins) * scale + offset
            table[table < 0] = 0
            table[table > n_bins - 1] = n_bins - 1
        table = table.clip(0, 255).astype(np.uint8)
        # Remap every pixel through the lookup table.
        return table[ch]
    channels = [tune_channel(ch) for ch in cv2.split(img)]
    out = cv2.merge(channels)
    return out
def equalize_func(img):
    '''
    same output as PIL.ImageOps.equalize
    PIL's implementation is different from cv2.equalize
    '''
    n_bins = 256
    def tune_channel(ch):
        hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
        # Keep only histogram bins that actually contain pixels.
        non_zero_hist = hist[hist != 0].reshape(-1)
        # PIL's step size: pixel count excluding the last non-zero bin,
        # spread over 255 output levels.
        step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
        if step == 0: return ch
        # Cumulative lookup table offset by half a step (matches PIL exactly).
        n = np.empty_like(hist)
        n[0] = step // 2
        n[1:] = hist[:-1]
        table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
        return table[ch]
    channels = [tune_channel(ch) for ch in cv2.split(img)]
    out = cv2.merge(channels)
    return out
def rotate_func(img, degree, fill=(0, 0, 0)):
    '''
    like PIL, rotate by degree, not radians
    '''
    height, width = img.shape[0], img.shape[1]
    # Rotate about the image center, unit scale; exposed pixels get `fill`.
    matrix = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1)
    return cv2.warpAffine(img, matrix, (width, height), borderValue=fill)
def solarize_func(img, thresh=128):
    '''
    same output as PIL.ImageOps.solarize
    '''
    # Lookup table: values below the threshold pass through, the rest invert.
    lut = np.arange(256)
    lut = np.where(lut < thresh, lut, 255 - lut)
    lut = lut.clip(0, 255).astype(np.uint8)
    return lut[img]
def color_func(img, factor):
    '''
    same output as PIL.ImageEnhance.Color
    '''
    # Interpolate between the grayscale projection (factor=0) and the
    # original colors (factor=1) using a single 3x3 matrix multiply,
    # avoiding PIL's slower per-pixel blend.
    gray_weights = np.float32([[0.114], [0.587], [0.299]])
    M = (
        np.float32([
            [0.886, -0.114, -0.114],
            [-0.587, 0.413, -0.587],
            [-0.299, -0.299, 0.701]]) * factor
        + gray_weights
    )
    return np.matmul(img, M).clip(0, 255).astype(np.uint8)
def contrast_func(img, factor):
    """
    same output as PIL.ImageEnhance.Contrast
    """
    # The pivot is the luma-weighted mean intensity of the whole image.
    mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
    # Scale every level away from (or toward) the pivot by `factor`.
    lut = np.array([
        (el - mean) * factor + mean
        for el in range(256)
    ]).clip(0, 255).astype(np.uint8)
    return lut[img]
def brightness_func(img, factor):
    '''
    same output as PIL.ImageEnhance.Brightness
    '''
    # Scale every level by `factor`, clipped to the valid uint8 range.
    lut = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
    return lut[img]
def sharpness_func(img, factor):
    '''
    The differences the this result and PIL are all on the 4 boundaries, the center
    areas are same
    '''
    # PIL's smoothing kernel: 3x3 box with a weighted (5x) center, normalized by 13.
    kernel = np.ones((3, 3), dtype=np.float32)
    kernel[1][1] = 5
    kernel /= 13
    degenerate = cv2.filter2D(img, -1, kernel)
    if factor == 0.0:
        # Fully smoothed image.
        out = degenerate
    elif factor == 1.0:
        # Unchanged input.
        out = img
    else:
        # Interpolate between the smoothed and the original image; the 1-pixel
        # border is left untouched, matching PIL, which does not filter edges.
        out = img.astype(np.float32)
        degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
        out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
        out = out.astype(np.uint8)
    return out
def shear_x_func(img, factor, fill=(0, 0, 0)):
    """Horizontal shear via an affine warp (cv2 analogue of PIL ShearX)."""
    h, w = img.shape[0], img.shape[1]
    warp = np.float32([[1, factor, 0], [0, 1, 0]])
    return cv2.warpAffine(img, warp, (w, h), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)


def translate_x_func(img, offset, fill=(0, 0, 0)):
    '''
    same output as PIL.Image.transform
    '''
    h, w = img.shape[0], img.shape[1]
    # Negative x translation mirrors PIL's transform convention.
    warp = np.float32([[1, 0, -offset], [0, 1, 0]])
    return cv2.warpAffine(img, warp, (w, h), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)


def translate_y_func(img, offset, fill=(0, 0, 0)):
    '''
    same output as PIL.Image.transform
    '''
    h, w = img.shape[0], img.shape[1]
    # Negative y translation mirrors PIL's transform convention.
    warp = np.float32([[1, 0, 0], [0, 1, -offset]])
    return cv2.warpAffine(img, warp, (w, h), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
def posterize_func(img, bits):
    '''
    same output as PIL.ImageOps.posterize
    '''
    # Keep only the top `bits` bits of each channel.
    # Fix: `np.uint8(255 << (8 - bits))` produced out-of-range values for
    # bits < 4 and relied on implicit modulo-256 wraparound; NumPy >= 2.0
    # raises OverflowError on such conversions, so wrap explicitly with & 0xFF.
    mask = np.uint8((255 << (8 - bits)) & 0xFF)
    return np.bitwise_and(img, mask)
def shear_y_func(img, factor, fill=(0, 0, 0)):
    """Vertical shear via an affine warp (cv2 analogue of PIL ShearY)."""
    h, w = img.shape[0], img.shape[1]
    warp = np.float32([[1, 0, 0], [factor, 1, 0]])
    return cv2.warpAffine(img, warp, (w, h), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)


def cutout_func(img, pad_size, replace=(0, 0, 0)):
    """Fill a random (pad_size x pad_size) square with `replace`.

    Returns a copy; the input array is left untouched. The square is clipped
    at the image borders.
    """
    replace = np.array(replace, dtype=np.uint8)
    h, w = img.shape[0], img.shape[1]
    rh, rw = np.random.random(2)
    half = pad_size // 2
    cy, cx = int(rh * h), int(rw * w)
    y1, y2 = max(cy - half, 0), min(cy + half, h)
    x1, x2 = max(cx - half, 0), min(cx + half, w)
    out = img.copy()
    out[y1:y2, x1:x2, :] = replace
    return out
### level to args
def enhance_level_to_args(MAX_LEVEL):
    """Map a level in [0, MAX_LEVEL] to an enhancement factor in [0.1, 1.9]."""
    def to_args(level):
        factor = (level / MAX_LEVEL) * 1.8 + 0.1
        return (factor,)
    return to_args
def shear_level_to_args(MAX_LEVEL, replace_value):
    """Map a level to a randomly-signed shear factor in [-0.3, 0.3] plus fill."""
    def to_args(level):
        factor = (level / MAX_LEVEL) * 0.3
        if np.random.random() > 0.5: factor = -factor
        return (factor, replace_value)
    return to_args
def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
    """Map a level to a randomly-signed pixel offset up to `translate_const`."""
    def to_args(level):
        offset = (level / MAX_LEVEL) * float(translate_const)
        if np.random.random() > 0.5: offset = -offset
        return (offset, replace_value)
    return to_args
def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
    """Map a level to a cutout size up to `cutout_const` plus fill color."""
    def to_args(level):
        return (int((level / MAX_LEVEL) * cutout_const), replace_value)
    return to_args
def solarize_level_to_args(MAX_LEVEL):
    """Map a level to a solarize threshold in [0, 256]."""
    def to_args(level):
        return (int((level / MAX_LEVEL) * 256),)
    return to_args
def none_level_to_args(level):
    """For ops that take no magnitude arguments."""
    return ()
def posterize_level_to_args(MAX_LEVEL):
    """Map a level to a posterize bit count in [0, 4]."""
    def to_args(level):
        return (int((level / MAX_LEVEL) * 4),)
    return to_args
def rotate_level_to_args(MAX_LEVEL, replace_value):
    """Map a level to a randomly-signed rotation angle up to 30 degrees."""
    def to_args(level):
        angle = (level / MAX_LEVEL) * 30
        # NOTE: the original uses `< 0.5` here (other ops use `> 0.5`).
        if np.random.random() < 0.5:
            angle = -angle
        return (angle, replace_value)
    return to_args
# Name -> augmentation implementation (all cv2/numpy-based, operate on ndarrays).
func_dict = {
    'Identity': identity_func,
    'AutoContrast': autocontrast_func,
    'Equalize': equalize_func,
    'Rotate': rotate_func,
    'Solarize': solarize_func,
    'Color': color_func,
    'Contrast': contrast_func,
    'Brightness': brightness_func,
    'Sharpness': sharpness_func,
    'ShearX': shear_x_func,
    'TranslateX': translate_x_func,
    'TranslateY': translate_y_func,
    'Posterize': posterize_func,
    'ShearY': shear_y_func,
}
# Maximum translation offset (pixels) and magnitude scale shared by the
# level_to_args converters below.
translate_const = 10
MAX_LEVEL = 10
# Mid-gray fill used where a geometric warp exposes empty pixels.
replace_value = (128, 128, 128)
# Name -> converter turning an integer level into that op's argument tuple.
arg_dict = {
    'Identity': none_level_to_args,
    'AutoContrast': none_level_to_args,
    'Equalize': none_level_to_args,
    'Rotate': rotate_level_to_args(MAX_LEVEL, replace_value),
    'Solarize': solarize_level_to_args(MAX_LEVEL),
    'Color': enhance_level_to_args(MAX_LEVEL),
    'Contrast': enhance_level_to_args(MAX_LEVEL),
    'Brightness': enhance_level_to_args(MAX_LEVEL),
    'Sharpness': enhance_level_to_args(MAX_LEVEL),
    'ShearX': shear_level_to_args(MAX_LEVEL, replace_value),
    'TranslateX': translate_level_to_args(
        translate_const, MAX_LEVEL, replace_value
    ),
    'TranslateY': translate_level_to_args(
        translate_const, MAX_LEVEL, replace_value
    ),
    'Posterize': posterize_level_to_args(MAX_LEVEL),
    'ShearY': shear_level_to_args(MAX_LEVEL, replace_value),
}
class RandomAugment(object):
    """Apply N randomly chosen ops at magnitude M (cv2/numpy RandAugment).

    Each sampled op is applied with probability 0.5.
    """

    def __init__(self, N=2, M=10, isPIL=False, augs=[]):
        self.N = N
        self.M = M
        self.isPIL = isPIL
        # Default to the full op pool when no subset is given.
        self.augs = augs if augs else list(arg_dict.keys())

    def get_random_ops(self):
        """Sample N op names; each pairs with apply-probability 0.5 and level M."""
        names = np.random.choice(self.augs, self.N)
        return [(name, 0.5, self.M) for name in names]

    def __call__(self, img):
        if self.isPIL:
            img = np.array(img)
        for name, prob, level in self.get_random_ops():
            if np.random.random() > prob:
                continue
            args = arg_dict[name](level)
            img = func_dict[name](img, *args)
        return img
Mr.Right | Mr.Right-main/data/extract_multimodal_val.py | import os
import json
import pdb
import random
from argparse import ArgumentParser
random.seed(42)
def main(args):
    """Build a validation document pool: every val-query target document plus
    random distractors, all re-indexed with ids 0..val_amount-1.

    Side effects: rewrites args.mul_val in place (queries get new ids matching
    their documents) and writes the sampled document pool to args.output.
    """
    document = json.load(open(args.mul_doc,'r'))
    val_query = json.load(open(args.mul_val,'r'))
    # Index the full corpus by document id.
    document_dict = dict()
    for doc in document:
        document_dict[doc['id']] = doc
    val_document = []
    for idx, query in enumerate(val_query):
        # Copy before mutating so the corpus entry is not aliased; remove it
        # from the pool so it cannot be re-sampled as a distractor below.
        new_doc = document_dict[query['id']].copy()
        del document_dict[query['id']]
        # Re-index query and its target document with the same new id.
        query["id"] = idx
        new_doc["id"] = idx
        val_document.append(new_doc)
    # Pad the pool with random distractor documents up to val_amount,
    # continuing the id sequence.
    remaining_doc = list(document_dict.values())
    add_doc = random.sample(remaining_doc,args.val_amount - len(val_document))
    idx = len(val_document)
    for doc in add_doc:
        doc['id'] = idx
        idx += 1
    val_document += add_doc
    with open(args.mul_val, "w") as outfile:
        json.dump(val_query, outfile, indent = 4)
    with open(args.output, "w") as outfile:
        json.dump(val_document, outfile, indent = 4)
if __name__ == '__main__':
    # CLI: sample a validation document pool from the full multimodal corpus.
    parser = ArgumentParser()
    parser.add_argument('--mul_doc', default='multimodal_documents.json')
    parser.add_argument('--mul_val', default='multimodal_val_queries.json')
    parser.add_argument('--val_amount', default=10000, type=int)
    parser.add_argument('--output', default="multimodal_val_documents.json")
    args = parser.parse_args()
    print(args)
    main(args)
Mr.Right | Mr.Right-main/data/data_module.py | import random
import torch
import os
import json
import pickle
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import Compose, ToTensor, Normalize, Resize, RandomResizedCrop, RandomHorizontalFlip
from pytorch_lightning import LightningDataModule
from data.utils import pre_caption, RandomAugment
from PIL import Image,ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class TextToMultiDataset(Dataset):
    def __init__(self, args, configs, data, task, tokenizer):
        """Dataset over multimodal documents and queries.

        Args:
            args: run-time arguments (stored for reference).
            configs: config object supporting both attribute access
                (image_res, query_length, text_length) and item access
                (["image_root"]).
            data: list of example dicts for the given task.
            task: one of "train", "val_queries", "test_queries", "docs".
            tokenizer: HuggingFace-style tokenizer used by collate_fn.
        """
        self.data = data
        self.image_size = configs.image_res
        self.image_root = configs["image_root"]
        self.task = task
        self.tokenizer = tokenizer
        self.configs = configs
        self.args = args
        # Max token lengths for queries and document texts, respectively.
        self.q_max_len = configs.query_length
        self.d_max_len = configs.text_length
        # NOTE(review): these look like CLIP's normalization constants — confirm.
        normalize = Normalize((0.48145466, 0.4578275, 0.40821073),
                              (0.26862954, 0.26130258, 0.27577711))
        if task == "train":
            # Training-time augmentation pipeline for document images.
            self.transform = Compose([
                RandomResizedCrop(self.image_size,
                                  scale=(0.5, 1.),
                                  interpolation=Image.BICUBIC),
                RandomHorizontalFlip(),
                RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',
                                                  'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),
                ToTensor(),
                normalize,
            ])
        elif task == "val_queries":
            # Query tasks are text-only, so no image transform is built.
            self.data = self.data  # NOTE(review): redundant self-assignment.
        elif task == 'test_queries':
            self.data = self.data  # NOTE(review): redundant self-assignment.
        elif task == "docs":
            self.data = self.data  # NOTE(review): redundant self-assignment.
            # Deterministic eval-time preprocessing for document images.
            self.transform = Compose([
                Resize((self.image_size,self.image_size),
                       interpolation=Image.BICUBIC),
                ToTensor(),
                normalize,
            ])
    def __len__(self):
        # Number of examples (queries or documents, depending on task).
        return len(self.data)
def __getitem__(self, index):
result = {}
data = self.data[index]
if self.task == "train":
# query str
result['query_text_str'] = data["txt_query_str"]
result['query_image_str'] = data["img_query_str"]
r = random.random()
if r < 0.33:
query_str = data["txt_query_str"]
elif 0.33 < r < 0.66:
query_str = data["img_query_str"]
elif 0.66< r < 0.83:
query_str = data["txt_query_str"] + data["img_query_str"]
else:
query_str = data["img_query_str"] + data["txt_query_str"]
image = data["doc_image"]
image = image.replace("./","")
image_path = os.path.join(self.image_root,image)
image = Image.open(image_path).convert('RGB')
image = self.transform(image)
result['query_str'] = query_str
result['doc_id'] = data["doc_id"]
result['doc_str'] = data["doc_text_str"]
result['doc_image'] = image
result['image_path'] = image_path
elif self.task == 'val_queries':
result['img_query_str'] = data['img_query_str']
result['txt_query_str'] = data['txt_query_str']
result['multi_query_str'] = data['multi_query_str']
result['doc_id'] = data["doc_id"]
elif self.task == 'test_queries':
result['img_query_str'] = data['img_query_str']
result['txt_query_str'] = data['txt_query_str']
result['multi_query_str'] = data['multi_query_str']
result['doc_id'] = data["doc_id"]
elif self.task == 'docs':
image = data["doc_image"]
image = image.replace("./","")
image_path = os.path.join(self.image_root,image)
image = Image.open(image_path).convert('RGB')
image = self.transform(image)
result['doc_str'] = data["doc_text_str"]
result['doc_image'] = image
result['image_path'] = image_path
result['doc_id'] = data["doc_id"]
return result
def collate_fn(self, batch):
if self.task == 'test_queries' or self.task == 'val_queries':
batch_dict = {
"doc_id": torch.tensor([b["doc_id"] for b in batch]).long(),
"img_query_str": [b["img_query_str"] for b in batch],
"txt_query_str": [b["txt_query_str"] for b in batch],
"multi_query_str": [b["multi_query_str"] for b in batch],
}
img_query_str_tensor = self.tokenizer(
text=[b["img_query_str"] for b in batch],
max_length=self.q_max_len,
padding="max_length",
truncation=True,
return_tensors="pt")
txt_query_str_tensor = self.tokenizer(
text=[b["txt_query_str"] for b in batch],
max_length=self.q_max_len,
padding="max_length",
truncation=True,
return_tensors="pt")
multi_query_str_tensor = self.tokenizer(
text=[b["multi_query_str"] for b in batch],
max_length=self.q_max_len,
padding="max_length",
truncation=True,
return_tensors="pt")
batch_dict["img_query_str_tensor"] = img_query_str_tensor
batch_dict["txt_query_str_tensor"] = txt_query_str_tensor
batch_dict["multi_query_str_tensor"] = multi_query_str_tensor
return batch_dict
if self.task == 'docs':
batch_dict = {
"doc_id": torch.tensor([b["doc_id"] for b in batch]).long(),
"image_path": [b["image_path"] for b in batch],
"doc_str" : [b["doc_str"] for b in batch],
"doc_image_tensor": torch.stack([b["doc_image"] for b in batch]),
}
doc_str_tensor = self.tokenizer(
text=[b["doc_str"] for b in batch],
max_length=self.d_max_len,
padding="max_length",
truncation=True,
return_tensors="pt")
batch_dict["doc_str_tensor"] = doc_str_tensor
return batch_dict
batch_dict = {
"doc_id": torch.tensor([b["doc_id"] for b in batch]).long(),
"query_str": [b["query_str"] for b in batch],
"doc_str" : [b["doc_str"] for b in batch],
"doc_image_tensor": torch.stack([b["doc_image"] for b in batch]),
}
query_str_tensor = self.tokenizer(
text=[b["query_str"] for b in batch],
max_length=self.q_max_len,
padding="longest" if self.task == "train" else "max_length",
# padding="max_length",
truncation=True,
return_tensors="pt")
batch_dict["query_str_tensor"] = query_str_tensor
doc_str_tensor = self.tokenizer(
text=[b["doc_str"] for b in batch],
max_length=self.d_max_len,
padding="longest" if self.task == "train" else "max_length",
# padding="max_length",
truncation=True,
return_tensors="pt")
batch_dict["doc_str_tensor"] = doc_str_tensor
query_str_tensor_total = query_str_tensor
if self.task == "train":
query_str_tensor_total = self.tokenizer(
text=[b["query_text_str"] + ' ' + b["query_image_str"] for b in batch],
max_length=self.q_max_len,
padding="longest",
# padding="max_length",
truncation=True,
return_tensors="pt")
if self.args.ctx_prediction:
context_labels = torch.zeros(len(batch), self.tokenizer.vocab_size)
pad_id = self.tokenizer.pad_token_id
sep_id = self.tokenizer.sep_token_id
cls_id = self.tokenizer.cls_token_id
context_labels[torch.arange(len(batch)).unsqueeze(1), query_str_tensor_total['input_ids']] = 1
context_labels[torch.arange(len(batch)).unsqueeze(1), doc_str_tensor['input_ids']] = 1
context_labels[:, pad_id] = 0
context_labels[:, sep_id] = 0
context_labels[:, cls_id] = 0
batch_dict["context_labels"] = context_labels
return batch_dict
class TextToMultiDataModule(LightningDataModule):
def __init__(self,args,configs,tokenizer):
super().__init__()
self.args = args
self.shuffle = args.shuffle
self.batch_size = args.batch_size
self.num_workers = args.num_workers
self.configs = configs
self.max_len = configs.text_length
self.tokenizer = tokenizer
def prepare_data(self,train=None,val=None,test=None,document=None):
# called only on 1 GPU
def load_data(config, mode):
datas = self.prepare_text2multi_data(config,mode)
return datas
if train is not None: self.train_datas = load_data(train, 'train')
if val is not None: self.val_datas = load_data(val, 'val')
if test is not None: self.test_datas = load_data(test, 'test')
if document is not None: self.documents = load_data(document,'doc')
def setup(self):
if hasattr(self, 'train_datas'):
self.train_dataset = TextToMultiDataset(self.args, self.configs, self.train_datas,"train", self.tokenizer)
if hasattr(self, 'val_datas'):
self.val_queries_dataset = TextToMultiDataset(self.args, self.configs, self.val_datas, "val_queries", self.tokenizer)
self.val_docs_dataset = TextToMultiDataset(self.args, self.configs, self.documents, "docs", self.tokenizer)
if hasattr(self, 'test_datas'):
self.test_queries_dataset = TextToMultiDataset(self.args, self.configs, self.test_datas, "test_queries", self.tokenizer)
self.test_docs_dataset = TextToMultiDataset(self.args, self.configs, self.documents, "docs", self.tokenizer)
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers,pin_memory=True,drop_last=True, collate_fn=self.train_dataset.collate_fn)
def val_dataloader(self):
return [
DataLoader(self.val_queries_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.val_queries_dataset.collate_fn),
DataLoader(self.val_docs_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.val_docs_dataset.collate_fn),
]
def test_dataloader(self):
return [
DataLoader(self.test_queries_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.test_queries_dataset.collate_fn),
DataLoader(self.test_docs_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=self.test_docs_dataset.collate_fn),
]
def prepare_text2multi_data(self,files,task="train"):
print("\nReading json files")
data = []
for f in files:
print(f"File: {f}",end="\r")
data += json.load(open(f,'r'))
result = []
for idx,pairs in enumerate(data):
if task == "train":
result.append({
"img_query_str": pre_caption(pairs["query_img"],self.max_len),
"txt_query_str": pre_caption(pairs["query_text"],self.max_len),
"doc_text_str": pre_caption(pairs["doc_text"], self.max_len),
"doc_image": pairs["doc_image"],
"doc_id": idx,
})
if task == "val":
result.append({
"multi_query_str": pre_caption(pairs["query_multi"],self.max_len),
"img_query_str": pre_caption(pairs["query_img"],self.max_len),
"txt_query_str": pre_caption(pairs["query_text"],self.max_len),
"doc_id": pairs["id"],
})
if task == "test":
result.append({
"img_query_str": pre_caption(pairs["query_img"],self.max_len),
"txt_query_str": pre_caption(pairs["query_text"],self.max_len),
"multi_query_str": pre_caption(pairs["query_multi"],self.max_len),
"doc_id": pairs["id"],
})
if task == "doc":
result.append({
"doc_text_str": pre_caption(pairs["doc_text"], self.max_len),
"doc_image": pairs["doc_image"],
"doc_id": pairs["id"],
})
return result
| 13,856 | 44.136808 | 176 | py |
NORPPA | NORPPA-main/sql.py | import sqlite3
import numpy as np
from datetime import datetime
def create_connection(path="/app/mount/tasks.db"):
""" create a database connection to the SQLite database
"""
conn = None
try:
conn = sqlite3.connect(path, check_same_thread=False)
except sqlite3.Error as e:
print(e)
return conn
def select_class_path_by_ids(conn, ids):
c = conn.cursor()
bindings = ",".join(["?"] * len(ids))
c.execute(f"SELECT image_path, seal_id FROM database WHERE image_id IN ({bindings})", [int(x) for x in ids])
return c.fetchall()
def create_database_table(conn):
c = conn.cursor()
c.execute("PRAGMA foreign_keys=ON")
c.execute('''
CREATE TABLE IF NOT EXISTS database
([image_id] INTEGER PRIMARY KEY AUTOINCREMENT, [seal_id] TEXT, [image_path] TEXT, [seal_type] TEXT,[encoding] TEXT, [date] TEXT)
''')
c.close()
conn.commit()
def get_patch_features(conn, image_id):
c = conn.cursor()
c.execute("SELECT encoding FROM patches WHERE image_id = ?", (int(image_id),))
result = c.fetchall()
db_features = np.array([np.fromstring(res[0], dtype=np.float32, sep=' ') for res in result])
c.close()
return db_features
def get_patch_features_multiple_ids(conn, ids):
c = conn.cursor()
bindings = ",".join(["?"] * len(ids))
c.execute(f"SELECT image_id, encoding FROM patches WHERE image_id IN ({bindings})", [int(x) for x in ids])
result = c.fetchall()
db_features = np.array([np.fromstring(res[1], dtype=np.float32, sep=' ') for res in result])
db_ids = np.array([int(res[0]) for res in result])
c.close()
return db_ids, db_features
def get_fisher_vectors(conn, ids):
c = conn.cursor()
bindings = ",".join(["?"] * len(ids))
c.execute(f"SELECT encoding FROM database WHERE image_id IN ({bindings})", [int(x) for x in ids])
result = c.fetchall()
db_features = np.array([np.fromstring(res[0], dtype=float, sep=' ') for res in result])
return db_features
def get_label(conn, image_id):
c = conn.cursor()
c.execute("SELECT seal_id FROM database WHERE image_id = ?", (image_id,))
result = c.fetchone()
return result
def get_db_ids(conn, seal_type="norppa"):
c = conn.cursor()
c.execute("SELECT image_id FROM database WHERE seal_type = ? ORDER BY image_id", (seal_type,))
result = c.fetchall()
result = [res[0] for res in result]
return result
def get_img_paths_by_id(conn, seal_id):
c = conn.cursor()
c.execute("SELECT image_id, image_path FROM database WHERE seal_id = ?", (seal_id,))
result = c.fetchall()
image_ids = [res[0] for res in result]
image_paths = [res[1] for res in result]
return image_ids, image_paths
def get_patches(conn, image_id):
c = conn.cursor()
c.execute("SELECT coordinates, encoding FROM patches WHERE image_id = ?", (int(image_id),))
result = c.fetchall()
coordinates = np.array([np.fromstring(res[0], dtype=np.float32, sep=' ') for res in result])
db_features = np.array([np.fromstring(res[1], dtype=np.float32, sep=' ') for res in result])
return db_features, coordinates
def get_patches_multiple(conn, ids):
c = conn.cursor()
bindings = ",".join(["?"] * len(ids))
c.execute(f"SELECT coordinates, encoding FROM patches WHERE image_id IN ({bindings})", [int(x) for x in ids])
result = c.fetchall()
coordinates = np.array([np.fromstring(res[0], dtype=np.float32, sep=' ') for res in result])
db_features = np.array([np.fromstring(res[1], dtype=np.float32, sep=' ') for res in result])
return db_features, coordinates
def get_patch_features(conn, image_id):
c = conn.cursor()
c.execute("SELECT encoding FROM patches WHERE image_id = ?", (int(image_id),))
result = c.fetchall()
db_features = np.array([np.fromstring(res[0], dtype=np.float32, sep=' ') for res in result])
return db_features
def get_patch_features_multiple_ids_with_labels(conn, ids):
c = conn.cursor()
bindings = ",".join(["?"] * len(ids))
c.execute(f"SELECT patches.image_id, patches.encoding, database.seal_id FROM patches INNER JOIN database ON patches.image_id = database.image_id WHERE patches.image_id IN ({bindings})", [int(x) for x in ids])
result = c.fetchall()
db_features = np.array([np.fromstring(res[1], dtype=np.float64, sep=' ') for res in result])
db_ids = np.array([int(res[0]) for res in result])
db_labels = np.array([res[2] for res in result])
return db_ids, db_features, db_labels
def create_database_table(conn):
c = conn.cursor()
c.execute("PRAGMA foreign_keys=ON")
c.execute('''
CREATE TABLE IF NOT EXISTS database
([image_id] INTEGER PRIMARY KEY AUTOINCREMENT, [seal_id] TEXT, [image_path] TEXT, [seal_type] TEXT,[encoding] TEXT, [date] TEXT)
''')
c.close()
conn.commit()
def create_patches_table(conn):
c = conn.cursor()
c.execute('''
CREATE TABLE IF NOT EXISTS patches
([patch_id] INTEGER PRIMARY KEY AUTOINCREMENT, [image_id] INTEGER, [coordinates] TEXT, [encoding] TEXT, FOREIGN KEY (image_id) REFERENCES database(image_id) ON DELETE CASCADE)
''')
c.close()
conn.commit()
def insert_patches(conn, image_id, coordinates, encoding):
c = conn.cursor()
c.execute("INSERT INTO patches (image_id, coordinates, encoding) VALUES (?, ?, ?)", (image_id, np.array2string(coordinates)[1:-1], np.array2string(encoding)[1:-1]))
conn.commit()
c.close()
def update_encoding(conn, image_id, encoding, date):
c = conn.cursor()
sql = """ UPDATE database SET encoding = ?, date = ? WHERE image_id = ? """
conn.execute(sql, (np.array2string(encoding)[1:-1], date, image_id))
conn.commit()
c.close()
def insert_database(conn, image_path, seal_id, seal_type, encoding, date):
c = conn.cursor()
c.execute("INSERT INTO database (image_path, seal_id, seal_type, encoding, date) VALUES (?, ?, ?, ?, ?)", (image_path, seal_id, seal_type, np.array2string(encoding)[1:-1], date))
row_id = c.lastrowid
conn.commit()
c.close()
return row_id | 6,354 | 29.552885 | 212 | py |
NORPPA | NORPPA-main/tools.py |
from datetime import datetime
import random
import string
import os
import sys
import shutil
from zipfile import ZipFile
from skimage import color
import numpy as np
from datetime import datetime
from six.moves import urllib
from pattern_extraction.utils import thickness_resize
from pattern_extraction.extract_pattern import smart_resize
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from PIL import Image
import json
import gc
from tqdm import tqdm
import pickle
def save_pickle(x, file):
with open(file, 'wb') as f_file:
pickle.dump(x, f_file, protocol=4)
return x
def load_pickle(file):
with open(file, 'rb') as f_file:
result = pickle.load(f_file)
return result
def print_step(text):
def identity_print(x):
print(text)
return x
return identity_print
def flatten(t):
return [item for sublist in t for item in sublist]
def apply_step(input, step, rest):
result = step(input)
if rest:
result = flatten((apply_step(x, rest[0], rest[1:]) for x in result))
return result
def apply_pipeline(image, pipeline, verbose=False):
return apply_pipeline_dataset([image], pipeline, verbose=verbose)
def apply_pipeline_dataset(dataset, pipeline, verbose=False):
result = dataset
for (i,step) in enumerate(pipeline):
result = step(result)
if verbose:
print(f"Completed {i+1}/{len(pipeline)} steps")
return result
def process(SOURCE_DIR, pipeline):
result = []
num_files = sum([len(files) for r, d, files in os.walk(SOURCE_DIR)])
for root, _, files in os.walk(SOURCE_DIR):
for f in files:
num_files -= 1
result.extend(apply_pipeline(os.path.join(root, f), SOURCE_DIR, pipeline))
gc.collect(0)
return result
def curry(func, *params, **kw):
return lambda x: func(x, *params, **kw)
def cat(array):
return [y for x in array for y in x]
def apply_sequential(func):
return lambda dataset: cat([func(x) for x in tqdm(dataset)])
def curry_sequential(func, *params, **kw):
return apply_sequential(curry(func, *params, **kw))
def compose(*funcs):
return curry(apply_step, step=funcs[0], rest=funcs[1:])
def compose_sequential(*funcs):
return apply_sequential(compose(*funcs))
def save_id_result(result, source_dir, dest_path):
res_json_path = os.path.join(dest_path, 'result.json')
data = []
for _, res in result:
res["query"] = os.path.join(source_dir, res["query"])
data.append(res)
with open(res_json_path, mode='w') as outfile:
json.dump(data, outfile)
def save_upload_result(result, dest_path):
res_json_path = os.path.join(dest_path, 'result.json')
data = []
for image, label in result:
if image is None:
res = "Fail"
else:
res = "Success"
data.append((label,res))
with open(res_json_path, mode='w') as outfile:
json.dump(data, outfile)
def randomString(stringLength=8):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def generate_task_id(task_type):
now = datetime.now()
date_time = now.strftime("%d%m%y_%H%M%S")
return f"{task_type[:4]}_{date_time}"
def clean_dir(folder):
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
def to_zip_file(file, zip_file):
if os.path.isfile(file):
zip_file.write(file)
else:
addFolderToZip(zip_file, file, file)
def toZip(file, filename):
zip_file = ZipFile(filename, 'w')
to_zip_file(file, zip_file)
zip_file.close()
def addFolderToZip(zip_file, folder, base_path):
for file in os.listdir(folder):
full_path = os.path.join(folder, file)
if os.path.isfile(full_path):
# print('File added: ' + str(full_path))
zip_file.write(full_path, os.path.relpath(full_path, start=base_path))
# print(full_path, base_path)
elif os.path.isdir(full_path):
# print('Entering folder: ' + str(full_path))
addFolderToZip(zip_file, full_path, base_path)
def is_image(filename):
return filename.lower().endswith(('.png', '.jpg', '.jpeg', 'cr2', 'pef', 'nef'))
def is_raw_image(filename):
return filename.lower().endswith(('cr2', 'pef', 'nef'))
def crop_to_bb(img, return_bb=False):
labels = color.rgb2gray(np.asarray(img))
labels = (labels > 0).astype(np.uint8)
where = np.where(labels)
y1, x1 = np.amin(where, axis=1)
y2, x2 = np.amax(where, axis=1)
img_cropped = img.crop((x1, y1, x2, y2))
if return_bb:
return (img_cropped, (x1,y1,x2-x1,y2-y1))
else:
return img_cropped
def crop_imgs_in_dir(src):
for dirpath, _, filenames in os.walk(src):
for filename in filenames:
source = os.path.join(dirpath, filename)
img = Image.open(source)
img = img.convert('RGB')
img_cropped = crop_to_bb(img)
img_cropped.save(source)
def crop_step(input):
image, label = input
if image is not None:
image, bb = crop_to_bb(image, True)
if type(label) is dict:
label["bb"] = bb
return [(image, label)]
def crop_step_sequential(input):
return apply_sequential(crop_step)(input)
def thickness_resize_step(input):
image, label = input
if image is not None:
image, ratio = thickness_resize(image, return_ratio=True)
if type(label) is dict:
label["resize_ratio"] = ratio
return [(image, label)]
def change_dir(path, new_dir):
name = os.path.basename(path)
return os.path.join(new_dir, name)
def get_save_step(dest_dir):
return lambda x: save_step(x, dest_dir)
def get_save_step(dest_dir, new_path_name=None):
return curry_sequential(save_step, dest_dir, new_path_name=new_path_name)
def test_save_step(dest_dir):
return lambda x: test_step(x, dest_dir)
def test_step(input, dest_dir):
image, label = input
if image is not None:
if type(label) is dict and 'dataset_dir' in label:
new_path = os.path.join(dest_dir, os.path.relpath(label['file'], label['dataset_dir']))
else:
new_path = change_dir(label['file'], dest_dir)
if os.path.exists(new_path):
return []
return [(image, label)]
def save_step(input, dest_dir, new_path_name=None):
image, label = input
if image is not None:
os.makedirs(dest_dir, exist_ok=True)
if type(label) is dict and 'dataset_dir' in label:
new_path = os.path.join(dest_dir, os.path.relpath(label['file'], label['dataset_dir']))
else:
new_path = change_dir(label['file'], dest_dir)
os.makedirs(os.path.dirname(new_path), exist_ok = True)
image.save(new_path, format="png")
if type(label) is dict and new_path_name is not None:
label[new_path_name] = new_path
return [(image, label)]
def download_url(url, dst):
print('* url="{}"'.format(url))
print('* destination="{}"'.format(dst))
def _reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write(
'\r...%d%%, %d MB, %d KB/s, %d seconds passed'
% (percent, progress_size / (1024 * 1024), speed, duration)
)
sys.stdout.flush()
urllib.request.urlretrieve(url, dst, _reporthook)
sys.stdout.write('\n')
def read_image(path):
if not os.path.exists(path):
raise IOError(f'"{path}" does not exist')
try:
img = Image.open(path).convert('RGB')
except IOError:
print(f'IOError when reading "{path}"')
return img
def get_topk_matrix(identification_result):
result = []
for (db_labels, q_labels) in (identification_result):
q_class = q_labels['class_id']
q_ln = len(q_labels['labels'])
result.extend([[db_label['db_label']['class_id']==q_class for db_label in db_labels]]*q_ln)
topk = len(identification_result[0][0])
return (np.asarray(result), topk)
def calculate_accuracy(result, max_topk=None):
topk_matrix, topk = get_topk_matrix(result)
topk = max_topk if max_topk is not None else topk
print(f'TOP-k={topk}')
hits = topk_matrix
# hits = (db_labels.T == q_labels).T
print([sum((np.sum(hits[:, :j+1], axis=1) > 0)) / len(topk_matrix)
for j in range(topk)])
def get_topk_matrix(identification_result):
result = []
for (db_labels, q_labels) in identification_result:
q_class = q_labels['class_id']
q_ln = len(q_labels['labels'])
result.append([db_label['db_label']['class_id']==q_class for db_label in db_labels]*q_ln)
return np.asarray(result)
def get_topk_accuracy(identification_result):
result = []
for (db_labels, q_labels) in identification_result:
q_class = q_labels['class_id']
q_ln = len(q_labels['labels'])
hits = [db_label['db_label']['class_id']==q_class for db_label in db_labels]
for _ in range(q_ln):
result.append(hits)
result = np.asarray(result)
return [sum((np.sum(result[:, :j+1], axis=1) > 0)) / len(result) for j in range(result.shape[1])]
def print_topk_accuracy(identification_result, label=""):
topk_acc = get_topk_accuracy(identification_result)
print(label)
for (i, acc) in enumerate(topk_acc):
print(f"Top-{i+1} accuracy: {acc*100}%")
return identification_result
def resize_dataset(input, size):
image, img_label = input
if image is None:
return [input]
result, ratio = smart_resize(image, size, return_ratio=True)
img_label["resize_ratio"] = ratio
return [(result, img_label)]
def update_codebooks(input, cfg):
codebooks, encoded = input
cfg["codebooks"] = codebooks
return encoded | 10,545 | 29.391931 | 101 | py |
NORPPA | NORPPA-main/config.py | import os
import sys
from pathlib import Path
import cv2
import numpy as np
file_folder = Path(__file__).resolve().parent
sys.path.append(str(file_folder / "reidentification/hesaff_pytorch"))
from HessianAffinePatches import init_affnet, init_orinet, init_hardnet
from segmentation.detectron_segment import create_predictor
from pattern_extraction.extract_pattern import create_unet
from torchvision.datasets.utils import download_url
from sql import create_connection
def init_file(path, url, allow_download=True):
if Path(path).exists():
return path
elif allow_download:
download_url(url, Path(path).parent, Path(path).name)
return path
else:
raise Exception("The file {path} is not found!")
def config(use_cuda=True, allow_download=True):
config = {}
base_dir = Path(__file__).resolve().parent
mount_path = "/ekaterina/work/data/"
path_db = mount_path + "DB.db"
config["conn"] = create_connection(path_db)
config["detectron_predictor"] = create_predictor(init_file(base_dir/"models/R-101-FPN_150ims.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/R-101-FPN_150ims.pth",
allow_download=allow_download),
not use_cuda )
config["unet"] = create_unet(init_file(base_dir/"models/unet_seals_512.hdf5",
"https://github.com/kwadraterry/NORPPA/raw/models/models/unet_seals_512.hdf5",
allow_download=allow_download))
config["net"] = init_hardnet(init_file(base_dir/"models/HardNet++.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/HardNet++.pth",
allow_download=allow_download),
use_cuda=use_cuda)
affnet = init_affnet(init_file(base_dir/"models/AffNet.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/AffNet.pth",
allow_download=allow_download),
use_cuda=use_cuda)
orinet = init_orinet(init_file(base_dir/"models/OriNet.pth",
"https://github.com/kwadraterry/NORPPA/raw/models/models/OriNet.pth",
allow_download=allow_download),
use_cuda=use_cuda)
codebooks_path = init_file(base_dir/'codebooks/codebooks.pickle',
"https://github.com/kwadraterry/NORPPA/raw/models/codebooks/codebooks.pickle",
allow_download=allow_download)
config["codebooks_path"] = Path(base_dir/"codebooks/norppa.pickle")
config["codebooks"] = None
config["hesaff_args"] = {'init_sigma': 1.3213713243956968,
'mrSize': 9.348280997446642,
'nlevels': 10,
'num_features': 480,
'unsharp_amount': 6.80631647207343,
'unsharp_radius': None,
'use_cuda' :use_cuda}
config["hesaff_args"]["AffNet"] = affnet
config["hesaff_args"]["OriNet"] = orinet
config["hesaff_args"]["patch_size"] = 32
config["use_hesaff"] = True
config["pca"] = 64
config["use_kpca"] = False
config["n_clusters"] = 1400
config["features_shape"] = 64
config["topk"] = 10
config["kernel"] = "rbf"
config["use_cuda"] = use_cuda
config["dataset_dir"] = base_dir/'data'
config["sequence_dataset_dir"] = '/ekaterina/work/data/many_dataset/original_small'
config["batch_size"] = 256
config["geometric"] = {
"method": cv2.RANSAC,
"max_iters": 5000,
"max_reproj_err": 0.2,
"estimator": lambda d, mask: d ** np.sum(mask)
}
return config | 4,039 | 41.526316 | 123 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.