| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
FragmentVC | FragmentVC-main/preprocess.py | #!/usr/bin/env python3
"""Precompute Wav2Vec features."""
import os
import json
from pathlib import Path
from tempfile import mkstemp
from multiprocessing import cpu_count
import tqdm
import torch
from torch.utils.data import DataLoader
from jsonargparse import ArgumentParser, ActionConfigFile
from models import load_pretrained_wav2vec
from data import PreprocessDataset
def parse_args():
"""Parse command-line arguments."""
parser = ArgumentParser()
parser.add_argument("data_dirs", type=str, nargs="+")
parser.add_argument("wav2vec_path", type=str)
parser.add_argument("out_dir", type=str)
parser.add_argument("--trim_method", choices=["librosa", "vad"], default="vad")
parser.add_argument("--n_workers", type=int, default=cpu_count())
parser.add_argument("--sample_rate", type=int, default=16000)
parser.add_argument("--preemph", type=float, default=0.97)
parser.add_argument("--hop_len", type=int, default=326)
parser.add_argument("--win_len", type=int, default=1304)
parser.add_argument("--n_fft", type=int, default=1304)
parser.add_argument("--n_mels", type=int, default=80)
parser.add_argument("--f_min", type=int, default=80)
parser.add_argument("--audio_config", action=ActionConfigFile)
return vars(parser.parse_args())
def main(
data_dirs,
wav2vec_path,
out_dir,
trim_method,
n_workers,
sample_rate,
preemph,
hop_len,
win_len,
n_fft,
n_mels,
f_min,
**kwargs,
):
"""Main function."""
out_dir_path = Path(out_dir)
if out_dir_path.exists():
assert out_dir_path.is_dir()
else:
out_dir_path.mkdir(parents=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset = PreprocessDataset(
data_dirs,
trim_method,
sample_rate,
preemph,
hop_len,
win_len,
n_fft,
n_mels,
f_min,
)
dataloader = DataLoader(
dataset, batch_size=1, shuffle=False, drop_last=False, num_workers=n_workers
)
wav2vec = load_pretrained_wav2vec(wav2vec_path).to(device)
speaker_infos = {}
pbar = tqdm.tqdm(total=len(dataset), ncols=0)
for speaker_name, audio_path, wav, mel in dataloader:
if wav.size(-1) < 10:
continue
wav = wav.to(device)
speaker_name = speaker_name[0]
audio_path = audio_path[0]
with torch.no_grad():
feat = wav2vec.extract_features(wav, None)[0]
feat = feat.detach().cpu().squeeze(0)
mel = mel.squeeze(0)
fd, temp_file = mkstemp(suffix=".tar", prefix="utterance-", dir=out_dir_path)
torch.save({"feat": feat, "mel": mel}, temp_file)
os.close(fd)
        if speaker_name not in speaker_infos:
speaker_infos[speaker_name] = []
speaker_infos[speaker_name].append(
{
"feature_path": Path(temp_file).name,
"audio_path": audio_path,
"feat_len": len(feat),
"mel_len": len(mel),
}
)
pbar.update(dataloader.batch_size)
with open(out_dir_path / "metadata.json", "w") as f:
json.dump(speaker_infos, f, indent=2)
if __name__ == "__main__":
main(**parse_args())
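# Example invocation (a sketch; the paths are placeholders, and `wav2vec_small.pt`
# is assumed to be a fairseq wav2vec 2.0 checkpoint):
#   python preprocess.py VCTK-Corpus/wav48 wav2vec_small.pt features/ --trim_method vad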
| 3,318 | 25.766129 | 85 | py |
FragmentVC | FragmentVC-main/models/utils.py | """Useful utilities."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from fairseq.models.wav2vec import Wav2Vec2Model
def load_pretrained_wav2vec(ckpt_path):
"""Load pretrained Wav2Vec model."""
ckpt = torch.load(ckpt_path)
model = Wav2Vec2Model.build_model(ckpt["args"], task=None)
model.load_state_dict(ckpt["model"])
model.remove_pretraining_modules()
model.eval()
return model
def get_cosine_schedule_with_warmup(
optimizer: Optimizer,
num_warmup_steps: int,
num_training_steps: int,
num_cycles: float = 0.5,
last_epoch: int = -1,
):
"""
    Create a schedule with a learning rate that decreases following the values of the cosine function from the
    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`float`, `optional`, defaults to 0.5):
            The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(
max(1, num_training_steps - num_warmup_steps)
)
return max(
0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
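# Example usage (a minimal sketch; the optimizer, model, and step counts are placeholders):
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
#   scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=500, num_training_steps=10000)
#   for step in range(10000):
#       train_step()  # forward/backward and optimizer.step()
#       scheduler.step()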
| 2,140 | 33.532258 | 116 | py |
FragmentVC | FragmentVC-main/models/model.py | """FragmentVC model architecture."""
from typing import Tuple, List, Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .convolutional_transformer import Smoother, Extractor
class FragmentVC(nn.Module):
"""
    FragmentVC uses Wav2Vec features of the source utterance as queries to attend
    over the mel spectrogram of the target speaker.
"""
def __init__(self, d_model=512):
super().__init__()
self.unet = UnetBlock(d_model)
self.smoothers = nn.TransformerEncoder(Smoother(d_model, 2, 1024), num_layers=3)
self.mel_linear = nn.Linear(d_model, 80)
self.post_net = nn.Sequential(
nn.Conv1d(80, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 80, kernel_size=5, padding=2),
nn.BatchNorm1d(80),
nn.Dropout(0.5),
)
def forward(
self,
srcs: Tensor,
refs: Tensor,
src_masks: Optional[Tensor] = None,
ref_masks: Optional[Tensor] = None,
) -> Tuple[Tensor, List[Optional[Tensor]]]:
"""Forward function.
Args:
            srcs: (batch, src_len, 768)
            refs: (batch, 80, ref_len)
            src_masks: (batch, src_len)
            ref_masks: (batch, ref_len)
"""
# out: (src_len, batch, d_model)
out, attns = self.unet(srcs, refs, src_masks=src_masks, ref_masks=ref_masks)
# out: (src_len, batch, d_model)
out = self.smoothers(out, src_key_padding_mask=src_masks)
# out: (src_len, batch, 80)
out = self.mel_linear(out)
# out: (batch, 80, src_len)
out = out.transpose(1, 0).transpose(2, 1)
refined = self.post_net(out)
out = out + refined
# out: (batch, 80, src_len)
return out, attns
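# Shape sanity check (a sketch with random tensors; batch=2, src_len=100, and
# ref_len=400 are arbitrary):
#   model = FragmentVC()
#   out, attns = model(torch.rand(2, 100, 768), torch.rand(2, 80, 400))
#   assert out.shape == (2, 80, 100)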
class UnetBlock(nn.Module):
"""Hierarchically attend on references."""
def __init__(self, d_model: int):
super(UnetBlock, self).__init__()
self.conv1 = nn.Conv1d(80, d_model, 3, padding=1, padding_mode="replicate")
self.conv2 = nn.Conv1d(d_model, d_model, 3, padding=1, padding_mode="replicate")
self.conv3 = nn.Conv1d(d_model, d_model, 3, padding=1, padding_mode="replicate")
self.prenet = nn.Sequential(
nn.Linear(768, 768), nn.ReLU(), nn.Linear(768, d_model),
)
self.extractor1 = Extractor(d_model, 2, 1024, no_residual=True)
self.extractor2 = Extractor(d_model, 2, 1024)
self.extractor3 = Extractor(d_model, 2, 1024)
def forward(
self,
srcs: Tensor,
refs: Tensor,
src_masks: Optional[Tensor] = None,
ref_masks: Optional[Tensor] = None,
) -> Tuple[Tensor, List[Optional[Tensor]]]:
"""Forward function.
Args:
            srcs: (batch, src_len, 768)
            refs: (batch, 80, ref_len)
            src_masks: (batch, src_len)
            ref_masks: (batch, ref_len)
"""
# tgt: (batch, tgt_len, d_model)
tgt = self.prenet(srcs)
# tgt: (tgt_len, batch, d_model)
tgt = tgt.transpose(0, 1)
# ref*: (batch, d_model, mel_len)
ref1 = self.conv1(refs)
ref2 = self.conv2(F.relu(ref1))
ref3 = self.conv3(F.relu(ref2))
# out*: (tgt_len, batch, d_model)
out, attn1 = self.extractor1(
tgt,
ref3.transpose(1, 2).transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
out, attn2 = self.extractor2(
out,
ref2.transpose(1, 2).transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
out, attn3 = self.extractor3(
out,
ref1.transpose(1, 2).transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
# out: (tgt_len, batch, d_model)
return out, [attn1, attn2, attn3]
| 4,523 | 29.362416 | 88 | py |
FragmentVC | FragmentVC-main/models/convolutional_transformer.py | """Convolutional transformer"""
from typing import Optional, Tuple
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Module, Dropout, LayerNorm, Conv1d, MultiheadAttention
class Smoother(Module):
"""Convolutional Transformer Encoder Layer"""
def __init__(self, d_model: int, nhead: int, d_hid: int, dropout=0.1):
super(Smoother, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
def forward(
self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
) -> Tensor:
# multi-head self attention
src2 = self.self_attn(
src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
# add & norm
src = src + self.dropout1(src2)
src = self.norm1(src)
# conv1d
src2 = src.transpose(0, 1).transpose(1, 2)
src2 = self.conv2(F.relu(self.conv1(src2)))
src2 = src2.transpose(1, 2).transpose(0, 1)
# add & norm
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
class Extractor(Module):
"""Convolutional Transformer Decoder Layer"""
def __init__(
self, d_model: int, nhead: int, d_hid: int, dropout=0.1, no_residual=False,
):
super(Extractor, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.cross_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.dropout3 = Dropout(dropout)
self.no_residual = no_residual
def forward(
self,
tgt: Tensor,
memory: Tensor,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
# multi-head self attention
tgt2 = self.self_attn(
tgt, tgt, tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
# add & norm
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# multi-head cross attention
tgt2, attn = self.cross_attn(
tgt,
memory,
memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)
# add & norm
if self.no_residual:
tgt = self.dropout2(tgt2)
else:
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# conv1d
tgt2 = tgt.transpose(0, 1).transpose(1, 2)
tgt2 = self.conv2(F.relu(self.conv1(tgt2)))
tgt2 = tgt2.transpose(1, 2).transpose(0, 1)
# add & norm
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt, attn
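# Shape sanity check (a sketch; sequence lengths and batch size are arbitrary, and
# inputs use the (seq_len, batch, d_model) layout expected by nn.MultiheadAttention):
#   extractor = Extractor(d_model=512, nhead=2, d_hid=1024)
#   out, attn = extractor(torch.rand(100, 2, 512), torch.rand(400, 2, 512))
#   out: (100, 2, 512); attn: (2, 100, 400)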
| 3,526 | 28.889831 | 84 | py |
FragmentVC | FragmentVC-main/models/__init__.py | from .model import FragmentVC
from .utils import *
| 51 | 16.333333 | 29 | py |
FragmentVC | FragmentVC-main/data/intra_speaker_dataset.py | """Dataset for reconstruction scheme."""
import json
import random
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
import torch
from tqdm import tqdm
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
class IntraSpeakerDataset(Dataset):
"""Dataset for reconstruction scheme.
Returns:
speaker_id: speaker id number.
feat: Wav2Vec feature tensor.
mel: log mel spectrogram tensor.
"""
def __init__(self, data_dir, metadata_path, n_samples=5, pre_load=False):
with open(metadata_path, "r") as f:
metadata = json.load(f)
executor = ThreadPoolExecutor(max_workers=4)
futures = []
for speaker_name, utterances in metadata.items():
for utterance in utterances:
futures.append(
executor.submit(
_process_data,
speaker_name,
data_dir,
utterance["feature_path"],
pre_load,
)
)
self.data = []
self.speaker_to_indices = {}
for i, future in enumerate(tqdm(futures, ncols=0)):
result = future.result()
speaker_name = result[0]
self.data.append(result)
if speaker_name not in self.speaker_to_indices:
self.speaker_to_indices[speaker_name] = [i]
else:
self.speaker_to_indices[speaker_name].append(i)
self.data_dir = Path(data_dir)
self.n_samples = n_samples
self.pre_load = pre_load
def __len__(self):
return len(self.data)
def _get_data(self, index):
if self.pre_load:
speaker_name, content_emb, target_mel = self.data[index]
else:
speaker_name, content_emb, target_mel = _load_data(*self.data[index])
return speaker_name, content_emb, target_mel
def __getitem__(self, index):
speaker_name, content_emb, target_mel = self._get_data(index)
utterance_indices = self.speaker_to_indices[speaker_name].copy()
utterance_indices.remove(index)
sampled_mels = []
for sampled_id in random.sample(utterance_indices, self.n_samples):
sampled_mel = self._get_data(sampled_id)[2]
sampled_mels.append(sampled_mel)
reference_mels = torch.cat(sampled_mels, dim=0)
return content_emb, reference_mels, target_mel
def _process_data(speaker_name, data_dir, feature_path, load):
if load:
return _load_data(speaker_name, data_dir, feature_path)
else:
return speaker_name, data_dir, feature_path
def _load_data(speaker_name, data_dir, feature_path):
feature = torch.load(Path(data_dir, feature_path))
content_emb = feature["feat"]
target_mel = feature["mel"]
return speaker_name, content_emb, target_mel
def collate_batch(batch):
"""Collate a batch of data."""
srcs, refs, tgts = zip(*batch)
src_lens = [len(src) for src in srcs]
ref_lens = [len(ref) for ref in refs]
tgt_lens = [len(tgt) for tgt in tgts]
overlap_lens = [
min(src_len, tgt_len) for src_len, tgt_len in zip(src_lens, tgt_lens)
]
srcs = pad_sequence(srcs, batch_first=True) # (batch, max_src_len, wav2vec_dim)
src_masks = [torch.arange(srcs.size(1)) >= src_len for src_len in src_lens]
src_masks = torch.stack(src_masks) # (batch, max_src_len)
refs = pad_sequence(refs, batch_first=True, padding_value=-20)
refs = refs.transpose(1, 2) # (batch, mel_dim, max_ref_len)
ref_masks = [torch.arange(refs.size(2)) >= ref_len for ref_len in ref_lens]
ref_masks = torch.stack(ref_masks) # (batch, max_ref_len)
tgts = pad_sequence(tgts, batch_first=True, padding_value=-20)
tgts = tgts.transpose(1, 2) # (batch, mel_dim, max_tgt_len)
tgt_masks = [torch.arange(tgts.size(2)) >= tgt_len for tgt_len in tgt_lens]
tgt_masks = torch.stack(tgt_masks) # (batch, max_tgt_len)
return srcs, src_masks, refs, ref_masks, tgts, tgt_masks, overlap_lens
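# Example of the padding masks built above: with src_lens = [3, 5] and
# max_src_len = 5, src_masks is
#   [[False, False, False, True,  True ],
#    [False, False, False, False, False]]
# where True marks padded positions to be ignored by attention.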
| 4,148 | 31.928571 | 84 | py |
FragmentVC | FragmentVC-main/data/utils.py | """Utilities for data manipulation."""
from typing import Union
from pathlib import Path
import librosa
import numpy as np
import matplotlib
matplotlib.use("Agg")  # select a non-interactive backend before importing pyplot
from matplotlib import pyplot as plt
from scipy.signal import lfilter
def load_wav(
audio_path: Union[str, Path], sample_rate: int, trim: bool = False
) -> np.ndarray:
"""Load and preprocess waveform."""
wav = librosa.load(audio_path, sr=sample_rate)[0]
wav = wav / (np.abs(wav).max() + 1e-6)
if trim:
_, (start_frame, end_frame) = librosa.effects.trim(
wav, top_db=25, frame_length=512, hop_length=128
)
start_frame = max(0, start_frame - 0.1 * sample_rate)
end_frame = min(len(wav), end_frame + 0.1 * sample_rate)
start = int(start_frame)
end = int(end_frame)
if end - start > 1000: # prevent empty slice
wav = wav[start:end]
return wav
def log_mel_spectrogram(
x: np.ndarray,
preemph: float,
sample_rate: int,
n_mels: int,
n_fft: int,
hop_length: int,
win_length: int,
f_min: int,
) -> np.ndarray:
"""Create a log Mel spectrogram from a raw audio signal."""
x = lfilter([1, -preemph], [1], x)
magnitude = np.abs(
librosa.stft(x, n_fft=n_fft, hop_length=hop_length, win_length=win_length)
)
mel_fb = librosa.filters.mel(sample_rate, n_fft, n_mels=n_mels, fmin=f_min)
mel_spec = np.dot(mel_fb, magnitude)
log_mel_spec = np.log(mel_spec + 1e-9)
return log_mel_spec.T
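# Example usage (a sketch; the STFT/mel values mirror the defaults in preprocess.py:
# preemph=0.97, hop=326, win=1304, n_fft=1304, n_mels=80, f_min=80):
#   wav = load_wav("sample.wav", 16000, trim=True)
#   mel = log_mel_spectrogram(wav, 0.97, 16000, 80, 1304, 326, 1304, 80)  # (n_frames, 80)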
def plot_mel(gt_mel, predicted_mel=None, filename="mel.png"):
if predicted_mel is not None:
fig, axes = plt.subplots(2, 1, squeeze=False, figsize=(10, 10))
else:
fig, axes = plt.subplots(1, 1, squeeze=False, figsize=(10, 10))
axes[0][0].imshow(gt_mel.detach().cpu().numpy().T, origin="lower")
axes[0][0].set_aspect(1, adjustable="box")
    axes[0][0].set_ylim(0, 80)
axes[0][0].set_title("ground-truth mel-spectrogram", fontsize="medium")
axes[0][0].tick_params(labelsize="x-small", left=False, labelleft=False)
if predicted_mel is not None:
axes[1][0].imshow(predicted_mel.detach().cpu().numpy(), origin="lower")
axes[1][0].set_aspect(1.0, adjustable="box")
axes[1][0].set_ylim(0, 80)
axes[1][0].set_title("predicted mel-spectrogram", fontsize="medium")
axes[1][0].tick_params(labelsize="x-small", left=False, labelleft=False)
plt.tight_layout()
plt.savefig(filename)
plt.close()
def plot_attn(attn, filename="attn.png"):
fig, axes = plt.subplots(len(attn), 1, squeeze=False, figsize=(10, 10))
for i, layer_attn in enumerate(attn):
        axes[i][0].imshow(layer_attn[0].detach().cpu().numpy(), origin="lower")
axes[i][0].set_title("layer {}".format(i), fontsize="medium")
axes[i][0].tick_params(labelsize="x-small")
axes[i][0].set_xlabel("target")
axes[i][0].set_ylabel("source")
plt.tight_layout()
plt.savefig(filename)
plt.close()
| 3,008 | 31.010638 | 82 | py |
FragmentVC | FragmentVC-main/data/preprocess_dataset.py | """Precompute Wav2Vec features and spectrograms."""
from copy import deepcopy
from pathlib import Path
import torch
from librosa.util import find_files
import sox
from .utils import load_wav, log_mel_spectrogram
class PreprocessDataset(torch.utils.data.Dataset):
"""Prefetch audio data for preprocessing."""
def __init__(
self,
data_dirs,
trim_method,
sample_rate,
preemph,
hop_len,
win_len,
n_fft,
n_mels,
f_min,
):
data = []
for data_dir in data_dirs:
data_dir_path = Path(data_dir)
speaker_dirs = [x for x in data_dir_path.iterdir() if x.is_dir()]
for speaker_dir in speaker_dirs:
audio_paths = find_files(speaker_dir)
if len(audio_paths) == 0:
continue
speaker_name = speaker_dir.name
for audio_path in audio_paths:
data.append((speaker_name, audio_path))
self.trim_method = trim_method
self.sample_rate = sample_rate
self.preemph = preemph
self.hop_len = hop_len
self.win_len = win_len
self.n_fft = n_fft
self.n_mels = n_mels
self.f_min = f_min
self.data = data
if trim_method == "vad":
tfm = sox.Transformer()
tfm.vad(location=1)
tfm.vad(location=-1)
self.sox_transform = tfm
def __len__(self):
return len(self.data)
def __getitem__(self, index):
speaker_name, audio_path = self.data[index]
if self.trim_method == "librosa":
wav = load_wav(audio_path, self.sample_rate, trim=True)
elif self.trim_method == "vad":
wav = load_wav(audio_path, self.sample_rate)
trim_wav = self.sox_transform.build_array(
input_array=wav, sample_rate_in=self.sample_rate
)
wav = deepcopy(trim_wav if len(trim_wav) > 10 else wav)
mel = log_mel_spectrogram(
wav,
self.preemph,
self.sample_rate,
self.n_mels,
self.n_fft,
self.hop_len,
self.win_len,
self.f_min,
)
return speaker_name, audio_path, torch.FloatTensor(wav), torch.FloatTensor(mel)
| 2,354 | 26.068966 | 87 | py |
FragmentVC | FragmentVC-main/data/__init__.py | from .preprocess_dataset import PreprocessDataset
from .intra_speaker_dataset import IntraSpeakerDataset, collate_batch
from .utils import *
| 141 | 34.5 | 69 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/utils.py | # coding=utf-8
# Copyleft 2019 Project LXRT
import sys
import csv
import base64
import time
import torch
import numpy as np
from tqdm import tqdm
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
def load_obj_tsv(fname, topk=None):
"""Load object features from tsv file.
:param fname: The path to the tsv file.
:param topk: Only load features for top K images (lines) in the tsv file.
Will load all the features if topk is either -1 or None.
:return: A list of image object features where each feature is a dict.
        See FIELDNAMES above for the keys in the feature dict.
"""
data = []
start_time = time.time()
print("Start to load Faster-RCNN detected objects from %s" % fname)
with open(fname) as f:
reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
for i, item in enumerate(tqdm(reader)):
for key in ['img_h', 'img_w', 'num_boxes']:
item[key] = int(item[key])
boxes = item['num_boxes']
decode_config = [
('objects_id', (boxes, ), np.int64),
('objects_conf', (boxes, ), np.float32),
('attrs_id', (boxes, ), np.int64),
('attrs_conf', (boxes, ), np.float32),
('boxes', (boxes, 4), np.float32),
('features', (boxes, -1), np.float32),
]
for key, shape, dtype in decode_config:
item[key] = np.frombuffer(base64.b64decode(item[key]), dtype=dtype)
item[key] = item[key].reshape(shape)
item[key].setflags(write=False)
data.append(item)
if topk is not None and len(data) == topk:
break
elapsed_time = time.time() - start_time
print("Loaded %d images in file %s in %d seconds." % (len(data), fname, elapsed_time))
return data
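# Example usage (a sketch; the path is a placeholder, and `topk` limits loading
# for quick debugging):
#   img_data = load_obj_tsv("data/mscoco_imgfeat/val2014_obj36.tsv", topk=100)
#   feats = img_data[0]["features"]  # (num_boxes, 2048) float32, read-only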
def load_obj_tsv_save_to_h5(fname, save_h5_name, save_json_name, all_examples):
import h5py
import json
data = []
start_time = time.time()
print("Start to load Faster-RCNN detected objects from %s" % fname)
metadata = []
h5_file = h5py.File(save_h5_name, 'w')
h5_features = h5_file.create_dataset('features', (all_examples, 36, 2048), dtype=np.float32)
h5_boxes = h5_file.create_dataset('boxes', (all_examples, 36, 4), dtype=np.float32)
h5_objects_id = h5_file.create_dataset('objects_id', (all_examples,36), dtype=np.int64)
h5_objects_conf = h5_file.create_dataset('objects_conf', (all_examples,36), dtype=np.float32)
h5_attrs_id = h5_file.create_dataset('attrs_id', (all_examples,36), dtype=np.int64)
h5_attrs_conf = h5_file.create_dataset('attrs_conf', (all_examples,36), dtype=np.float32)
with open(fname) as f:
reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
for i, item in enumerate(tqdm(reader)):
for key in ['img_h', 'img_w', 'num_boxes']:
item[key] = int(item[key])
boxes = item['num_boxes']
decode_config = [
('objects_id', (boxes, ), np.int64),
('objects_conf', (boxes, ), np.float32),
('attrs_id', (boxes, ), np.int64),
('attrs_conf', (boxes, ), np.float32),
('boxes', (boxes, 4), np.float32),
('features', (boxes, -1), np.float32),
]
for key, shape, dtype in decode_config:
item[key] = np.frombuffer(base64.b64decode(item[key]), dtype=dtype)
item[key] = item[key].reshape(shape)
item[key].setflags(write=False)
metadata.append(
{
"img_id": item["img_id"],
"img_h": item["img_h"],
"img_w": item['img_w']
}
)
h5_features[i] = item["features"]
h5_boxes[i] = item["boxes"]
h5_objects_id[i] = item["objects_id"]
h5_objects_conf[i] = item["objects_conf"]
h5_attrs_id[i] = item["attrs_id"]
h5_attrs_conf[i] = item["attrs_conf"]
with open(save_json_name, "w") as f:
json.dump(metadata, f)
return data
def create_slim_h5(fname, save_h5_name, save_json_name, all_examples, img_ids_to_keep):
import h5py
import json
data = []
start_time = time.time()
print("Start to load Faster-RCNN detected objects from %s" % fname)
metadata = []
h5_file = h5py.File(save_h5_name, 'w')
h5_features = h5_file.create_dataset('features', (all_examples, 36, 2048), dtype=np.float32)
h5_boxes = h5_file.create_dataset('boxes', (all_examples, 36, 4), dtype=np.float32)
h5_objects_id = h5_file.create_dataset('objects_id', (all_examples,36), dtype=np.int64)
h5_objects_conf = h5_file.create_dataset('objects_conf', (all_examples,36), dtype=np.float32)
h5_attrs_id = h5_file.create_dataset('attrs_id', (all_examples,36), dtype=np.int64)
h5_attrs_conf = h5_file.create_dataset('attrs_conf', (all_examples,36), dtype=np.float32)
i = 0
with open(fname) as f:
reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
for index, item in enumerate(tqdm(reader)):
if item["img_id"] not in img_ids_to_keep:
continue
for key in ['img_h', 'img_w', 'num_boxes']:
item[key] = int(item[key])
boxes = item['num_boxes']
decode_config = [
('objects_id', (boxes, ), np.int64),
('objects_conf', (boxes, ), np.float32),
('attrs_id', (boxes, ), np.int64),
('attrs_conf', (boxes, ), np.float32),
('boxes', (boxes, 4), np.float32),
('features', (boxes, -1), np.float32),
]
for key, shape, dtype in decode_config:
item[key] = np.frombuffer(base64.b64decode(item[key]), dtype=dtype)
item[key] = item[key].reshape(shape)
item[key].setflags(write=False)
metadata.append(
{
"img_id": item["img_id"],
"img_h": item["img_h"],
"img_w": item['img_w']
}
)
h5_features[i] = item["features"]
h5_boxes[i] = item["boxes"]
h5_objects_id[i] = item["objects_id"]
h5_objects_conf[i] = item["objects_conf"]
h5_attrs_id[i] = item["attrs_id"]
h5_attrs_conf[i] = item["attrs_conf"]
i += 1
with open(save_json_name, "w") as f:
json.dump(metadata, f)
return data
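# Reading back the saved features (a sketch; the file name is a placeholder).
# Entry i of the metadata JSON corresponds to row i of every h5 dataset:
#   with h5py.File("train2014_obj36.h5", "r") as f:
#       feats = f["features"][0]   # (36, 2048)
#       boxes = f["boxes"][0]      # (36, 4)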
def load_lxmert_sgg(path, model):
print("Load rel pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load(path)["model"]
model_state_dict = model.state_dict()
'''print("loaded_state_dict", loaded_state_dict["model"].keys())
print("\n\n\n\n\n")
print("model_state_dict", model_state_dict.keys())
assert(0)'''
new_loaded_state_dict = {}
for key in list(loaded_state_dict.keys()):
if "lxrt" in key:
new_loaded_state_dict[key.split("lxrt.")[-1]] = loaded_state_dict[key]
# module.rel_heads.rel_predictor.lxrt.encoder.r_layers.3.output.LayerNorm.weight -> encoder.r_layers.3.output.LayerNorm.weight
load_state_dict_flexible(model.lxrt_encoder.model.bert, new_loaded_state_dict)
def load_lxmert_sgg_pretrain(path, model):
print("Load rel pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load(path)["model"]
model_state_dict = model.state_dict()
'''print("loaded_state_dict", loaded_state_dict.keys())
print("\n\n\n\n\n")
print("model_state_dict", model_state_dict.keys())
assert(0)'''
new_loaded_state_dict = {}
for key in list(loaded_state_dict.keys()):
if "lxrt" in key:
new_loaded_state_dict[key.split("lxrt.")[-1]] = loaded_state_dict[key]
# module.rel_heads.rel_predictor.lxrt.encoder.r_layers.3.output.LayerNorm.weight -> encoder.r_layers.3.output.LayerNorm.weight
load_state_dict_flexible(model.bert, new_loaded_state_dict)
def load_lxmert_to_sgg(path, model):
print("Load rel pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load(path)["model"]
model_state_dict = model.state_dict()
'''print("loaded_state_dict", loaded_state_dict.keys())
print("\n\n\n\n\n")
print("model_state_dict", model_state_dict.keys())
assert(0)'''
new_loaded_state_dict = {}
for key in list(loaded_state_dict.keys()):
if "lxrt" in key:
new_loaded_state_dict[key.split("lxrt.")[-1]] = loaded_state_dict[key]
# module.rel_heads.rel_predictor.lxrt.encoder.r_layers.3.output.LayerNorm.weight -> encoder.r_layers.3.output.LayerNorm.weight
load_state_dict_flexible(model.bert, new_loaded_state_dict)
def load_state_dict_flexible(model, state_dict):
try:
model.load_state_dict(state_dict)
    except Exception:
print("Full loading failed!! Try partial loading!!")
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Skipped: " + name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
print("Successfully loaded: "+name)
            except Exception:
print("Part load failed: " + name) | 9,752 | 38.646341 | 138 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/param.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import argparse
import random
import numpy as np
import torch
import logging
logging.basicConfig(level=logging.INFO)
def get_optimizer(optim):
# Bind the optimizer
if optim == 'rms':
print("Optimizer: Using RMSProp")
optimizer = torch.optim.RMSprop
elif optim == 'adam':
print("Optimizer: Using Adam")
optimizer = torch.optim.Adam
elif optim == 'adamax':
print("Optimizer: Using Adamax")
optimizer = torch.optim.Adamax
elif optim == 'sgd':
print("Optimizer: sgd")
optimizer = torch.optim.SGD
elif 'bert' in optim:
optimizer = 'bert' # The bert optimizer will be bind later.
else:
assert False, "Please add your optimizer %s in the list." % optim
return optimizer
def parse_args():
parser = argparse.ArgumentParser()
# Data Splits
parser.add_argument("--train", default='train')
parser.add_argument("--valid", default='valid')
parser.add_argument("--test", default=None)
# Training Hyper-parameters
parser.add_argument('--batchSize', dest='batch_size', type=int, default=256)
parser.add_argument('--optim', default='bert')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--seed', type=int, default=9595, help='random seed')
# Debugging
parser.add_argument('--output', type=str, default='snap/test')
parser.add_argument("--fast", action='store_const', default=False, const=True)
parser.add_argument("--tiny", action='store_const', default=False, const=True)
parser.add_argument("--tqdm", action='store_const', default=False, const=True)
# Model Loading
parser.add_argument('--load', type=str, default=None,
help='Load the model (usually the fine-tuned model).')
parser.add_argument('--loadLXMERT', dest='load_lxmert', type=str, default=None,
help='Load the pre-trained LXMERT model.')
parser.add_argument('--loadLXMERTQA', dest='load_lxmert_qa', type=str, default=None,
help='Load the pre-trained LXMERT model with QA answer head.')
parser.add_argument("--fromScratch", dest='from_scratch', action='store_const', default=False, const=True,
help='If none of the --load, --loadLXMERT, --loadLXMERTQA is set, '
'the model would be trained from scratch. If --fromScratch is'
' not specified, the model would load BERT-pre-trained weights by'
' default. ')
# Optimization
parser.add_argument("--mceLoss", dest='mce_loss', action='store_const', default=False, const=True)
# LXRT Model Config
# Note: LXRT = L, X, R (three encoders), Transformer
parser.add_argument("--llayers", default=9, type=int, help='Number of Language layers')
parser.add_argument("--xlayers", default=5, type=int, help='Number of CROSS-modality layers.')
parser.add_argument("--rlayers", default=5, type=int, help='Number of object Relationship layers.')
# LXMERT Pre-training Config
parser.add_argument("--taskMatched", dest='task_matched', action='store_const', default=False, const=True)
parser.add_argument("--taskMaskLM", dest='task_mask_lm', action='store_const', default=False, const=True)
parser.add_argument("--taskObjPredict", dest='task_obj_predict', action='store_const', default=False, const=True)
parser.add_argument("--taskQA", dest='task_qa', action='store_const', default=False, const=True)
parser.add_argument("--visualLosses", dest='visual_losses', default='obj,attr,feat', type=str)
parser.add_argument("--qaSets", dest='qa_sets', default=None, type=str)
parser.add_argument("--wordMaskRate", dest='word_mask_rate', default=0.15, type=float)
parser.add_argument("--objMaskRate", dest='obj_mask_rate', default=0.15, type=float)
# Training configuration
parser.add_argument("--multiGPU", action='store_const', default=False, const=True)
parser.add_argument("--numWorkers", dest='num_workers', default=0)
parser.add_argument("--config", dest='config', default=None, type=str)
parser.add_argument("--save_folder", dest='save_folder', default="test", type=str)
    # Unused placeholder parameters, only included to accommodate the sgg code
parser.add_argument("--config-file", dest="config-file", default=None, type=str)
parser.add_argument("--algorithm", dest="algorithm", default=None, type=str)
parser.add_argument("--save_path", dest="save_path", default=None, type=str)
# Parse the arguments.
args = parser.parse_args()
# Bind optimizer class.
args.optimizer = get_optimizer(args.optim)
# Set seeds
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
# Added by harold. Allows additional parameters specified by the json file.
import commentjson
from attrdict import AttrDict
from pprint import pprint
if args.config is not None:
with open(args.config) as f:
config_json = commentjson.load(f)
dict_args = vars(args)
dict_args.update(config_json) # Update with overwrite
args = AttrDict(dict_args)
import shutil
import os
output = args.output
if not os.path.exists(output):
os.mkdir(output)
shutil.copyfile(args.config, os.path.join(output, os.path.basename(args.config)))
# Set up logs
import sys
run_log_counter = 0
    while os.path.exists(args.output + '/run_{}.log'.format(run_log_counter)):
        run_log_counter += 1
    file_log = open(args.output + '/run_{}.log'.format(run_log_counter), 'w')  # file that receives a copy of stdout
file_log.write("")
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
file_log.write(data) # Write the data of stdout here to a text file as well
def flush(self):
pass
sys.stdout = Unbuffered(sys.stdout)
from pprint import pprint
pprint(args)
print("\n\n\n\n")
with open(args.config) as f:
print(f.read())
return args
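# Example config consumed via --config (a sketch; keys must match the argparse
# dest names, and commentjson permits // comments):
#   {
#       "batch_size": 64,      // overrides --batchSize
#       "lr": 5e-5,
#       "task_mask_lm": true,
#       "output": "snap/pretrain"
#   }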
args = parse_args()
| 6,424 | 38.906832 | 117 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tools/create_open_image_data_lxmert_style.py | #{'img_id': 'COCO_train2014_000000318556', 'labelf': {'vqa': [{'no': 1}, {'yes': 1}, {'no': 1}, {'blue': 1, 'blue and white': 0.3}]}, 'sentf': {'mscoco': ['A very clean and well decorated empty bathroom', 'A blue and white bathroom with butterfly themed wall tiles.', 'A bathroom with a border of butterflies and blue paint on the walls above it.', 'An angled view of a beautifully decorated bathroom.', 'A clock that blends in with the wall hangs in a bathroom. '], 'vqa': ['Is the sink full of water?', 'Are there any butterflies on the tiles?', 'Is this bathroom in a hotel?', 'What color are the walls?']}}
import json
import os
target = "/local/harold/ubert/lxmert/data/open_image/butd_feat/"
all_image_files = []
for root, dirs, files in os.walk(target, topdown=False):
for txt_file in files:
if txt_file.endswith("txt"):
with open(os.path.join(target, txt_file)) as f:
lines = f.read().split("\n")
for line in lines:
if len(line) != 0:
all_image_files.append(line)
train_file_name = "/local/harold/ubert/lxmert/data/lxmert/open_images_train.json" #.format("open_images_train" if "train" in target else "open_images_valid")
train_data = []
for i in range(len(all_image_files)):
#caption, url = lines[i].strip('\n').split("\t", 1)
one_datatum = {}
one_datatum["img_id"] = all_image_files[i] #"{}/{}.jpg".format(target, i)
one_datatum["labelf"] = {}
one_datatum["sentf"] = {}
one_datatum["sentf"]["open_image"] = []
one_datatum["sentf"]["open_image"].append("")
train_data.append(one_datatum)
with open(train_file_name, 'w') as f:
json.dump(train_data, f) | 1,689 | 48.705882 | 610 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tools/sharearray.py | # Copyright 2017 Brendan Shillingford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import os.path
import re
import time
import sys
import numpy as np
__all__ = ['cache', 'decorator', 'valid_id', 'TimeoutException']
if sys.version_info[0] == 2:
FileExistsError = OSError
class TimeoutException(Exception):
pass
_ID_REGEX = re.compile(r"^[A-Za-z0-9=+._-]+$")
def valid_id(id):
if _ID_REGEX.match(id):
return True
return False
def _memmapped_view(filename):
return np.lib.format.open_memmap(filename, mode='r')
def _build_path(id, prefix, shm_path):
fn = os.path.join(shm_path, prefix + id + '.npy')
fn_lock = fn + '.lock'
return fn, fn_lock
def free(id, shm_path='/dev/shm', prefix='sharearray_'):
fn, fn_lock = _build_path(id, prefix=prefix, shm_path=shm_path)
fn_exists = os.path.exists(fn)
fn_lock_exists = os.path.exists(fn_lock)
if fn_lock_exists:
import warnings
warnings.warn("lock still exists")
os.unlink(fn_lock)
if fn_exists:
os.unlink(fn)
def cache(id, array_or_callback,
shm_path='/dev/shm',
prefix='sharearray_',
timeout=-1,
verbose=True,
log_func=None):
"""
Stores a `numpy` `ndarray` into shared memory, caching subsequent requests
(globally, across all processes) to the function so they point to the same
memory.
    By default, does this by creating a file at `/dev/shm/shareddataset_<id>`.
If:
1. The file is not created yet, saves `array_or_callback` to the path
listed above (see NOTE 1). Then, returns a read-only memmapped view to
this numpy array.
2. The file is already created. We return a read-only memmapped view of it.
Args:
id (str): identifier for shared array, global across system.
Must match `[A-Za-z0-9=+._-]+`. You may want to include your
program's name to prevent name collisions.
array_or_callback: either a `numpy.ndarray` containing value types, or
a callback function taking no arguments that returns one.
shm_path (str, optional): path to the Linux shared memory
tmpfs mountpoint. In almost all kernel builds, one lives at
`/dev/shm` with size defaulting to half the RAM; sometimes it's a
symlink to `/run/shm`.
timeout (int, optional): number of seconds to wait before timing out
waiting for lock to be released, when file is already being created.
If -1, waits indefinitely.
prefix (str, optional): prefix added to files in `shm_path`.
verbose (bool): if True, prints useful information (2-3 lines).
log_func (callable): if verbose is True, this is used if specified.
Else just uses `print`.
Returns:
A `numpy.ndarray` read-only view into the shared memory, whether it
was newly created or previously created.
Raises:
ValueError: `id` is not a valid identifier (must match
`[A-Za-z0-9=+._-]+`), or `array_or_callback` is not a callback
or returns
TimeoutException: if `timeout` is positive, the lock file exists, and
we have waited at least `timeout` seconds yet the lock still exists.
Notes:
NOTE 1: For concurrency safety, this function creates a lock file at
`/dev/shm/shareddataset_<id>.lock` when initially writing the file
(lock file is empty, doesn't contain PID). File creating is hence
checked using the lock, rather than the file's existence itself. We
don't use the standard create-rename method of ensuring atomicity,
since `array_or_callback` may be expensive to call or large to write.
NOTE 2: `id`s are currently global to the system. Include your program's
name to prevent name collisions.
NOTE 3: memmapped views are created using
`numpy.lib.format.open_memmap`.
NOTE 4: If the array is very large, to save memory, you may want to
immediately remove all references to the original array, then do a full
garbage collection (`import gc; gc.collect()`).
Examples:
An expensive operation (e.g. data preprocessing) that is the same
across all running instances of this program:
x_times_y = cache("myprog_x_times_y", lambda: np.dot(x, y))
A large (large enough to warrant concern, but small enough to fit in
RAM once) training set that we only want one instance of across
many training jobs:
def load_training_set():
# load and/or preprocess training_set once here
return training_set
training_set = cache("myprog_training_set", load_training_set)
Only passing a callback to array_or_callback makes sense here,
of course.
"""
if not valid_id(id):
raise ValueError('invalid id: ' + id)
'''if not (hasattr(array_or_callback, '__call__')
or isinstance(array_or_callback, np.ndarray)):
raise ValueError(
'array_or_callback should be ndarray or zero-argument callable')'''
if verbose and log_func:
print_ = log_func
elif verbose:
def print_(s):
print(s)
else:
def print_(s):
pass
fn, fn_lock = _build_path(id, prefix=prefix, shm_path=shm_path)
fd_lock = -1
try:
fd_lock = os.open(fn_lock, os.O_CREAT | os.O_EXCL)
if fd_lock < 0:
raise OSError("Lock open failure (bug?)", fn_lock, fd_lock)
except FileExistsError:
if timeout < 0:
#print_(("'{}' is being created by another process. "
# "Waiting indefinitely... (timeout < 0)").format(id))
while os.path.exists(fn_lock):
time.sleep(1)
else:
#print_(("'{}' is being created by another process. "
# "Waiting up to {} seconds...").format(id, timeout))
for _ in range(timeout):
time.sleep(1)
if not os.path.exists(fn_lock):
break
else:
raise TimeoutException(
"timed out waiting for %s to unlock (be created)" % id)
else:
if not os.path.exists(fn):
print_("'%s' doesn't exist yet. Locking and creating..." % id)
if hasattr(array_or_callback, '__call__'):
array = array_or_callback()
else:
array = array_or_callback
'''if isinstance(array_or_callback, np.ndarray):
array = array_or_callback
else:
array = array_or_callback()
if not isinstance(array, np.ndarray):
raise ValueError(
'callback did not return a numpy.ndarray, returned:',
type(array))'''
np.save(fn, array, allow_pickle=False)
print_("'%s': written." % id)
finally:
if fd_lock > 0:
os.close(fd_lock)
os.unlink(fn_lock)
print_("'%s': returning memmapped view." % id)
return _memmapped_view(fn)
def cache_with_delete_previous(id, array_or_callback,
shm_path='/dev/shm',
prefix='sharearray_',
timeout=-1,
verbose=True,
log_func=None,
delete=None,
wait=0):
"""
Stores a `numpy` `ndarray` into shared memory, caching subsequent requests
(globally, across all processes) to the function so they point to the same
memory.
By default, does this be creating a file at `/dev/shm/shareddataset_<id>`.
If:
1. The file is not created yet, saves `array_or_callback` to the path
listed above (see NOTE 1). Then, returns a read-only memmapped view to
this numpy array.
2. The file is already created. We return a read-only memmapped view of it.
Args:
id (str): identifier for shared array, global across system.
Must match `[A-Za-z0-9=+._-]+`. You may want to include your
program's name to prevent name collisions.
array_or_callback: either a `numpy.ndarray` containing value types, or
a callback function taking no arguments that returns one.
shm_path (str, optional): path to the Linux shared memory
tmpfs mountpoint. In almost all kernel builds, one lives at
`/dev/shm` with size defaulting to half the RAM; sometimes it's a
symlink to `/run/shm`.
timeout (int, optional): number of seconds to wait before timing out
waiting for lock to be released, when file is already being created.
If -1, waits indefinitely.
prefix (str, optional): prefix added to files in `shm_path`.
verbose (bool): if True, prints useful information (2-3 lines).
log_func (callable): if verbose is True, this is used if specified.
Else just uses `print`.
Returns:
A `numpy.ndarray` read-only view into the shared memory, whether it
was newly created or previously created.
Raises:
ValueError: `id` is not a valid identifier (must match
`[A-Za-z0-9=+._-]+`), or `array_or_callback` is not a callback
or returns
TimeoutException: if `timeout` is positive, the lock file exists, and
we have waited at least `timeout` seconds yet the lock still exists.
Notes:
NOTE 1: For concurrency safety, this function creates a lock file at
`/dev/shm/shareddataset_<id>.lock` when initially writing the file
(lock file is empty, doesn't contain PID). File creating is hence
checked using the lock, rather than the file's existence itself. We
don't use the standard create-rename method of ensuring atomicity,
since `array_or_callback` may be expensive to call or large to write.
NOTE 2: `id`s are currently global to the system. Include your program's
name to prevent name collisions.
NOTE 3: memmapped views are created using
`numpy.lib.format.open_memmap`.
NOTE 4: If the array is very large, to save memory, you may want to
immediately remove all references to the original array, then do a full
garbage collection (`import gc; gc.collect()`).
Examples:
An expensive operation (e.g. data preprocessing) that is the same
across all running instances of this program:
x_times_y = cache("myprog_x_times_y", lambda: np.dot(x, y))
A large (large enough to warrant concern, but small enough to fit in
RAM once) training set that we only want one instance of across
many training jobs:
def load_training_set():
# load and/or preprocess training_set once here
return training_set
training_set = cache("myprog_training_set", load_training_set)
Only passing a callback to array_or_callback makes sense here,
of course.
"""
if not valid_id(id):
raise ValueError('invalid id: ' + id)
'''if not (hasattr(array_or_callback, '__call__')
or isinstance(array_or_callback, np.ndarray)):
raise ValueError(
'array_or_callback should be ndarray or zero-argument callable')'''
if verbose and log_func:
print_ = log_func
elif verbose:
def print_(s):
print(s)
else:
def print_(s):
pass
fn, fn_lock = _build_path(id, prefix=prefix, shm_path=shm_path)
fd_lock = -1
try:
fd_lock = os.open(fn_lock, os.O_CREAT | os.O_EXCL)
if fd_lock < 0:
raise OSError("Lock open failure (bug?)", fn_lock, fd_lock)
except FileExistsError:
if timeout < 0:
#print_(("'{}' is being created by another process. "
# "Waiting indefinitely... (timeout < 0)").format(id))
while os.path.exists(fn_lock):
time.sleep(1)
else:
#print_(("'{}' is being created by another process. "
# "Waiting up to {} seconds...").format(id, timeout))
for _ in range(timeout):
time.sleep(1)
if not os.path.exists(fn_lock):
break
else:
raise TimeoutException(
"timed out waiting for %s to unlock (be created)" % id)
else:
if not os.path.exists(fn):
print("Sleep a bit for other workers to finish whatever they are doing")
for _ in range(wait):
time.sleep(1)
if delete is not None:
print("Deleting {} on the way there".format(delete))
for i in delete:
if os.path.exists(i):
os.remove(i)
print_("'%s' doesn't exist yet. Locking and creating..." % id)
if hasattr(array_or_callback, '__call__'):
array = array_or_callback()
else:
array = array_or_callback
'''if isinstance(array_or_callback, np.ndarray):
array = array_or_callback
else:
array = array_or_callback()
if not isinstance(array, np.ndarray):
raise ValueError(
'callback did not return a numpy.ndarray, returned:',
type(array))'''
np.save(fn, array, allow_pickle=False)
print_("'%s': written." % id)
finally:
if fd_lock > 0:
os.close(fd_lock)
os.unlink(fn_lock)
#print_("'%s': returning memmapped view." % id)
return _memmapped_view(fn)
def decorator(id, **kwargs):
"""
Decorator version of `cache`, analogous to a memoization decorator.
Besides `array_or_callback` which isn't needed, arguments are identical to
those of `cache`, see there for docs. They must be passed as keyword args
except for `id`.
Note that `id` can't depend on the arguments to the decorated function.
For that, use `cache` directly.
Example:
Alternative to callback syntax above.
@decorator("my_large_array")
def foo():
# ...do some expensive computation to generate arr...
return arr
arr = foo() # first call, in shared memory arr global to system
arr2 = foo() # here or another script, returns read-only view
"""
if not valid_id(id):
raise ValueError('invalid id: ' + id)
def decorate(f):
@functools.wraps(f)
def wrapped():
return cache(id, f, **kwargs)
return wrapped
return decorate | 15,374 | 35.007026 | 84 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tools/create_cc_data_lxmert_style.py | #{'img_id': 'COCO_train2014_000000318556', 'labelf': {'vqa': [{'no': 1}, {'yes': 1}, {'no': 1}, {'blue': 1, 'blue and white': 0.3}]}, 'sentf': {'mscoco': ['A very clean and well decorated empty bathroom', 'A blue and white bathroom with butterfly themed wall tiles.', 'A bathroom with a border of butterflies and blue paint on the walls above it.', 'An angled view of a beautifully decorated bathroom.', 'A clock that blends in with the wall hangs in a bathroom. '], 'vqa': ['Is the sink full of water?', 'Are there any butterflies on the tiles?', 'Is this bathroom in a hotel?', 'What color are the walls?']}}
import json
target = "/local/harold/vqa/google_concetual/train"
with open("%s.tsv" % target, 'r') as f:
lines = f.readlines()
train_file_name = "/local/harold/ubert/lxmert/data/lxmert/{}.json".format("google_cc_train" if "train" in target else "google_cc_valid")
train_data = []
for i in range(len(lines)):
caption, url = lines[i].strip('\n').split("\t", 1)
one_datatum = {}
one_datatum["img_id"] = "{}/{}.jpg".format(target, i)
one_datatum["labelf"] = {}
one_datatum["sentf"] = {}
one_datatum["sentf"]["google_cc"] = []
one_datatum["sentf"]["google_cc"].append(caption)
train_data.append(one_datatum)
with open(train_file_name, 'w') as f:
json.dump(train_data, f) | 1,319 | 54 | 610 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tools/convert_nlvr2_lxmert_style.py | #{'img_id': 'COCO_train2014_000000318556', 'labelf': {'vqa': [{'no': 1}, {'yes': 1}, {'no': 1}, {'blue': 1, 'blue and white': 0.3}]}, 'sentf': {'mscoco': ['A very clean and well decorated empty bathroom', 'A blue and white bathroom with butterfly themed wall tiles.', 'A bathroom with a border of butterflies and blue paint on the walls above it.', 'An angled view of a beautifully decorated bathroom.', 'A clock that blends in with the wall hangs in a bathroom. '], 'vqa': ['Is the sink full of water?', 'Are there any butterflies on the tiles?', 'Is this bathroom in a hotel?', 'What color are the walls?']}}
import json
split = "valid"
target = "/local/harold/ubert/lxmert/data/lxmert/nlvr_for_pretrain_{}.json".format(split)
train_file_name = "/local/harold/ubert/lxmert/data/nlvr2/{}.json".format(split)
train_data = []
with open(train_file_name) as f:
data = json.load(f)
'''
{'identifier': 'train-10171-0-0', 'img0': 'train-10171-0-img0', 'img1': 'train-10171-0-img1', 'label': 0, 'sent': 'An image shows one leather pencil case, displayed open with writing implements tucked inside.', 'uid': 'nlvr2_train_0'}
'''
for one_data in data:
one_datatum = {}
one_datatum["img_id"] = one_data["img0"]
one_datatum["img_id_1"] = one_data["img1"]
one_datatum["uid"] = one_data["uid"]
one_datatum["identifier"] = one_data["identifier"]
one_datatum["label"] = one_data["label"]
one_datatum["labelf"] = {}
one_datatum["sentf"] = {}
one_datatum["sentf"]["nlvr"] = []
one_datatum["sentf"]["nlvr"].append(one_data["sent"])
train_data.append(one_datatum)
with open(target, 'w') as f:
json.dump(train_data, f) | 1,668 | 45.361111 | 610 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tools/convert_tsv_to_h5.py | import sys
import csv
import base64
import time
import torch
import numpy as np
from src.utils import load_obj_tsv_save_to_h5
load_obj_tsv_save_to_h5(
"data/mscoco_imgfeat/train2014_obj36.tsv",
"data/mscoco_imgfeat/train2014_obj36.h5",
"data/mscoco_imgfeat/train2014_obj36.json",
82783
)
load_obj_tsv_save_to_h5(
"data/vg_gqa_imgfeat/vg_gqa_obj36.tsv",
"data/vg_gqa_imgfeat/vg_gqa_obj36.h5",
"data/vg_gqa_imgfeat/vg_gqa_obj36.json",
148854
)
load_obj_tsv_save_to_h5(
"data/mscoco_imgfeat/val2014_obj36.tsv",
"data/mscoco_imgfeat/val2014_obj36.h5",
"data/mscoco_imgfeat/val2014_obj36.json",
40504
)
'''
load_obj_tsv_save_to_h5(
"data/nlvr2_imgfeat/train_obj36.tsv",
"data/nlvr2_imgfeat/train_obj36.h5",
"data/nlvr2_imgfeat/train_obj36.json",
103170
)
load_obj_tsv_save_to_h5(
"data/nlvr2_imgfeat/valid_obj36.tsv",
"data/nlvr2_imgfeat/valid_obj36.h5",
"data/nlvr2_imgfeat/valid_obj36.json",
8102
)'''
'''
load_obj_tsv_save_to_h5(
"data/nlvr2_imgfeat/test_obj36.tsv",
"data/nlvr2_imgfeat/test_obj36.h5",
"data/nlvr2_imgfeat/test_obj36.json",
8082
)'''
| 1,165 | 20.592593 | 47 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/box.py | import torch
import numpy as np
def heuristic_filter(box_a, box_b, image_size, threshhold = 0.15):
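    # Keep the box pair only if the two boxes are "close": when they do not
    # overlap along an axis, the gap along that axis (relative to the image
    # extent) must stay below `threshhold`.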
# center_mass
box_a_x_center = (box_a[0] + box_a[2]) / 2
box_b_x_center = (box_b[0] + box_b[2]) / 2
box_a_y_center = (box_a[1] + box_a[3]) / 2
box_b_y_center = (box_b[1] + box_b[3]) / 2
# X non overlap
if box_a[0] > box_b[2] or box_b[0] > box_a[2]:
if min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2])) / image_size[0] > threshhold:
return False
if box_a[1] > box_b[3] or box_b[1] > box_a[3]:
if min(abs(box_a[1] - box_b[3]), abs(box_b[1] - box_a[3])) / image_size[1] > threshhold:
return False
'''print(abs(box_b_x_center - box_a_x_center) / image_size[0])
if abs(box_b_x_center - box_a_x_center) / image_size[0] > threshhold:
return False
if abs(box_b_y_center - box_a_y_center) / image_size[1] > threshhold:
return False'''
return True
def determine_box_position_type(box_a, box_b, image_size):
if box_a[0] > box_b[2] or box_b[0] > box_a[2]: # No overlap
# Then calculate their distance
if box_a[1] > box_b[3] or box_b[1] > box_a[3]: # y not overlap
return ( "x, y not overlap",
(min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2])) / image_size[0]).item(),
(min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2])) / min(abs(box_a[0] - box_a[2]), abs(box_b[0] - box_b[2]))).item(),
(min(abs(box_a[0] - box_a[2]), abs(box_b[0] - box_b[2])) / image_size[0]).item()
)
else:
overlap_length = min(abs(box_a[1] - box_b[3]), abs(box_b[1] - box_a[3]))
overlap_ratio = overlap_length / min(abs(box_a[1] - box_a[3]), abs(box_b[1] - box_b[3]))
return ("x not overlap, y overlap", min(overlap_ratio.item(), 1))
else:
# there is overlap, calculate how much they overlap
overlap_length = min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2]))
overlap_ratio = overlap_length / min(abs(box_a[0] - box_a[2]), abs(box_b[0] - box_b[2]))
return min(overlap_ratio.item(), 1)
def add_to_the_left_to_the_right_relation(box_a, box_b, image_size, y_overlap_ratio_thresh, x_overlap_ratio_thresh):
if box_a[0] > box_b[2] or box_b[0] > box_a[2]: # No overlap
'''distance_ratio = min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2])) / image_size[0]
if distance_ratio < no_overlap_thresh:
return (True, box_a[0] > box_b[2]) # a is to the right of b, if box_a[0] > box_b[2]
else:
return (False, box_a[0] > box_b[2]) '''
if box_a[1] > box_b[3] or box_b[1] > box_a[3]: # y not overlap
return (False, box_a[0] > box_b[2])
else:
overlap_length = min(abs(box_a[1] - box_b[3]), abs(box_b[1] - box_a[3]))
overlap_ratio = overlap_length / min(abs(box_a[1] - box_a[3]), abs(box_b[1] - box_b[3]))
if overlap_ratio > y_overlap_ratio_thresh:
return (True, box_a[0] > box_b[0])
else:
return (False, box_a[0] > box_b[0])
else:
# there is overlap, calculate how much they overlap
overlap_length = min(abs(box_a[0] - box_b[2]), abs(box_b[0] - box_a[2]))
overlap_ratio = overlap_length / min(abs(box_a[0] - box_a[2]), abs(box_b[0] - box_b[2]))
if overlap_ratio < x_overlap_ratio_thresh:
return (True, box_a[0] > box_b[0])
else:
return (False, box_a[0] > box_b[0])
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def boxlist_iou(boxlist1, boxlist2):
"""Compute the intersection over union of two set of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Arguments:
box1: (BoxList) bounding boxes, sized [N,4].
box2: (BoxList) bounding boxes, sized [M,4].
Returns:
(tensor) iou, sized [N,M].
Reference:
https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
"""
if boxlist1.size != boxlist2.size:
raise RuntimeError(
"boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2))
boxlist1 = boxlist1.convert("xyxy")
boxlist2 = boxlist2.convert("xyxy")
N = len(boxlist1)
M = len(boxlist2)
area1 = boxlist1.area()
area2 = boxlist2.area()
box1, box2 = boxlist1.bbox, boxlist2.bbox
lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2]
rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2]
TO_REMOVE = 1
wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
iou = inter / (area1[:, None] + area2 - inter)
return iou
###########################################################################
### Torch Utils, creds to Max de Groot
###########################################################################
def bbox_intersections(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
"""
if isinstance(box_a, np.ndarray):
assert isinstance(box_b, np.ndarray)
return bbox_intersections_np(box_a, box_b)
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, 0] * inter[:, :, 1]
def bbox_overlaps(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes. Here we operate on
ground truth boxes and default boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
Return:
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
"""
if isinstance(box_a, np.ndarray):
assert isinstance(box_b, np.ndarray)
return bbox_overlaps_np(box_a, box_b)
inter = bbox_intersections(box_a, box_b)
area_a = ((box_a[:, 2] - box_a[:, 0]) *
(box_a[:, 3] - box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]
area_b = ((box_b[:, 2] - box_b[:, 0]) *
(box_b[:, 3] - box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
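# A minimal sanity check for bbox_overlaps (illustrative values only, not part
# of the original module): two 10x10 boxes overlapping on half their area
# should give IoU = 50 / 150 = 1/3.
if __name__ == "__main__":
    _a = torch.tensor([[0., 0., 10., 10.]])
    _b = torch.tensor([[5., 0., 15., 10.]])
    print(bbox_overlaps(_a, _b))  # expected: tensor([[0.3333]])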
| 7,027 | 40.099415 | 135 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/tag_data_utilis.py | import numpy as np
import torch.nn as nn
from param import args
from lxrt.entry import LXRTEncoder
from lxrt.modeling import BertLayerNorm, GeLU
from lxrt.tokenization import BertTokenizer
import torch
import logging
from collections import defaultdict
from copy import deepcopy
import random
'''
Utilities for building visual-tag inputs. Tags are now used extensively during
pre-training, so the snippets below create, mask, and pad them.
'''
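# `get_padding_lengths` (used by pad_np_arrays below) is not imported in this
# file; it presumably lives elsewhere in the repo and mirrors the AllenNLP
# helper of the same name. A minimal sketch of the assumed behavior, so the
# module is self-contained:
def get_padding_lengths(list_of_np_array):
    """Return {"dimension_i": max size along dimension i} over all arrays."""
    num_dims = max(arr.ndim for arr in list_of_np_array)
    lengths = {}
    for dim in range(num_dims):
        lengths["dimension_{}".format(dim)] = max(
            arr.shape[dim] if dim < arr.ndim else 0 for arr in list_of_np_array
        )
    return lengths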
def pad_np_arrays(list_of_np_array, padding_value, dtype):
if isinstance(list_of_np_array[0], list):
list_of_np_array = [np.array(i, dtype = dtype) for i in list_of_np_array]
padding_lengths = get_padding_lengths(list_of_np_array)
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
# Convert explicitly to an ndarray just in case it's a scalar (it'd end up not being an ndarray otherwise)
final_list = []
for array_index, array in enumerate(list_of_np_array):
return_array = np.asarray(np.ones(max_shape, dtype = dtype) * padding_value)
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(array.shape)
#if len(array.shape) < len(max_shape):
# slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(array.shape))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = array
final_list.append(return_array)
final_list = np.stack(final_list, 0)
tensor = torch.from_numpy(final_list)
return tensor
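# Illustrative usage (values hypothetical): two ragged int arrays padded into a
# [2, 3] LongTensor.
# >>> pad_np_arrays([np.array([1, 2]), np.array([3, 4, 5])], padding_value=0, dtype=np.int64)
# tensor([[1, 2, 0],
#         [3, 4, 5]])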
def transfer_object_labels_to_symbolic_ids(obj_labels, attribute_labels, symbolic_vocab, obj_confs = None, attr_confs = None):
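# Maps detector object/attribute label ids into the shared symbolic vocabulary;
# with prob args.insert_attr_ratio an attribute tag is emitted instead of the
# object tag, optionally sampling among the detector's top-k labels when
# args.kl_divergence / non_top1_sampling are set.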
return_list = []
for index in range(len(obj_labels)):
prob = random.random()
if prob < args.get("insert_attr_ratio", 0.0):
if args.get("kl_divergence", False):
if args.get("non_top1_sampling", False):
p = attr_confs[index][attribute_labels[index]]
p = p / p.sum()
attr_label_i = np.random.choice(attribute_labels[index], p=p)
#attr_label_i = np.random.choice(attr_confs.shape[-1], p=attr_confs[index])
else:
attr_label_i = attribute_labels[index, 0]
else:
attr_label_i = attribute_labels[index]
return_list.append(symbolic_vocab.word2id[symbolic_vocab.attr_id2word(attr_label_i)])
else:
if args.get("kl_divergence", False):
if args.get("non_top1_sampling", False):
new_obj_confs = deepcopy(obj_confs)
new_obj_confs[new_obj_confs<0.1] = 0
p = new_obj_confs[index][obj_labels[index]]
sum_p = p.sum()
if sum_p == 0:
obj_label_i = obj_labels[index, 0]
else:
p = p / sum_p
obj_label_i = np.random.choice(obj_labels[index], p=p)
#obj_label_i = np.random.choice(obj_confs.shape[-1], p=obj_confs[index])
else:
obj_label_i = obj_labels[index, 0]
else:
obj_label_i = obj_labels[index]
return_list.append(symbolic_vocab.word2id[symbolic_vocab.obj_id2word(obj_label_i)])
return np.array(return_list, dtype=np.int64)
def convert_semantic_objective(labels, symbolic_vocab, obj = False, attr = False, tokenizer=None):
if obj:
words = [symbolic_vocab.obj_id2word(i) for i in labels]
elif attr:
words = [symbolic_vocab.attr_id2word(i) for i in labels]
else:
assert(0)
words = [symbolic_vocab.id2objective[symbolic_vocab.word2id[i]] for i in words]
semantic_objective = np.array(words, dtype=np.int64) # object_num * 2
return semantic_objective
def create_tags_pretrain(obj_labels, attr_labels, obj_confs, attr_confs, tokenizer, symbolic_vocab, visual_tags_box, feat_mask, use_bert_input = True):
obj_labels_transformed = transfer_object_labels_to_symbolic_ids(obj_labels, attr_labels, symbolic_vocab, obj_confs, attr_confs)
visual_tags_bert_words = []
visual_tags_box_bert_input = []
visual_tags_mlm_labels = []
visual_tags_segment_ids = []
for tag_index, tag in enumerate(obj_labels_transformed):
tag_word = symbolic_vocab.id2word[tag]
if args.get("use_segment_id_for_attr", False):
seg_id = symbolic_vocab.get_seg_id(tag)
sub_tokens = tokenizer.tokenize(tag_word)
prob = random.random()
if prob < args.get('tag_mask_ratio', 0.15) or (feat_mask[tag_index] != 0 and random.random() < args.get("tag_joint_mask_ratio", 0.5)):
new_prob = random.random()
if new_prob < 0.8:
for sub_token in sub_tokens:
visual_tags_bert_words.append("[MASK]")
elif new_prob < 0.9:
for sub_token in sub_tokens:
visual_tags_bert_words.append(random.choice(list(tokenizer.vocab.keys())))
else:
visual_tags_bert_words.extend(sub_tokens)
for sub_token in sub_tokens:
try:
visual_tags_mlm_labels.append(tokenizer.vocab[sub_token])
except KeyError:
# For unknown words (should not occur with BPE vocab)
visual_tags_mlm_labels.append(tokenizer.vocab["[UNK]"])
logging.warning("Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
else:
for sub_token in sub_tokens:
# no masking token (will be ignored by loss function later)
visual_tags_bert_words.append(sub_token)
visual_tags_mlm_labels.append(-1)
# duplicate box
for sub_token in sub_tokens:
visual_tags_box_bert_input.append(visual_tags_box[tag_index])
if args.get("use_segment_id_for_attr", False):
visual_tags_segment_ids.append(seg_id)
visual_tags = tokenizer.convert_tokens_to_ids(visual_tags_bert_words)
visual_tags_objective = visual_tags_mlm_labels
visual_tags_mask = [1] * len(visual_tags)
visual_tags_box = visual_tags_box_bert_input
visual_tags_segment_ids = None
return visual_tags, visual_tags_objective, visual_tags_mask, visual_tags_box, visual_tags_segment_ids
def create_tags(obj_labels, attr_labels, obj_confs, attr_confs, tokenizer, symbolic_vocab, visual_tags_box, use_bert_input = True, record_index = None):
obj_labels_transformed = transfer_object_labels_to_symbolic_ids(obj_labels, attr_labels, symbolic_vocab, obj_confs, attr_confs)
visual_tags_bert_words = []
visual_tags_box_bert_input = []
#visual_tags_mlm_labels = []
visual_tags_segment_ids = []
recorded_indexes = []
counter = 0
for tag_index, tag in enumerate(obj_labels_transformed):
tag_word = symbolic_vocab.id2word[tag]
if args.get("use_segment_id_for_attr", False):
seg_id = symbolic_vocab.get_seg_id(tag)
sub_tokens = tokenizer.tokenize(tag_word)
for sub_token in sub_tokens:
# no masking token (will be ignored by loss function later)
visual_tags_bert_words.append(sub_token)
#visual_tags_mlm_labels.append(-1)
if tag_index == record_index:
recorded_indexes.append(counter)
counter += 1
# duplicate box
for sub_token in sub_tokens:
visual_tags_box_bert_input.append(visual_tags_box[tag_index])
if args.get("use_segment_id_for_attr", False):
visual_tags_segment_ids.append(seg_id)
visual_tags = tokenizer.convert_tokens_to_ids(visual_tags_bert_words)
visual_tags_mask = [1] * len(visual_tags)
visual_tags_box = visual_tags_box_bert_input
visual_tags_segment_ids = None
visual_tags_type = None
if record_index is not None:
return visual_tags, visual_tags_mask, visual_tags_box, visual_tags_type, visual_tags_segment_ids, recorded_indexes
return visual_tags, visual_tags_mask, visual_tags_box, visual_tags_type, visual_tags_segment_ids | 8,378 | 44.291892 | 152 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/text_data.py | import random
from torch.utils.data import Dataset
from lxrt.tokenization import BertTokenizer
import logging
from lxmert_data import InputExample
import json
from param import args
from lxmert_data import InputFeatures, random_word
import os
from src.tools import sharearray
import gc
from tqdm import tqdm
import numpy as np
class GeneralCorpusNP(Dataset):
def __init__(self, ann_file, pretrained_model_name, tokenizer=None, seq_len=64, min_seq_len=64,
encoding="utf-8", on_memory=True,
**kwargs):
assert on_memory, "only support on_memory mode!"
self.tokenizer = tokenizer if tokenizer is not None else BertTokenizer.from_pretrained(pretrained_model_name)
self.vocab = self.tokenizer.vocab
self.seq_len = seq_len
self.min_seq_len = min_seq_len
self.on_memory = on_memory
self.ann_file = ann_file
self.encoding = encoding
self.test_mode = False
self.do_no_fill = False
self.use_mismatch_objective = args.get("task_matched", False)
#self.load_corpus_with_passages()
# load samples into memory
if on_memory:
if self.use_mismatch_objective:
#self.corpus = self.load_corpus_with_passages_preprocess()
self.load_corpus_with_passages_preprocess()
else:
self.corpus = self.load_corpus()
if args.get("presegment_sentence", False):
self.presegment_sentence()
print("Using {} with {} data.\n\n".format(self.ann_file, len(self)))
def load_corpus(self):
corpus = []
print("\n\nLoading text only corpus...")
for ann_file in self.ann_file.split('+'):
with open(ann_file, 'r', encoding=self.encoding) as f:
all_text = f.read().lower()
corpus.extend([l.strip('\n').strip('\r').strip('\n') for l in all_text.split("\n")])
corpus = [l.strip() for l in corpus if l.strip() != '']
return corpus
def load_corpus_with_passages_preprocess(self):
corpus = []
print("\n\nLoading text only corpus...")
if os.path.exists(args.text_only_corpus_cache):
with open(args.text_only_corpus_cache, 'rb') as f:
corpus = np.load(f)
self.corpus = sharearray.cache(self.ann_file.split("/")[-1], corpus)
del corpus
gc.collect()
with open(args.text_only_corpus_cache.replace("npy", "json"), 'r') as f:
files = json.load(f)
[self.passage_split, self.sentence_split] = files
self.sentence_counter = [0] * len(self.sentence_split)
else:
new_text = []
passage_split = []
sentence_split = []
current_counter = 0
for ann_file in self.ann_file.split('+'):
with open(ann_file, 'r', encoding=self.encoding) as f:
all_text = f.read().lower()
one_passage_sentence_split = []
counter = 0
for line in tqdm(all_text.split("\n")):
line = line.strip('\n').strip('\r').strip('\n')
line = self.tokenizer.wordpiece_tokenizer.tokenize(line)
line_ids = self.tokenizer.convert_tokens_to_ids(line)
if len(line) != 0:
new_text.extend(line_ids)
counter += len(line_ids)
one_passage_sentence_split.append(counter)
else:
if counter != 0:
#all_text.extend(one_passage)
sentence_split.append(one_passage_sentence_split)
current_counter += counter
passage_split.append(current_counter)
one_passage = []
one_passage_sentence_split = []
counter = 0
#corpus.extend([l.strip('\n').strip('\r').strip('\n') for l in all_text.split("\n")])
#corpus = [l.strip() for l in corpus if l.strip() != '']
self.sentence_counter = [0] * len(passage_split) # keeps a cursor per passage, recording where the next retrieval should resume
self.corpus = np.array(new_text)
self.passage_split = passage_split
self.sentence_split = sentence_split
with open(args.text_only_corpus_cache, 'wb') as f:
np.save(f, self.corpus)
with open(args.text_only_corpus_cache.replace("npy", "json"), 'w') as f:
json.dump([self.passage_split, self.sentence_split], f)
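# NOTE: the assert below appears intentional -- the job stops once the cache
# files are written, so a re-run loads them via the branch above (this reading
# of the intent is an assumption; it is not documented in the source).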
assert(0)
#def save_sentence_counter(self):
#
def __len__(self):
if args.get("presegment_sentence", False) and "sbu-captions-all.json" not in self.ann_file:
return len(self.mapping)
return len(self.passage_split)
def retrieve_a_piece(self, index, seq_len):
if index == 0:
begin = 0
else:
begin = self.passage_split[index - 1]
end = self.passage_split[index]
text = self.corpus[begin:end]
sentence_split = self.sentence_split[index]
## Retrieve part of the passage, accumulating sentences up to seq_len
start_index = self.sentence_counter[index]
all_tokenized_words = []
all_mlm_labels = []
current_length = 0
final_index = -1
for i in range(start_index, len(sentence_split)):
if i == 0:
begin = 0
else:
begin = sentence_split[i - 1]
end = sentence_split[i]
tokens = self.tokenizer.convert_ids_to_tokens(text[begin:end])
tokens, mlm_labels = self.random_word_wwm(tokens)
if current_length == 0 or len(tokens) + current_length <= seq_len:
all_tokenized_words.extend(tokens)
all_mlm_labels.extend(mlm_labels)
current_length += len(tokens)
final_index = (i + 1) % len(sentence_split)
else:
final_index = (i + 1) % len(sentence_split)
break
self.sentence_counter[index] = final_index # Start from here the next time retrieve_a_piece is called; not sure how this behaves with multiple dataloader workers...
#print(index, self.sentence_counter[index])
all_tokenized_words = all_tokenized_words[:seq_len]
all_mlm_labels = all_mlm_labels[:seq_len]
return all_tokenized_words, all_mlm_labels
def exhaustively_retrieve_a_piece(self, index, seq_len):
all_ranges = []
if index == 0:
begin = 0
else:
begin = self.passage_split[index - 1]
end = self.passage_split[index]
text = self.corpus[begin:end]
sentence_split = self.sentence_split[index]
## Retrieve part of the passage, exhaustively splitting it into seq_len-sized chunks
start_index = 0 #self.sentence_counter[index]
while True:
all_tokenized_words = []
all_mlm_labels = []
current_length = 0
final_index = -1
sent_begin = 0
sent_end = 0
for i in range(start_index, len(sentence_split)):
if i == 0:
sent_begin = 0
else:
sent_begin = sentence_split[i - 1]
tmp_sent_end = sentence_split[i]
if current_length == 0 or (tmp_sent_end - sent_begin) + current_length <= seq_len:
current_length += tmp_sent_end - sent_begin
sent_end = tmp_sent_end
final_index = (i + 1) % len(sentence_split)
else:
final_index = (i + 1) % len(sentence_split)
break
if start_index == 0:
sent_begin = 0
else:
sent_begin = sentence_split[start_index - 1]
start_index = final_index
all_ranges.append((begin + sent_begin, begin + sent_end))
if start_index == 0:
break
return all_ranges
def presegment_sentence(self):
all_segments = []
self.mapping = {}
current_len = 0
for i in tqdm(range(len(self.passage_split))):
tmp = self.exhaustively_retrieve_a_piece(i, self.seq_len // 2)
for j in range(len(tmp)):
self.mapping[current_len + j] = current_len + (j + 1)%len(tmp)
current_len += len(tmp)
all_segments.extend(tmp)
self.all_segments = all_segments
def retrieve_a_piece_preseged(self, index, seq_len):
seg = self.all_segments[index]
tokens = self.tokenizer.convert_ids_to_tokens(self.corpus[seg[0]:seg[1]])
tokens, mlm_labels = self.random_word_wwm(tokens)
tokens = tokens[:seq_len]
mlm_labels = mlm_labels[:seq_len]
return tokens, mlm_labels
def __getitem__(self, item):
if self.use_mismatch_objective:
i = 0
max_seq_length = self.seq_len // 2 # We have two parts
if args.get("presegment_sentence", False) and "sbu-captions-all.json" not in self.ann_file:
text_a_tokens, text_a_labels = self.retrieve_a_piece_preseged(item, seq_len = max_seq_length)
# First we take out some sentences
if random.random() < 0.5:
# Take out our own
b_index = self.mapping[item]
text_b_tokens, text_b_labels = self.retrieve_a_piece_preseged(b_index, seq_len=max_seq_length)
match = 1
else:
random_index = random.randint(0, len(self) - 1)
while random_index == item:
random_index = random.randint(0, len(self) - 1)
text_b_tokens, text_b_labels = self.retrieve_a_piece_preseged(random_index, seq_len=max_seq_length)
match = 0
else:
text_a_tokens, text_a_labels = self.retrieve_a_piece(item, seq_len = max_seq_length)
# First we take out some sentences
if random.random() < 0.5:
# Take out our own
text_b_tokens, text_b_labels = self.retrieve_a_piece(item, seq_len=max_seq_length)
match = 1
else:
random_index = random.randint(0, len(self) - 1)
while random_index == item:
random_index = random.randint(0, len(self) - 1)
text_b_tokens, text_b_labels = self.retrieve_a_piece(random_index, seq_len=max_seq_length)
match = 0
text_a_ids = self.tokenizer.convert_tokens_to_ids(text_a_tokens)
text_b_ids = self.tokenizer.convert_tokens_to_ids(text_b_tokens)
example = InputExample(
None, (text_a_tokens, text_b_tokens), (None, None),
(None, None), (None, None),
match, 1,
mlm_labels=(text_a_labels, text_b_labels),
token_ids=(text_a_ids, text_b_ids),
max_seq_len = self.seq_len + 3
)
if args.get("faster_loading", False):
return self.convert_example_to_features(example, self.seq_len + 3, self.tokenizer)
raw = self.corpus[item]
# tokenize
tokens = self.tokenizer.basic_tokenizer.tokenize(raw.lower())
if not self.do_no_fill:
# add more tokens if len(tokens) < min_len
_cur = (item + 1) % len(self.corpus)
while len(tokens) < self.min_seq_len:
_cur_tokens = self.tokenizer.basic_tokenizer.tokenize(self.corpus[_cur])
tokens.extend(_cur_tokens)
_cur = (_cur + 1) % len(self.corpus)
# masked language modeling
tokens, mlm_labels = self.random_word_wwm(tokens)
# convert token to its vocab id
ids = self.tokenizer.convert_tokens_to_ids(tokens)
# truncate
if len(ids) > self.seq_len:
ids = ids[:self.seq_len]
mlm_labels = mlm_labels[:self.seq_len]
example = InputExample(
None, tokens, (None, None),
(None, None), (None, None),
None, 1,
mlm_labels=mlm_labels,
token_ids=ids,
max_seq_len = self.seq_len
)
if args.get("faster_loading", False):
return self.convert_example_to_features(example, args.get("max_seq_length", 20), self.tokenizer)
return example
def convert_example_to_features(self, example: InputExample, max_seq_length, tokenizer, hybrid_num=10):
if isinstance(example.mlm_labels, tuple):
text_a_ids, text_b_ids = example.token_ids
text_a_labels, text_b_labels = example.mlm_labels
input_ids = tokenizer.convert_tokens_to_ids(["[CLS]"]) + text_a_ids + tokenizer.convert_tokens_to_ids(["[SEP]"]) + text_b_ids + tokenizer.convert_tokens_to_ids(["[SEP]"])
lm_label_ids = [-1] + text_a_labels + [-1] + text_b_labels + [-1]
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
features = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
visual_feats=(None, None),
obj_labels={
'obj': (None, None),
'attr': (None, None),
'feat': (None, None),
},
is_matched=example.is_matched,
ans=-1,
visual_tags = None,
visual_tags_objective = None,
visual_tags_mask = None,
visual_tags_box=None,
visual_tags_mismatch=None
)
return features
if example.mlm_labels is not None: # The data is already pre-masked
input_ids = example.token_ids
lm_label_ids = example.mlm_labels
max_seq_len = example.max_seq_len + 2
# Add [CLS] and [SEP]
input_ids = tokenizer.convert_tokens_to_ids(["[CLS]"]) + input_ids + tokenizer.convert_tokens_to_ids(["[SEP]"])
lm_label_ids = [-1] + lm_label_ids + [-1]
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_len:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
features = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
visual_feats=(None, None),
obj_labels={
'obj': (None, None),
'attr': (None, None),
'feat': (None, None),
},
is_matched=1,
ans=-1,
visual_tags = None,
visual_tags_objective = None,
visual_tags_mask = None,
visual_tags_box=None,
visual_tags_mismatch=None
)
return features
def random_word_wwm(self, tokens):
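# Whole-word masking: each input token is wordpiece-tokenized first, and one
# masking decision (mask / randomize / keep, 80/10/10 at rate 0.15) is applied
# to all of a word's sub-tokens together, so prediction targets cover complete
# words rather than isolated wordpieces.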
output_tokens = []
output_label = []
for i, token in enumerate(tokens):
sub_tokens = self.tokenizer.wordpiece_tokenizer.tokenize(token)
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
for sub_token in sub_tokens:
output_tokens.append("[MASK]")
# 10% randomly change token to random token
elif prob < 0.9:
for sub_token in sub_tokens:
output_tokens.append(random.choice(list(self.tokenizer.vocab.keys())))
# -> rest 10% randomly keep current token
else:
for sub_token in sub_tokens:
output_tokens.append(sub_token)
# append current token to output (we will predict these later)
for sub_token in sub_tokens:
try:
output_label.append(self.tokenizer.vocab[sub_token])
except KeyError:
# For unknown words (should not occur with BPE vocab)
output_label.append(self.tokenizer.vocab["[UNK]"])
logging.warning("Cannot find sub_token '{}' in vocab. Using [UNK] insetad".format(sub_token))
else:
for sub_token in sub_tokens:
# no masking token (will be ignored by loss function later)
output_tokens.append(sub_token)
output_label.append(-1)
## if no word masked, random choose a word to mask
# if all([l_ == -1 for l_ in output_label]):
# choosed = random.randrange(0, len(output_label))
# output_label[choosed] = self.tokenizer.vocab[tokens[choosed]]
return output_tokens, output_label
def string_to_sequence(s: str, dtype=np.int32) -> np.ndarray:
return np.array([ord(c) for c in s], dtype=dtype)
def sequence_to_string(seq: np.ndarray) -> str:
return ''.join([chr(c) for c in seq]) | 18,260 | 38.270968 | 182 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/qa_answer_table.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import json
import torch
class AnswerTable:
ANS_CONVERT = {
"a man": "man",
"the man": "man",
"a woman": "woman",
"the woman": "woman",
'one': '1',
'two': '2',
'three': '3',
'four': '4',
'five': '5',
'six': '6',
'seven': '7',
'eight': '8',
'nine': '9',
'ten': '10',
'grey': 'gray',
}
def __init__(self, dsets=None):
try:
self.all_ans = json.load(open("data/lxmert/all_ans.json"))
except:
self.all_ans = json.load(open("/local/harold/ubert/lxmert/data/lxmert/all_ans.json"))
if dsets is not None:
dsets = set(dsets)
# If the answer is used in the dsets
self.anss = [ans['ans'] for ans in self.all_ans if
len(set(ans['dsets']) & dsets) > 0]
else:
self.anss = [ans['ans'] for ans in self.all_ans]
self.ans_set = set(self.anss)
self._id2ans_map = self.anss
self._ans2id_map = {ans: ans_id for ans_id, ans in enumerate(self.anss)}
assert len(self._id2ans_map) == len(self._ans2id_map)
for ans_id, ans in enumerate(self._id2ans_map):
assert self._ans2id_map[ans] == ans_id
def convert_ans(self, ans):
if len(ans) == 0:
return ""
ans = ans.lower()
if ans[-1] == '.':
ans = ans[:-1].strip()
if ans.startswith("a "):
ans = ans[2:].strip()
if ans.startswith("an "):
ans = ans[3:].strip()
if ans.startswith("the "):
ans = ans[4:].strip()
if ans in self.ANS_CONVERT:
ans = self.ANS_CONVERT[ans]
return ans
def ans2id(self, ans):
return self._ans2id_map[ans]
def id2ans(self, ans_id):
return self._id2ans_map[ans_id]
def ans2id_map(self):
return self._ans2id_map.copy()
def id2ans_map(self):
return self._id2ans_map.copy()
def used(self, ans):
return ans in self.ans_set
def all_answers(self):
return self.anss.copy()
@property
def num_answers(self):
return len(self.anss)
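# Illustrative normalization behavior (instantiation shown is hypothetical; the
# constructor reads data/lxmert/all_ans.json):
# AnswerTable().convert_ans("The man.") -> "man"
# AnswerTable().convert_ans("two") -> "2" (via ANS_CONVERT)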
def load_lxmert_qa(path, model, label2ans):
"""
Load model weights from LXMERT pre-training.
The answers in the fine-tuned QA task (indicated by label2ans)
would also be properly initialized with LXMERT pre-trained
QA heads.
:param path: Path to LXMERT snapshot.
:param model: LXRT model instance.
:param label2ans: The label2ans dict of fine-tuned QA datasets, like
{0: 'cat', 1: 'dog', ...}
:return:
"""
print("Load QA pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load("%s_LXRT.pth" % path)
model_state_dict = model.state_dict()
# Handle Multi-GPU pre-training --> Single GPU fine-tuning
for key in list(loaded_state_dict.keys()):
loaded_state_dict[key.replace("module.", '')] = loaded_state_dict.pop(key)
# Isolate bert model
bert_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith('bert.'):
bert_state_dict[key] = value
# Isolate answer head
answer_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith("answer_head."):
answer_state_dict[key.replace('answer_head.', '')] = value
# Do surgery on answer state dict
ans_weight = answer_state_dict['logit_fc.3.weight']
ans_bias = answer_state_dict['logit_fc.3.bias']
import copy
new_answer_weight = copy.deepcopy(model_state_dict['logit_fc.3.weight'])
new_answer_bias = copy.deepcopy(model_state_dict['logit_fc.3.bias'])
answer_table = AnswerTable()
loaded = 0
unload = 0
if type(label2ans) is list:
label2ans = {label: ans for label, ans in enumerate(label2ans)}
for label, ans in label2ans.items():
new_ans = answer_table.convert_ans(ans)
if answer_table.used(new_ans):
ans_id_9500 = answer_table.ans2id(new_ans)
new_answer_weight[label] = ans_weight[ans_id_9500]
new_answer_bias[label] = ans_bias[ans_id_9500]
loaded += 1
else:
new_answer_weight[label] = 0.
new_answer_bias[label] = 0.
unload += 1
print("Loaded %d answers from LXRTQA pre-training and %d not" % (loaded, unload))
print()
answer_state_dict['logit_fc.3.weight'] = new_answer_weight
answer_state_dict['logit_fc.3.bias'] = new_answer_bias
# Load Bert Weights
bert_model_keys = set(model.lxrt_encoder.model.state_dict().keys())
bert_loaded_keys = set(bert_state_dict.keys())
assert len(bert_model_keys - bert_loaded_keys) == 0
model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)
# Load Answer Logic FC Weights
model_keys = set(model.state_dict().keys())
ans_loaded_keys = set(answer_state_dict.keys())
assert len(ans_loaded_keys - model_keys) == 0
model.load_state_dict(answer_state_dict, strict=False)
def load_lxmert_from_pretrain_noqa(path, model):
"""
Load model weights from LXMERT pre-training.
The answers in the fine-tuned QA task (indicated by label2ans)
would also be properly initialized with LXMERT pre-trained
QA heads.
:param path: Path to LXMERT snapshot.
:param model: LXRT model instance.
:param label2ans: The label2ans dict of fine-tuned QA datasets, like
{0: 'cat', 1: 'dog', ...}
:return:
"""
print("Load QA pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load("%s_LXRT.pth" % path)
model_state_dict = model.state_dict()
# Handle Multi-GPU pre-training --> Single GPU fine-tuning
for key in list(loaded_state_dict.keys()):
loaded_state_dict[key.replace("module.", '')] = loaded_state_dict.pop(key)
# Isolate bert model
#bert_state_dict = {}
#for key, value in loaded_state_dict.items():
# if key.startswith('bert.'):
# bert_state_dict[key] = value
# Load Bert Weights
load_state_dict_flexible(model.lxrt_encoder.model, loaded_state_dict) #model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)
if model.lxrt_encoder.load_pretrain_head:
print("\nLoad pre-trained head\n")
head_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith('cls.'):
head_state_dict[key.replace("cls.", "")] = value
load_state_dict_flexible(model.lxrt_encoder.pretrained_head, head_state_dict)
'''# Load Answer Logic FC Weights
model_keys = set(model.state_dict().keys())
ans_loaded_keys = set(answer_state_dict.keys())
assert len(ans_loaded_keys - model_keys) == 0
model.load_state_dict(answer_state_dict, strict=False)'''
def load_lxmert_for_vcr_finetune_from_vcr_pretrain(path, model):
"""
Load model weights from LXMERT pre-training.
The answers in the fine-tuned QA task (indicated by label2ans)
would also be properly initialized with LXMERT pre-trained
QA heads.
:param path: Path to LXMERT snapshot.
:param model: LXRT model instance.
:param label2ans: The label2ans dict of fine-tuned QA datasets, like
{0: 'cat', 1: 'dog', ...}
:return:
"""
print("Load QA pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load("%s_LXRT.pth" % path)
model_state_dict = model.state_dict()
# Handle Multi-GPU pre-training --> Single GPU fine-tuning
for key in list(loaded_state_dict.keys()):
loaded_state_dict[key.replace("model.module.", '')] = loaded_state_dict.pop(key)
# Isolate bert model
bert_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith('bert.'):
bert_state_dict[key] = value
# Load Bert Weights
bert_model_keys = set(model.lxrt_encoder.model.state_dict().keys())
bert_loaded_keys = set(bert_state_dict.keys())
load_state_dict_flexible(model.lxrt_encoder.model, bert_state_dict) #model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)
'''# Load Answer Logic FC Weights
model_keys = set(model.state_dict().keys())
ans_loaded_keys = set(answer_state_dict.keys())
assert len(ans_loaded_keys - model_keys) == 0
model.load_state_dict(answer_state_dict, strict=False)'''
def load_lxmert_from_pretrain_vcr_pretrain(path, model):
"""
Load model weights from LXMERT pre-training.
The answers in the fine-tuned QA task (indicated by label2ans)
would also be properly initialized with LXMERT pre-trained
QA heads.
:param path: Path to LXMERT snapshot.
:param model: LXRT model instance.
:param label2ans: The label2ans dict of fine-tuned QA datasets, like
{0: 'cat', 1: 'dog', ...}
:return:
"""
print("Load QA pre-trained LXMERT from %s " % path)
loaded_state_dict = torch.load("%s_LXRT.pth" % path)
model_state_dict = model.state_dict()
# Handle Multi-GPU pre-training --> Single GPU fine-tuning
for key in list(loaded_state_dict.keys()):
loaded_state_dict[key.replace("module.", '')] = loaded_state_dict.pop(key)
load_state_dict_flexible(model.model, loaded_state_dict) #model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)
'''# Load Answer Logic FC Weights
model_keys = set(model.state_dict().keys())
ans_loaded_keys = set(answer_state_dict.keys())
assert len(ans_loaded_keys - model_keys) == 0
model.load_state_dict(answer_state_dict, strict=False)'''
def load_lxmert_from_sgg_and_lxmert_pretrain(path, model, label2ans):
"""
Load model weights from LXMERT pre-training.
The answers in the fine-tuned QA task (indicated by label2ans)
would also be properly initialized with LXMERT pre-trained
QA heads.
:param path: Path to LXMERT snapshot.
:param model: LXRT model instance.
:param label2ans: The label2ans dict of fine-tuned QA datasets, like
{0: 'cat', 1: 'dog', ...}
:return:
"""
print("Load LXMERT pre-trained for sgg and lxmert pre-training from %s " % path)
loaded_state_dict = torch.load(path)["model"]
model_state_dict = model.state_dict()
# Handle Multi-GPU pre-training --> Single GPU fine-tuning
#for key in list(loaded_state_dict.keys()):
# loaded_state_dict[key.replace("module.", '')] = loaded_state_dict.pop(key)
new_loaded_state_dict = {}
for key in list(loaded_state_dict.keys()):
if "lxrt" in key:
new_loaded_state_dict[key.split("lxrt.")[-1]] = loaded_state_dict[key]
loaded_state_dict = new_loaded_state_dict
# Isolate bert model
bert_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith('bert.'):
bert_state_dict[key] = value
# Isolate answer head
answer_state_dict = {}
for key, value in loaded_state_dict.items():
if key.startswith("answer_head."):
answer_state_dict[key.replace('answer_head.', '')] = value
# Do surgery on answer state dict
ans_weight = answer_state_dict['logit_fc.3.weight']
ans_bias = answer_state_dict['logit_fc.3.bias']
import copy
new_answer_weight = copy.deepcopy(model_state_dict['logit_fc.3.weight'])
new_answer_bias = copy.deepcopy(model_state_dict['logit_fc.3.bias'])
answer_table = AnswerTable()
loaded = 0
unload = 0
if type(label2ans) is list:
label2ans = {label: ans for label, ans in enumerate(label2ans)}
for label, ans in label2ans.items():
new_ans = answer_table.convert_ans(ans)
if answer_table.used(new_ans):
ans_id_9500 = answer_table.ans2id(new_ans)
new_answer_weight[label] = ans_weight[ans_id_9500]
new_answer_bias[label] = ans_bias[ans_id_9500]
loaded += 1
else:
new_answer_weight[label] = 0.
new_answer_bias[label] = 0.
unload += 1
print("Loaded %d answers from LXRTQA pre-training and %d not" % (loaded, unload))
print()
answer_state_dict['logit_fc.3.weight'] = new_answer_weight
answer_state_dict['logit_fc.3.bias'] = new_answer_bias
# Load Bert Weights
bert_model_keys = set(model.lxrt_encoder.model.state_dict().keys())
bert_loaded_keys = set(bert_state_dict.keys())
#print(len(bert_model_keys - bert_loaded_keys))
assert len(bert_model_keys - bert_loaded_keys) == 0
model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)
#load_state_dict_flexible(model.lxrt_encoder.model, bert_state_dict)
# Load Answer Logic FC Weights
model_keys = set(model.state_dict().keys())
ans_loaded_keys = set(answer_state_dict.keys())
assert len(ans_loaded_keys - model_keys) == 0
model.load_state_dict(answer_state_dict, strict=False)
def load_state_dict_flexible(model, state_dict):
try:
model.load_state_dict(state_dict)
except:
print("Full loading failed!! Try partial loading!!")
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Skipped: " + name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
print("Successfully loaded: "+name)
except:
print("Part load failed: " + name) | 13,691 | 34.842932 | 147 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/lxmert_data.py | # coding=utf-8
# Copyleft 2019 project LXRT.
from collections import defaultdict
import json
import random
import numpy as np
from torch.utils.data import Dataset
import torch
from param import args
from src.pretrain.qa_answer_table import AnswerTable
from src.utils import load_obj_tsv
from copy import deepcopy
import h5py
from lxrt.h5_data import ImageFeatureDataset
from lxrt.tokenization import BertTokenizer
from src.pretrain import tag_data_utilis
from tqdm import tqdm
from src.tools import sharearray
import os
TINY_IMG_NUM = 500
FAST_IMG_NUM = 5000
Split2ImgFeatPath = {
'mscoco_train': '/local/harold/ubert/lxmert/data/mscoco_imgfeat/train2014_obj36.tsv',
'mscoco_minival': '/local/harold/ubert/lxmert/data/mscoco_imgfeat/val2014_obj36.tsv',
'mscoco_nominival': '/local/harold/ubert/lxmert/data/mscoco_imgfeat/val2014_obj36.tsv',
'vgnococo': '/local/harold/ubert/lxmert/data/vg_gqa_imgfeat/vg_gqa_obj36.tsv',
}
Split2ImgFeatPath_h5 = {
'mscoco_train': 'data/mscoco_imgfeat/train2014_obj36.h5',
'mscoco_minival': 'data/mscoco_imgfeat/val2014_obj36.h5',
'mscoco_nominival': 'data/mscoco_imgfeat/val2014_obj36.h5',
'vgnococo': 'data/vg_gqa_imgfeat/vg_gqa_obj36.h5',
"nlvr_for_pretrain_train": "data/nlvr2_imgfeat/train_obj36.h5",
"nlvr_for_pretrain_valid": "data/nlvr2_imgfeat/valid_obj36.h5",
"flickr_train": 'data/flickr30k/fixed36_no_features_split_0_of_1_splits.h5'
}
class InputExample(object):
"""A single training/test example for the language model."""
def __init__(self, uid, sent, visual_feats=None,
obj_labels=None, attr_labels=None,
is_matched=None, label=None, sent_b=None,
use_visual_tag_flag=False,
mlm_labels=None,token_ids=None,max_seq_len=96):
self.uid = uid
self.sent = sent
self.visual_feats = visual_feats
self.obj_labels = obj_labels
self.attr_labels = attr_labels
self.is_matched = is_matched # whether the visual and obj matched
self.label = label
self.sent_b = sent_b
self.use_visual_tag_flag = use_visual_tag_flag
# The following attributes are used for the bookcorpus/wikipedia pre-training
self.mlm_labels = mlm_labels
self.token_ids = token_ids
self.max_seq_len = max_seq_len
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids, input_mask, segment_ids, lm_label_ids,
visual_feats, obj_labels,
is_matched, ans,
visual_tags = None,
visual_tags_objective = None,
visual_tags_mask = None,
visual_tags_box=None,
visual_tags_mismatch=None,
obj_labels_transformed_mismatch=None,
visual_tags_box_mismatch=None,
use_visual_tag_flag=False,
visual_tags_segment_ids=None,
visual_feats_seg_ids=None
):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.lm_label_ids = lm_label_ids
self.visual_feats = visual_feats
self.obj_labels = obj_labels
self.is_matched = is_matched
self.ans = ans
self.visual_tags = visual_tags
self.visual_tags_objective = visual_tags_objective
self.visual_tags_mask = visual_tags_mask
self.visual_tags_box = visual_tags_box
self.visual_tags_mismatch = visual_tags_mismatch
self.obj_labels_transformed_mismatch = obj_labels_transformed_mismatch
self.visual_tags_box_mismatch = visual_tags_box_mismatch
self.use_visual_tag_flag = use_visual_tag_flag
self.visual_tags_segment_ids = visual_tags_segment_ids
self.visual_feats_seg_ids = visual_feats_seg_ids
class LXMERTDataset:
def __init__(self, splits: str, qa_sets=None):
"""
:param splits: The data sources to be loaded
:param qa_sets: if None, no action;
otherwise, only keep the answers appearing in these datasets
and remove all unlabeled data (MSCOCO captions)
"""
self.name = splits
self.sources = splits.split(',')
# Loading datasets to data
self.data = []
for source in self.sources:
try:
self.data.extend(json.load(open("data/lxmert/%s.json" % source)))
except:
self.data.extend(json.load(open("/local/harold/ubert/lxmert/data/lxmert/%s.json" % source))) # hacky
print("Load %d data from %s" % (len(self.data), self.name))
# Create answer table according to the qa_sets
self.answer_table = AnswerTable(qa_sets)
print("Load an answer table of size %d." % (len(self.answer_table.ans2id_map())))
# Modify the answers
for datum in self.data:
labelf = datum['labelf']
for cat, labels in labelf.items():
for label in labels:
for ans in list(label.keys()):
new_ans = self.answer_table.convert_ans(ans)
if self.answer_table.used(new_ans):
if ans != new_ans:
label[new_ans] = label.pop(ans)
else:
label.pop(ans)
def __len__(self):
return len(self.data)
def make_uid(img_id, dset, sent_idx):
return "%s_%s_%03d" % (img_id, dset, sent_idx)
def load_vocabs():
attributes = []
with open(args.attributes_vocab) as f:
for line in f:
attr = line.strip("\n")
if len(attr) != 0:
attributes.append(attr)
assert (len(attributes) == 400)
objects = []
with open(args.objects_vocab) as f:
for line in f:
attr = line.strip("\n")
if len(attr) != 0:
objects.append(attr)
assert (len(objects) == 1600)
return objects, attributes
def random_word(tokens, tokenizer):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
:param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
"""
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
# mask token with probability
ratio = args.word_mask_rate
if prob < ratio:
prob /= ratio
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = "[MASK]"
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
try:
output_label.append(tokenizer.vocab[token])
except KeyError:
# For unknown words (should not occur with BPE vocab)
output_label.append(tokenizer.vocab["[UNK]"])
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
return tokens, output_label
"""
Example in obj tsv:
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
"""
mapping_rawdataset_name_to_json = {
"mscoco_train,mscoco_nominival,vgnococo": "train",
"mscoco_minival": "val"
}
from lxrt.symbolic_vocabulary import SymbolicVocab
global symbolic_vocab
symbolic_vocab = SymbolicVocab(args.objects_vocab, args.attributes_vocab)
class LXMERTTorchDataset(Dataset):
def __init__(self, dataset: LXMERTDataset, topk=-1, sgg_dataset = None, image_only = False, text_only = False, use_visual_tag_flag = False, limit_source = [], available_split_for_cc = None):
super().__init__()
self.raw_dataset = dataset
self.name = '_'.join(self.raw_dataset.sources)
if args.get('disable_mismatch_for_other_dataset', False):
# Do not resample for datasets such as BookCorpus
self.task_matched = args.task_matched if "book_corpus" in self.raw_dataset.sources else False
else:
self.task_matched = args.task_matched
print(self.raw_dataset.sources)
print(self.task_matched)
print("\n\n\n")
self.sgg_dataset = sgg_dataset
self.image_only = image_only
self.text_only = text_only
self.use_visual_tag_flag = use_visual_tag_flag
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
self.task_nlvr2 = args.get("task_nlvr2", False)
if args.tiny:
topk = TINY_IMG_NUM
elif args.fast:
topk = FAST_IMG_NUM
#self.fake_data = args.get("fake_data", False)
self.custom_coco_data = args.get("custom_coco_data", False)
self.use_h5_file = args.get("use_h5_file", False)
if self.use_h5_file:
if "google_cc_train" in dataset.sources:
if args.get('change_split', False):
available_split_for_cc = [39]
else:
available_split_for_cc = args.get("available_split_for_cc", [0])
sources = []
split_map = {}
for i in available_split_for_cc:
sources.append("google_cc_{}".format(i))
split_map["google_cc_{}".format(i)] = "data/google_concetual/butd_feat/train_no_features_split_{}_of_40_splits.h5".format(i)
self.image_feature_dataset = ImageFeatureDataset.create(sources, split_map, load_custom_h5_version2=True, text_only = self.text_only, on_memory = False)
elif "open_images_train" in dataset.sources:
available_split_for_open_image = args.get("available_split_for_open_image", [0])
sources = []
split_map = {}
for split_i, split_j, total_split in available_split_for_open_image:
sources.append("open_image_{}_{}".format(split_i, split_j))
split_map["open_image_{}_{}".format(split_i, split_j)] = "data/open_image/butd_feat/train_{}_no_features_split_{}_of_{}_splits.h5".format(split_i, split_j, total_split)
self.image_feature_dataset = ImageFeatureDataset.create(sources, split_map, load_custom_h5_version2=True, on_memory = False)
else:
self.image_feature_dataset = ImageFeatureDataset.create(dataset.sources, Split2ImgFeatPath_h5, text_only = self.text_only, load_custom_h5_version2 = True if "flickr_train" in dataset.sources else False, on_memory = args.get("on_memory", False))
self.ids_to_index = self.image_feature_dataset.ids_to_index
# Screen data
used_data = []
for datum in self.raw_dataset.data:
if datum['img_id'] in self.ids_to_index:
used_data.append(datum)
else:
# Original LXMERT. Load the dataset
img_data = []
for source in self.raw_dataset.sources:
img_data.extend(load_obj_tsv(Split2ImgFeatPath[source], topk))
self.imgid2img = {}
for img_datum in img_data:
self.imgid2img[img_datum['img_id']] = img_datum
# Filter out the dataset
used_data = []
for datum in self.raw_dataset.data:
if datum['img_id'] in self.imgid2img:
used_data.append(datum)
used_data = used_data[::args.get("partial_dataset", 1)]
if sgg_dataset is not None:
used_data = [datum for datum in used_data if str(datum["img_id"]) in self.sgg_dataset.imageids_to_index]
# Flatten the dataset (into one sent + one image entries)
self.data = []
record_img_id = set()
remaining_set = set()
for datum in used_data:
# datum: {'img_id': 'COCO_train2014_000000318556', 'labelf': {'vqa': [{'no': 1}, {'yes': 1}, {'no': 1}, {'blue': 1, 'blue and white': 0.3}]}, 'sentf': {'mscoco': ['A very clean and well decorated empty bathroom', 'A blue and white bathroom with butterfly themed wall tiles.', 'A bathroom with a border of butterflies and blue paint on the walls above it.', 'An angled view of a beautifully decorated bathroom.', 'A clock that blends in with the wall hangs in a bathroom. '], 'vqa': ['Is the sink full of water?', 'Are there any butterflies on the tiles?', 'Is this bathroom in a hotel?', 'What color are the walls?']}}
sentf = datum['sentf']
for sents_cat, sents in sentf.items():
if sents_cat in limit_source:
continue
remaining_set.add(sents_cat)
if sents_cat in datum['labelf']:
labels = datum['labelf'][sents_cat]
else:
labels = None
for sent_idx, sent in enumerate(sents):
new_datum = {
'uid': make_uid(datum['img_id'], sents_cat, sent_idx) if args.task_qa else None,
'img_id': datum['img_id'], # if not self.text_only else "",
'sent': sent #if not self.image_only else ""
}
if image_only: # If image-only, make sure each image appears only once
if datum["img_id"] in record_img_id:
continue
record_img_id.add(datum["img_id"])
if labels is not None and args.task_qa:
new_datum['label'] = labels[sent_idx]
if self.task_nlvr2:
new_datum['match_label'] = datum["label"]
new_datum['img_id_1'] = datum["img_id_1"]
self.data.append(new_datum)
if image_only:
dataset_str = "image_only"
elif text_only:
dataset_str = "text_only"
else:
dataset_str = "vision and language"
if self.image_only and args.get("screen_image", False):
counter = 0
from tqdm import tqdm
_data = []
for data_item in tqdm(self.data):
img_id = data_item["img_id"]
image_index = self.image_feature_dataset.ids_to_index[img_id]
img_h = self.image_feature_dataset.h5_wh[image_index][1]
img_w = self.image_feature_dataset.h5_wh[image_index][0]
if img_h == 0 or img_w == 0:
counter += 1
else:
_data.append(data_item)
print("Screened {} images with zero heights and weidths, {} in total".format(counter, len(_data)))
self.data = _data
print("Use {} data in {} torch dataset, {}, limit_source {}".format(len(self.data), dataset_str, remaining_set, limit_source))
if text_only:
del self.image_feature_dataset
if text_only or image_only:
del self.raw_dataset.data
del self.raw_dataset
self.compress_memory = False
if args.get("compress_memory", False):
# Move some data to shared memory so the memory will not explode when using multi-process for data loading
self.compress()
print("\n\n\n")
def compress(self):
print("image_only", self.image_only)
print("text_only", self.text_only)
self._img_ids_shared_array, self._img_ids_record_position = self.compress_list_of_strings([i["img_id"] for i in self.data], "data_imonly_img_id_{}".format(self.name))
self.compress_memory = True
self._sent_shared_array, self._sent_record_position = self.compress_list_of_strings([i["sent"] for i in self.data], "data_txtonly_sent_{}".format(self.name))
self.compress_memory = True
def compress_list_of_strings(self, list_of_string, name):
record_position = []
all_text = []
current_length = 0
for index, string in enumerate(list_of_string):
array = [ord(c) for c in string]
all_text.extend(array)
current_length += len(array)
record_position.append(current_length)
shared_array = sharearray.cache(name, lambda: np.array(all_text, dtype=np.int32))
del all_text
return shared_array, record_position
def decompress_string_index(self, index, shared_array, record_position):
string_array = shared_array[0 if index == 0 else record_position[index - 1]:record_position[index]]
return ''.join([chr(c) for c in string_array])
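# Illustrative roundtrip (cache name "demo" is hypothetical):
# arr, pos = self.compress_list_of_strings(["ab", "cde"], "demo")
# self.decompress_string_index(1, arr, pos) -> "cde"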
def decompress_getitem__(self, index):
if self._sent_shared_array is not None:
sent = self.decompress_string_index(index, self._sent_shared_array, self._sent_record_position)
else:
sent = ""
if self._img_ids_shared_array is not None:
img_id = self.decompress_string_index(index, self._img_ids_shared_array, self._img_ids_record_position)
else:
img_id = None
return {"sent": sent, "img_id": img_id, "uid": None}
def __len__(self):
return len(self.data)
def random_feat(self):
"""Get a random obj feat from the dataset."""
if self.compress_memory:
datum = self.decompress_getitem__(random.randint(0, len(self.data) - 1))
else:
datum = self.data[random.randint(0, len(self.data) - 1)]
img_id = datum['img_id']
if self.use_h5_file:
image_index = self.ids_to_index[img_id]
feat = self.image_feature_dataset.h5_features[image_index]
feat = feat[random.randint(0, 35)]
else:
img_info = self.imgid2img[img_id]
feat = img_info['features']
feat = feat[random.randint(0, 35)]
return feat
def random_tags(self):
"""Get a random obj feat from the dataset."""
datum = self.data[random.randint(0, len(self.data)-1)]
img_id = datum['img_id']
image_index, obj_num, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs = self.image_feature_dataset.get_everything_except_features(img_id)
boxes = boxes.copy()
boxes[:, (0, 2)] /= img_w
boxes[:, (1, 3)] /= img_h
np.testing.assert_array_less(boxes, 1+1e-5)
np.testing.assert_array_less(-boxes, 0+1e-5)
return image_index, obj_num, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs
def __getitem__(self, item: int):
if self.compress_memory:
datum = self.decompress_getitem__(item)
else:
datum = self.data[item]
uid = datum['uid']
img_id = datum['img_id']
sent=datum['sent'].lower()
if not self.text_only:
# Get image info
if self.use_h5_file:
image_index, obj_num, feats, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs = self.image_feature_dataset[img_id]
else:
img_info = self.imgid2img[img_id]
obj_num = img_info['num_boxes']
feats = img_info['features'].copy()
boxes = img_info['boxes'].copy()
obj_labels = img_info['objects_id'].copy()
obj_confs = img_info['objects_conf'].copy()
attr_labels = img_info['attrs_id'].copy()
attr_confs = img_info['attrs_conf'].copy()
assert obj_num == len(boxes) == len(feats)
# Normalize the boxes (to 0 ~ 1)
img_h, img_w = img_info['img_h'], img_info['img_w']
#print(item, img_info, img_h, img_w)
boxes = boxes.copy()
boxes[:, (0, 2)] /= img_w
boxes[:, (1, 3)] /= img_h
np.testing.assert_array_less(boxes, 1+1e-5)
np.testing.assert_array_less(-boxes, 0+1e-5)
# If calculating the matched loss, replace the sentence with a sentence
# corresponding to another image.
is_matched=None
if args.get('task_nlvr2', False):
match_label = datum["match_label"]
is_matched = match_label
second_image_index, second_obj_num, second_feats, second_boxes, second_img_h, second_img_w, second_obj_labels, second_obj_confs, second_attr_labels, second_attr_confs = self.image_feature_dataset[datum["img_id_1"]]
second_boxes = second_boxes.copy()
second_boxes[:, (0, 2)] /= second_img_w
second_boxes[:, (1, 3)] /= second_img_h
np.testing.assert_array_less(second_boxes, 1+1e-5)
np.testing.assert_array_less(-second_boxes, 0 + 1e-5)
feats=np.concatenate((feats, second_feats))
boxes=np.concatenate((boxes, second_boxes))
obj_labels=np.concatenate((obj_labels, second_obj_labels))
obj_confs=np.concatenate((obj_confs, second_obj_confs))
#obj_confs=np.concatenate((obj_confs, second_obj_confs))
attr_labels = np.concatenate((attr_labels, second_attr_labels))
attr_confs = np.concatenate((attr_confs, second_attr_confs))
elif self.task_matched :
if random.random() < 0.5:
is_matched = 0
if self.compress_memory:
other_datum = self.decompress_getitem__(random.randint(0, len(self.data) - 1))
else:
other_datum = self.data[random.randint(0, len(self.data)-1)]
while other_datum['img_id'] == img_id:
if self.compress_memory:
other_datum = self.decompress_getitem__(random.randint(0, len(self.data) - 1))
else:
other_datum = self.data[random.randint(0, len(self.data)-1)]
sent = other_datum['sent']
else:
is_matched = 1
# Label, convert answer to id
if 'label' in datum and args.task_qa:
label = datum['label'].copy()
for ans in list(label.keys()):
label[self.raw_dataset.answer_table.ans2id(ans)] = label.pop(ans)
else:
label = None
if self.image_only:
sent = None
if self.text_only:
feats = None
boxes = None
obj_labels = None
obj_confs = None
attr_labels = None
attr_confs = None
# Create target
example = InputExample(
uid, sent, (feats, boxes),
(obj_labels, obj_confs), (attr_labels, attr_confs),
is_matched, label,
use_visual_tag_flag=self.use_visual_tag_flag
)
#if args.get("faster_loading", False):
return self.convert_example_to_features(example, args.get("max_seq_length", 20), self.tokenizer)
def random_mask_features(self, feats, boxes = None):
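# Visual analogue of BERT token masking: with prob args.obj_mask_rate a region
# is marked in feat_mask (1.0) and its feature is zeroed (80%), swapped for a
# random feature from another example (10%), or kept (10%). When
# args.inbatch_random is set, feat_mask is temporarily set to 2.0 to mark
# regions that create_in_batch_random_feat later fills from the same batch.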
mask_feats = deepcopy(feats) #.copy()
feat_mask = np.zeros(len(feats), dtype=np.float32)
for i in range(len(feats)):
prob = random.random()
# mask token with probability
if prob < args.obj_mask_rate:
feat_mask[i] = 1.
prob /= args.obj_mask_rate
# 80% randomly change token to zero feat
if prob < 0.8:
mask_feats[i, :] = 0.
# 10% randomly change token to random feat
elif prob < 0.9:
if not args.get("disable_random_feat", False) and not args.get("inbatch_random", False):
mask_feats[i,:] = self.random_feat()
if args.get("inbatch_random", False):
feat_mask[i] = 2.0 # special mark
# -> rest 10% randomly keep current feat
# Need to predict this feat
return mask_feats, feat_mask
def convert_example_to_features(self, example: InputExample, max_seq_length, tokenizer):
if example.mlm_labels is not None: # The data is already pre-masked
input_ids = example.token_ids
lm_label_ids = example.mlm_labels
max_seq_len = example.max_seq_len + 2
# Add [CLS] and [SEP]
input_ids = tokenizer.convert_tokens_to_ids(["[CLS]"]) + input_ids + tokenizer.convert_tokens_to_ids(["[SEP]"])
lm_label_ids = [-1] + lm_label_ids + [-1]
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_len:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
features = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
visual_feats=(None, None),
obj_labels={
'obj': (None, None),
'attr': (None, None),
'feat': (None, None),
},
is_matched=None,
ans=-1,
visual_tags = None,
visual_tags_objective = None,
visual_tags_mask = None,
visual_tags_box=None,
visual_tags_mismatch=None
)
return features
if example.sent is not None:
tokens = tokenizer.tokenize(example.sent.strip())
# Account for [CLS] and [SEP] with "- 2"
if len(tokens) > max_seq_length - 2:
tokens = tokens[:(max_seq_length - 2)]
# Get randomly masked words and their LM labels
masked_tokens, masked_label = random_word(tokens, tokenizer)
# concatenate lm labels and account for [CLS] and [SEP]
masked_tokens = ['[CLS]'] + masked_tokens + ['[SEP]']
input_ids = tokenizer.convert_tokens_to_ids(masked_tokens)
# Mask & Segment Word
lm_label_ids = ([-1] + masked_label + [-1])
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(lm_label_ids) == max_seq_length
elif args.get("insert_cls", False):
masked_tokens = ["[CLS]"]
input_ids = tokenizer.convert_tokens_to_ids(masked_tokens)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
lm_label_ids = [-1]
else:
input_ids = None
input_mask = None
segment_ids = None
lm_label_ids = None
if example.use_visual_tag_flag and example.visual_feats[0] is not None: # Let's do a hybrid embedding
feat, boxes = example.visual_feats
obj_labels, obj_confs = example.obj_labels
attr_labels, attr_confs = example.attr_labels
# Mask Image Features:
masked_feat, feat_mask = self.random_mask_features(feat, boxes=boxes)
assert(args.non_exclusive_tags)
assert(args.use_bert_input_for_tags)
visual_tags, visual_tags_objective, visual_tags_mask, visual_tags_box, visual_tags_segment_ids = tag_data_utilis.create_tags_pretrain(
obj_labels=obj_labels,
attr_labels=attr_labels,
obj_confs=obj_confs,
attr_confs=attr_confs,
tokenizer=self.tokenizer,
symbolic_vocab=symbolic_vocab,
visual_tags_box = boxes,
feat_mask = feat_mask,
use_bert_input=True
)
elif example.visual_feats[0] is not None:
feat, boxes = example.visual_feats
obj_labels, obj_confs = example.obj_labels
attr_labels, attr_confs = example.attr_labels
# Mask Image Features:
masked_feat, feat_mask = self.random_mask_features(feat, boxes=boxes)
visual_tags = None
visual_tags_objective = None
visual_tags_mask = None
visual_tags_box = None
visual_mismatch_label = None
obj_labels_transformed_mismatch = None
visual_tags_box_mismatch = None
else:
masked_feat = None
boxes = None
obj_labels = None
obj_confs = None
attr_labels = None
attr_confs = None
feat_mask = None
feat = None
visual_tags = None
visual_tags_objective = None
visual_tags_mask = None
visual_tags_box = None
visual_mismatch_label = None
obj_labels_transformed_mismatch = None
visual_tags_box_mismatch = None
# QA answer label
if example.label is None or len(example.label) == 0 or example.is_matched != 1:
# 1. No label 2. Label is pruned 3. unmatched visual + language pair
ans = -1
else:
keys, values = zip(*example.label.items())
if len(keys) == 1:
ans = keys[0]
else:
value_sum = sum(values)
prob = [value / value_sum for value in values]
choice = np.random.multinomial(1, prob).argmax()
ans = keys[choice]
features = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
visual_feats=(masked_feat, boxes),
obj_labels={
'obj': (obj_labels, obj_confs),
'attr': (attr_labels, attr_confs),
'feat': (feat, feat_mask),
},
is_matched=example.is_matched,
ans=ans,
visual_tags = visual_tags,
visual_tags_objective = visual_tags_objective,
visual_tags_mask = visual_tags_mask,
visual_tags_box=visual_tags_box,
visual_tags_mismatch=None if not args.get('use_tag_mismatch', None) else visual_mismatch_label,
obj_labels_transformed_mismatch=None if not args.get("use_tag_mismatch", None) else obj_labels_transformed_mismatch,
visual_tags_box_mismatch=None if not args.get('use_tag_mismatch', None) else visual_tags_box_mismatch,
            use_visual_tag_flag=example.use_visual_tag_flag)
return features
def create_in_batch_random_feat(self, example, example_index, all_examples):
if args.get("inbatch_random", False) and example.visual_feats[0] is not None:
feats, _ = example.visual_feats
feat_mask = example.obj_labels["feat"][1]
#original_feats = example.obj_labels["feat"][0]
for i in range(len(feat_mask)):
if feat_mask[i] == 2:
feat_mask[i] = 1
select_index = random.randint(0, len(all_examples) - 1)
while select_index == example_index:
select_index = random.randint(0, len(all_examples) - 1)
select_index_j = random.randint(0, len(feat_mask) - 1)
while select_index_j == i:
select_index_j = random.randint(0, len(feat_mask) - 1)
feats[i] = all_examples[select_index].obj_labels["feat"][0][select_index_j]
return example
def custom_collact_fn(self, examples):
        hybrid_num = random.randint(args.get("hybrid_min", 2), args.get("hybrid_max", 34))  # sampled here but unused in this collate path
train_features = [self.create_in_batch_random_feat(example, example_index, all_examples = examples) for example_index, example in enumerate(examples)]
if train_features[0].input_ids is not None:
# language Inputs
input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
# Language Prediction
lm_labels = torch.tensor([f.lm_label_ids for f in train_features], dtype=torch.long)
else:
input_ids = None
input_mask = None
segment_ids = None
lm_labels = None
if train_features[0].visual_feats[0] is not None:
# Visual Inputs
if isinstance(train_features[0].visual_feats[0], torch.FloatTensor):
feats = torch.stack([f.visual_feats[0] for f in train_features])
else:
feats = torch.from_numpy(np.stack([f.visual_feats[0] for f in train_features]))
pos = torch.from_numpy(np.stack([f.visual_feats[1] for f in train_features]))
# Visual Prediction
obj_labels = {}
            for key in args.visual_losses.split(","):  # e.g. ('obj', 'attr', 'feat')
visn_labels = torch.from_numpy(np.stack([f.obj_labels[key][0] for f in train_features]))
#if self.custom_coco_data:
# visn_mask = torch.ones(visn_labels.size(0), visn_labels.size(1)).float().cuda()
#else:
visn_mask = torch.from_numpy(np.stack([f.obj_labels[key][1] for f in train_features]))
assert visn_labels.size(0) == visn_mask.size(0) and visn_labels.size(1) == visn_mask.size(1)
obj_labels[key] = (visn_labels, visn_mask)
if args.get('task_nlvr2', False):
visual_feats_seg_ids = []
for i in range(feats.size(0)):
                    visual_feats_seg_ids.append([0] * 36 + [1] * 36)  # NLVR2 pairs two images: first 36 boxes get segment 0, next 36 get segment 1
visual_feats_seg_ids= torch.tensor(visual_feats_seg_ids, dtype=torch.int64)
else:
visual_feats_seg_ids = None
else:
feats = None
pos = None
obj_labels = None
visual_feats_seg_ids = None
if train_features[0].visual_tags is not None:
# do padding
tag_max_length = max([len(f.visual_tags) for f in train_features])
for f in train_features:
current_tag_length = len(f.visual_tags)
if current_tag_length < tag_max_length:
f.visual_tags = f.visual_tags + [0] * (tag_max_length - current_tag_length)
f.visual_tags_objective = f.visual_tags_objective + [-1] * (tag_max_length - current_tag_length)
f.visual_tags_mask = f.visual_tags_mask + [0] * (tag_max_length - current_tag_length)
f.visual_tags_box = f.visual_tags_box + [ np.array([0.0, 0.0, 0.0, 0.0], dtype=np.float32) ] * (tag_max_length - current_tag_length)
f.visual_tags_box = np.stack(f.visual_tags_box)
if f.visual_tags_segment_ids is not None:
f.visual_tags_segment_ids = f.visual_tags_segment_ids + [0] * (tag_max_length - current_tag_length)
visual_tags = torch.tensor([f.visual_tags for f in train_features], dtype=torch.long)
visual_tags_mask = torch.tensor([f.visual_tags_mask for f in train_features], dtype=torch.long)
visual_tags_box = torch.from_numpy(np.stack([f.visual_tags_box for f in train_features]))
visual_tags_objective = torch.tensor([f.visual_tags_objective for f in train_features], dtype=torch.long)
if train_features[0].visual_tags_mismatch is not None:
visual_tags_mismatch = torch.tensor([f.visual_tags_mismatch for f in train_features], dtype=torch.long)
else:
visual_tags_mismatch = None
if train_features[0].visual_tags_segment_ids is not None:
visual_tags_segment_ids = torch.tensor([f.visual_tags_segment_ids for f in train_features], dtype=torch.long)
else:
visual_tags_segment_ids = None
if args.get("tag_hard_max_length", None) is not None and tag_max_length > args.tag_hard_max_length:
# truncate the tag sequence
visual_tags = visual_tags[:, :args.tag_hard_max_length].contiguous()
visual_tags_mask = visual_tags_mask[:, :args.tag_hard_max_length].contiguous()
visual_tags_box = visual_tags_box[:, :args.tag_hard_max_length].contiguous()
visual_tags_objective = visual_tags_objective[:, :args.tag_hard_max_length].contiguous()
if visual_tags_mismatch is not None:
visual_tags_mismatch = visual_tags_mismatch[:, :args.tag_hard_max_length].contiguous()
if visual_tags_segment_ids is not None:
visual_tags_segment_ids = visual_tags_segment_ids[:, :args.tag_hard_max_length].contiguous()
else:
visual_tags = None
visual_tags_mask = None
visual_tags_box = None
visual_tags_objective = None
visual_tags_mismatch = None
visual_tags_segment_ids = None
if train_features[0].is_matched is not None:
matched_labels = torch.tensor([f.is_matched for f in train_features], dtype=torch.long)
else:
matched_labels = None
ans = torch.from_numpy(np.stack([f.ans for f in train_features]))
if args.get("lxmert_style_nlvr", False):
# Reorganize the inputs
input_ids = input_ids.unsqueeze(1).expand(input_ids.size(0), 2, input_ids.size(-1)).contiguous().view(-1, input_ids.size(-1)).contiguous()
lm_labels = lm_labels.unsqueeze(1).expand(lm_labels.size(0), 2, lm_labels.size(-1)).contiguous().view(-1, lm_labels.size(-1)).contiguous()
input_mask = input_mask.unsqueeze(1).expand(input_mask.size(0), 2, input_mask.size(-1)).contiguous().view(-1, input_mask.size(-1)).contiguous()
visual_feats_seg_ids = None
feats = feats.view(-1, feats.size(1)//2, feats.size(-1)).contiguous()
pos = pos.view(-1, pos.size(1) // 2, pos.size(-1)).contiguous()
if args.get("use_visual_tag_flag", False):
visual_tags = visual_tags.view(-1, visual_tags.size(1) // 2).contiguous()
visual_tags_box = visual_tags_box.view(-1, visual_tags_box.size(1) // 2, visual_tags_box.size(-1)).contiguous()
visual_tags_objective = visual_tags_objective.view(-1, visual_tags_objective.size(1) // 2).contiguous()
visual_tags_mask = visual_tags_mask.view(-1, visual_tags_mask.size(1)//2).contiguous()
return [input_ids, segment_ids, input_mask, lm_labels, feats, pos, obj_labels, matched_labels, ans, visual_feats_seg_ids, visual_tags, visual_tags_mask, visual_tags_box, visual_tags_objective, visual_tags_mismatch, visual_tags_segment_ids]
class LXMERTEvaluator:
def __init__(self, dataset: LXMERTDataset):
self.raw_dataset = dataset
# Create QA Eval Data
self.data = []
for datum in self.raw_dataset.data:
sentf = datum['sentf']
for sents_cat, sents in sentf.items():
if sents_cat in datum['labelf']: # A labeled dataset
labels = datum['labelf'][sents_cat]
for sent_idx, sent in enumerate(sents):
new_datum = {
'uid': make_uid(datum['img_id'], sents_cat, sent_idx),
'img_id': datum['img_id'],
'sent': sent,
'dset': sents_cat,
'label': labels[sent_idx]
}
self.data.append(new_datum)
# uid2datum
self.uid2datum = {}
for datum in self.data:
self.uid2datum[datum['uid']] = datum
def evaluate(self, uid2ans: dict, pprint=False):
score = 0.
cnt = 0
dset2score = defaultdict(lambda: 0.)
dset2cnt = defaultdict(lambda: 0)
for uid, ans in uid2ans.items():
            if uid not in self.uid2datum:  # not a labeled datum
continue
datum = self.uid2datum[uid]
label = datum['label']
dset = datum['dset']
if ans in label:
score += label[ans]
dset2score[dset] += label[ans]
cnt += 1
dset2cnt[dset] += 1
accu = score / cnt
dset2accu = {}
for dset in dset2cnt:
dset2accu[dset] = dset2score[dset] / dset2cnt[dset]
if pprint:
accu_str = "Overall Accu %0.4f, " % (accu)
sorted_keys = sorted(dset2accu.keys())
for key in sorted_keys:
accu_str += "%s Accu %0.4f, " % (key, dset2accu[key])
print(accu_str)
return accu, dset2accu
def dump_result(self, uid2ans: dict, path):
        raise NotImplementedError
| 42,230 | 43.453684 | 630 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/lxmert_pretrain.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import collections
import os
import random
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import json
from param import args
from pretrain.lxmert_data import LXMERTDataset, LXMERTTorchDataset, LXMERTEvaluator
from pretrain.text_data import GeneralCorpusNP
from lxrt.tokenization import BertTokenizer
from lxrt.modeling import LXRTPretraining
from collections import defaultdict
DataTuple = collections.namedtuple("DataTuple", 'dataset torchdset loader evaluator vl_torchdset')
EvalDataTuple = collections.namedtuple("EvalDataTuple", 'dataset torchdset loader evaluator vl_torchdset textonly')
class TrainingMeter():
def __init__(self):
self.counter_dict = defaultdict(float)
self.true_dict = defaultdict(float)
def update(self, loss_dict):
for key, item in loss_dict.items():
self.counter_dict[key] += 1
self.true_dict[key] += item
def report(self):
keys = list(self.counter_dict.keys())
keys.sort()
for key in keys:
print(" {} : {:.7}".format(key, self.true_dict[key] / self.counter_dict[key]))
def clean(self):
self.counter_dict = defaultdict(float)
self.true_dict = defaultdict(float)
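# Illustrative sketch (not part of the original training loop): TrainingMeter
# keeps a running sum and count per loss name, so report() prints the mean of
# everything seen since the last clean(). The loss names below are invented.
def _training_meter_example():
    meter = TrainingMeter()
    meter.update({"mlm_loss": 2.0, "matched_loss": 0.5})
    meter.update({"mlm_loss": 1.0})
    meter.report()  # mlm_loss : 1.5, matched_loss : 0.5
    meter.clean()   # reset both dictionaries for the next reporting window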
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import _SingleProcessDataLoaderIter, _MultiProcessingDataLoaderIter
if args.get('random_seed', None):
random.seed(args.random_seed)
np.random.seed(args.random_seed)
torch.random.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
def get_tuple(splits: str, bs: int, shuffle=False, drop_last=False, topk=-1, num_workers = 0, limit_source = [], restrict_source = None) -> DataTuple:
# Decide which QA datasets would be used in pre-training.
# Options: vqa, gqa, visual7w
# Note: visual7w is a part of vgqa, we take the name here.
qa_sets = args.qa_sets
if qa_sets is not None:
qa_sets = set(qa_set.lower().strip() for qa_set in qa_sets.split(","))
# Build dataset, data loader, and evaluator.
dset = LXMERTDataset(splits, qa_sets=qa_sets)
tset = LXMERTTorchDataset(
dset,
topk,
limit_source = limit_source,
        use_visual_tag_flag = args.get("allow_tag_for_eval", False) # As this function is called for evaluation in our context
)
data_loader = DataLoader(
tset, batch_size=bs,
shuffle=shuffle, num_workers=num_workers,
collate_fn= tset.custom_collact_fn if args.get('custom_collact_fn', False) else lambda x: x,
drop_last=drop_last, pin_memory=args.get("pin_memory", True)
)
evaluator = LXMERTEvaluator(dset)
print()
return DataTuple(dataset=dset, torchdset=tset, loader=data_loader, evaluator=evaluator, vl_torchdset=tset)
from lxrt.h5_data import CustomBatchSampler, ConcateDataset
def get_tuple_hybrid(splits: str, bs: int, shuffle=False, drop_last=False, num_workers=0, topk=-1, image_only_splits=None, text_only_splits = None, limit_source = [], restrict_source = None) -> DataTuple:
# Decide which QA datasets would be used in pre-training.
# Options: vqa, gqa, visual7w
# Note: visual7w is a part of vgqa, we take the name here.
qa_sets = args.qa_sets
if qa_sets is not None:
qa_sets = set(qa_set.lower().strip() for qa_set in qa_sets.split(","))
# Three type of datasets: v&l, language, vision
datasets_list_torch = []
datasets_list = []
if splits is not None:
vl_dataset = LXMERTDataset(splits, qa_sets=qa_sets)
vl_dataset_torch = LXMERTTorchDataset(vl_dataset, topk, limit_source = limit_source, randomized_pairing = args.get("randomized_pairing", False), use_visual_tag_flag = args.get("use_visual_tag_flag", False))
datasets_list.append(vl_dataset)
datasets_list_torch.append(vl_dataset_torch)
if text_only_splits is not None:
text_only_datasets = []
for split in text_only_splits.split("+"):
if not("book_corpus" in split or "sbu" in split):
text_only_dataset = LXMERTDataset(split, qa_sets=qa_sets)
text_only_dataset_torch = LXMERTTorchDataset(text_only_dataset, topk, text_only=True, limit_source=limit_source)
datasets_list.append(text_only_dataset)
datasets_list_torch.append(text_only_dataset_torch)
text_only_datasets.append(text_only_dataset_torch)
else:
text_only_dataset = None
if "book_corpus" in split and args.get("text_shared_memory", False):
text_class = GeneralCorpusNP
else:
                    #text_class = GeneralCorpus
                    # NOTE: only GeneralCorpusNP is imported above, so text_class can be
                    # left undefined on this branch and the constructor call below would raise NameError.
                    pass
text_only_dataset_torch = text_class(ann_file=args.book_corpus_path if "book_corpus" in split else args.sbu_path, pretrained_model_name="bert-base-uncased", tokenizer=None, seq_len=args.get("text_only_max_seq_len", 64), min_seq_len=args.get("text_only_min_seq_len", 64), encoding="utf-8", on_memory=True)
datasets_list.append(text_only_dataset)
datasets_list_torch.append(text_only_dataset_torch)
text_only_datasets.append(text_only_dataset_torch)
if image_only_splits is not None:
if image_only_splits != "":
image_only_dataset = LXMERTDataset(image_only_splits, qa_sets=qa_sets)
image_only_dataset_torch = LXMERTTorchDataset(image_only_dataset, topk, image_only=True, use_visual_tag_flag = args.get("use_visual_tag_flag", False))
datasets_list.append(image_only_dataset)
datasets_list_torch.append(image_only_dataset_torch)
if args.get("add_adhoc_google_cc_image_only", False):
google_cc_dataset = LXMERTDataset("google_cc_train", qa_sets=qa_sets)
google_cc_dataset_torch = LXMERTTorchDataset(google_cc_dataset, topk, image_only=True, use_visual_tag_flag=args.get("use_visual_tag_flag", False), available_split_for_cc = args.get("available_split_for_cc", [0]))
datasets_list.append(google_cc_dataset)
datasets_list_torch.append(google_cc_dataset_torch)
if args.get("add_adhoc_open_image_image_only", False):
open_image_dataset = LXMERTDataset("open_images_train", qa_sets=qa_sets)
open_image_torch = LXMERTTorchDataset(open_image_dataset, topk, image_only=True, use_visual_tag_flag=args.get("use_visual_tag_flag", False))
datasets_list.append(open_image_dataset)
datasets_list_torch.append(open_image_torch)
# Merge different datasets
merged_dataset = ConcateDataset(datasets_list_torch)
if args.task_qa:
merged_dataset.answer_table = datasets_list[0].answer_table if datasets_list[0] is not None else None
batch_sampler = CustomBatchSampler(merged_dataset.datasets, bs, upsample_ratios=args.get("upsample_ratios", [1,1,1]))
try:
custom_collact_fn = datasets_list_torch[0].custom_collact_fn if args.get('custom_collact_fn', False) else lambda x: x
except:
custom_collact_fn = datasets_list_torch[-1].custom_collact_fn if args.get('custom_collact_fn', False) else lambda x: x
data_loader = DataLoader(
merged_dataset, num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=custom_collact_fn,
pin_memory=args.get("pin_memory", True)
)
if args.task_qa:
        evaluator = LXMERTEvaluator(datasets_list[0]) if datasets_list[0] is not None else None  # the evaluator is only needed for task_qa
else:
evaluator = None
print()
if splits is not None:
vl_torchdset = vl_dataset_torch
else:
vl_torchdset = datasets_list_torch[-1] # the last dataset
return DataTuple(dataset=merged_dataset, torchdset=merged_dataset, loader=data_loader, evaluator=evaluator, vl_torchdset=vl_torchdset)
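# Hedged note on the hybrid pipeline above: CustomBatchSampler is defined in
# lxrt.h5_data and is not shown here. Judging from the call signature, it
# plausibly draws each batch from a single sub-dataset of the ConcateDataset,
# weighted by upsample_ratios, so V&L, text-only, and image-only examples do
# not mix within one batch and the collate function sees homogeneous inputs.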
if not args.get("hybrid", False):
train_tuple = get_tuple(args.train, args.batch_size, shuffle=True, drop_last=True, num_workers=args.num_workers, limit_source = args.get("limit_source", []))
valid_batch_size = args.get("valid_batch_size", 128)
valid_tuple = get_tuple(args.valid, valid_batch_size, shuffle=False, drop_last=False, topk=5000, num_workers=args.get("val_num_workers", 2), limit_source = args.get("limit_source_for_val", []))
else:
train_tuple = get_tuple_hybrid(args.train, args.batch_size, shuffle=True, num_workers = args.num_workers, drop_last=True, image_only_splits = args.train_image_only, text_only_splits = args.get("train_text_only", None), limit_source = args.get("limit_source", []))
valid_batch_size = args.get("valid_batch_size", 128)
valid_tuple = get_tuple(args.valid, valid_batch_size, shuffle=False, num_workers = args.get("val_num_workers", 2), drop_last=False, topk=5000, limit_source = args.get("limit_source_for_val", []))
from lxmert_data import symbolic_vocab
LOSSES_NAME = ('Mask_LM', 'Matched', 'Obj', 'Attr', 'Feat', 'QA')
class LXMERT:
def __init__(self, max_seq_length):
super().__init__()
self.max_seq_length = max_seq_length
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
# Build model
self.model = LXRTPretraining.from_pretrained(
"bert-base-uncased",
args = args,
task_mask_lm=args.task_mask_lm,
task_obj_predict=args.task_obj_predict,
task_matched=args.task_matched,
task_qa=args.task_qa,
visual_losses=args.visual_losses,
num_answers= args.num_answers if args.get("num_answers", None) else train_tuple.dataset.answer_table.num_answers
)
# Weight initialization and loading
if args.from_scratch:
print("Train from Scratch: re-initialize all BERT weights.")
self.model.apply(self.model.init_bert_weights)
if args.get("use_tag_symbolic_embedding", False):
self.model.bert.embeddings.initialize_symbolic_embeddings(symbolic_vocab.get_symbolic_list(self.tokenizer))
self.model.special_initialize_pretraining_head()
if args.get("hybrid_embedding", False):
self.model.bert.embeddings.initialize_visual_position_type_embeddings()
if args.load_lxmert is not None:
# Load lxmert would not load the answer head.
self.load_lxmert(args.load_lxmert)
self.model = self.model.cuda()
if args.multiGPU:
self.model = nn.DataParallel(self.model)
self.global_step = 0
def forward(self, examples):
for index, i in enumerate(examples):
if i is not None:
if isinstance(i, dict):
for key in i:
i[key] = (i[key][0].cuda(), i[key][1].cuda())
else:
examples[index] = i.cuda()
input_ids, segment_ids, input_mask, lm_labels, feats, pos, obj_labels, matched_labels, ans, visual_feats_seg_ids, visual_tags, visual_tags_mask, visual_tags_box, visual_tags_objective, visual_tags_mismatch, visual_tags_segment_ids = examples
loss, losses, ans_logit, losses_dict = self.model(
input_ids, segment_ids, input_mask, lm_labels,
feats, pos, obj_labels, matched_labels, ans,
visual_feats_seg_ids = visual_feats_seg_ids,
visual_tags = visual_tags,
visual_tags_mask = visual_tags_mask,
visual_tags_box = visual_tags_box,
visual_tags_objective = visual_tags_objective,
visual_tags_mismatch = visual_tags_mismatch,
visual_tags_segment_ids = visual_tags_segment_ids
)
return loss, losses.detach().cpu(), ans_logit, losses_dict
def train_batch(self, optim, batch):
gradient_accumulation_steps = args.get("gradient_accumulation_steps", 1)
if (self.global_step + 1) % gradient_accumulation_steps == 0:
optim.zero_grad()
loss, losses, ans_logit, losses_dict = self.forward(batch)
if args.multiGPU:
loss = loss.mean()
losses = losses.mean(0)
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
loss.backward()
if (self.global_step + 1) % gradient_accumulation_steps == 0:
nn.utils.clip_grad_norm_(self.model.parameters(), 1.)
optim.step()
return loss.item(), losses.cpu().numpy(), ans_logit, losses_dict
def valid_batch(self, batch):
with torch.no_grad():
loss, losses, ans_logit, losses_dict = self.forward(batch)
if args.multiGPU:
loss = loss.mean()
losses = losses.mean(0)
return loss.item(), losses.cpu().numpy(), ans_logit, losses_dict
def train(self, train_tuple: DataTuple, eval_tuple: DataTuple):
train_ld = train_tuple.loader
# Optimizer
from lxrt.optimization import BertAdam
batch_per_epoch = len(train_ld)
t_total = int(batch_per_epoch * args.epochs)
warmup_ratio = args.get("warmup_ratio", 0.05)
print("Total Iters: %d" % t_total)
if args.get("t_total", None):
t_total = args.t_total
print("!! Changing to specified t_toal in args: {}".format(t_total))
self.t_total = t_total
warmup_iters = int(t_total * warmup_ratio)
print("Batch per epoch: %d" % batch_per_epoch)
print("Warm up Iters: %d" % warmup_iters)
self.optim = BertAdam(self.model.parameters(), lr=args.lr, warmup=warmup_ratio, t_total=t_total)
if args.load is not None:
self.load(args.load, t_total = t_total)
gradient_accumulation_steps = args.get("gradient_accumulation_steps", 1)
# Train
best_eval_loss = 9595.
report_every = args.get("report_every", 100)
custom_train_meter = TrainingMeter()
for epoch in range(args.epochs):
# Train
self.model.train()
total_loss = 0.
total_losses = 0.
uid2ans = {}
for batch_id, batch in enumerate(tqdm(train_ld, total=len(train_ld))):
if args.get("skip_training", False):
break
loss, losses, logit, losses_dict = self.train_batch(self.optim, batch)
total_loss += loss
try:
total_losses += losses
except:
pass
if args.task_qa and batch[0].sent is not None:
assert(0) # Not used in our experiment
score, label = logit.max(1)
for datum, l in zip(batch, label.cpu().numpy()):
uid = datum.uid
ans = train_tuple.dataset.answer_table.id2ans(l)
uid2ans[uid] = ans
for key, value in losses_dict.items():
losses_dict[key] = value.mean().item() # make the losses scalar
if "Masked LM" in losses_dict and losses_dict["Masked LM"] == 0:
del losses_dict["Masked LM"]
custom_train_meter.update(losses_dict)
if batch_id % report_every == 0 and batch_id > 0:
print("Folder: {} \n Epoch {} Iter: {}/{}".format(args.output, epoch, batch_id, len(train_ld)))
#print(pd.DataFrame(train_results[-report_every:]).mean())
custom_train_meter.report()
custom_train_meter.clean()
print()
if args.get("save_step", -1) != -1 and self.global_step != 0 and (self.global_step // gradient_accumulation_steps) % args.save_step == 0:
self.save("Step{}".format(self.global_step))
self.global_step += 1
print("The training loss for Epoch %d is %0.4f" % (epoch, total_loss / batch_per_epoch))
if args.task_qa:
train_tuple.evaluator.evaluate(uid2ans, pprint=True)
# Eval
avg_eval_loss = self.evaluate_epoch(eval_tuple, iters=-1)
if args.get("eval_on_train", False):
print("On train set")
self.evaluate_epoch(train_tuple, iters=-1)
if avg_eval_loss < best_eval_loss:
best_eval_loss = avg_eval_loss
self.save("BEST_EVAL_LOSS")
self.save("Epoch%02d" % (epoch+1))
def evaluate_epoch(self, eval_tuple: DataTuple, iters: int=-1):
self.model.eval()
eval_ld = eval_tuple.loader
total_loss = 0.
total_losses = 0.
uid2ans = {}
eval_meter = TrainingMeter()
for i, batch in enumerate(tqdm(eval_ld)):
loss, losses, logit, losses_dict = self.valid_batch(batch)
total_loss += loss
try:
total_losses += losses
except:
pass
for key, value in losses_dict.items():
losses_dict[key] = value.mean().item()
eval_meter.update(losses_dict)
if args.task_qa:
score, label = logit.max(1)
for datum, l in zip(batch, label.cpu().numpy()):
uid = datum.uid
ans = train_tuple.dataset.answer_table.id2ans(l)
uid2ans[uid] = ans
if i == iters:
break
print("Evaluation:")
eval_meter.report()
print("\n\n\n\n\n\n\n\n")
if args.task_qa:
eval_tuple.evaluator.evaluate(uid2ans, pprint=True)
return total_loss / len(eval_ld)
def evaluate_epoch_text(self, eval_tuple: DataTuple, iters: int=-1):
self.model.eval()
eval_ld = eval_tuple.textonly
total_loss = 0.
total_losses = 0.
uid2ans = {}
eval_meter = TrainingMeter()
for i, batch in enumerate(tqdm(eval_ld)):
loss, losses, logit, losses_dict = self.valid_batch(batch)
total_loss += loss
total_losses += losses
for key, value in losses_dict.items():
losses_dict[key] = value.mean().item()
eval_meter.update(losses_dict)
if args.task_qa:
score, label = logit.max(1)
for datum, l in zip(batch, label.cpu().numpy()):
uid = datum.uid
ans = train_tuple.dataset.answer_table.id2ans(l)
uid2ans[uid] = ans
if i == iters:
break
print("Evaluation text only:")
eval_meter.report()
print("\n\n\n\n\n\n\n\n")
return total_loss / len(eval_ld)
def save(self, name):
torch.save(self.model.state_dict(),
os.path.join(args.output, "%s_LXRT.pth" % name))
if args.get("save_optimizer", False) and "Step" not in name:
torch.save(self.optim.state_dict(),
os.path.join(args.output, "%s_LXRT_optimizer.pth" % name))
def load(self, path, t_total):
print("Load model from %s" % path)
state_dict = torch.load("%s_LXRT.pth" % path)
#self.model.load_state_dict(state_dict)
from qa_answer_table import load_state_dict_flexible
load_state_dict_flexible(self.model, state_dict)
optimizer_path = "{}_LXRT_optimizer.pth".format(path)
if os.path.exists(optimizer_path) and args.get("load_optimizer", True):
print("Load optimizer from {}".format(optimizer_path))
loaded_optim = torch.load(optimizer_path)
if args.get("reset_schedule", False):
for group in loaded_optim["param_groups"]:
group['lr'] = args.lr
group['warmup'] = args.warmup_ratio
group["t_total"] = t_total
for p in group['params']:
loaded_optim["state"][p]["step"]
loaded_optim["state"][p]["step"] = 0
self.optim.load_state_dict(loaded_optim)
def load_lxmert(self, path):
print("Load LXMERT model from %s" % path)
state_dict = torch.load("%s_LXRT.pth" % path)
# Do not load any answer head
for key in list(state_dict.keys()):
if 'answer' in key:
state_dict.pop(key)
# Change Multi GPU to single GPU
new_state_dict = {}
for key, value in state_dict.items():
if key.startswith("module."):
new_state_dict[key[len("module."):]] = value
state_dict = new_state_dict
load_keys = set(state_dict.keys())
model_keys = set(self.model.state_dict().keys())
print()
print("Keys in loaded but not in model:")
for key in sorted(load_keys.difference(model_keys)):
print(key)
print()
print("Keys in model but not in loaded:")
for key in sorted(model_keys.difference(load_keys)):
print(key)
print()
self.model.load_state_dict(state_dict, strict=False)
if __name__ == "__main__":
lxmert = LXMERT(max_seq_length=args.get("max_seq_length", 20))
lxmert.train(train_tuple, valid_tuple)
| 21,642 | 41.189084 | 320 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/symbolic_vocabulary.py | from param import args
class SymbolicVocab:
    def __init__(self, object_path, attribute_path, cls_token="[CLS]", sep_token="[SEP]", mask_token="[MASK]", take_first=True):
attributes = []
with open(attribute_path) as f:
for line in f:
attr = line.strip("\n")
if "," in attr and take_fisrt:
attr = attr.split(",")[0]
if len(attr) != 0:
attributes.append(attr)
assert (len(attributes) == 400)
objects = []
with open(object_path) as f:
for line in f:
attr = line.strip("\n")
if "," in attr and take_fisrt:
attr = attr.split(",")[0]
if len(attr) != 0:
objects.append(attr)
assert (len(objects) == 1600)
self.attributes = attributes
self.objects = objects
self.id2word = []
self.id2word.append(cls_token)
self.id2word.append(sep_token)
self.id2word.append(mask_token)
self.id2word.extend(attributes)
self.id2word.extend(objects)
self.length_of_attribute = len(attributes)
self.word2id = {}
for index, word in enumerate(self.id2word):
self.word2id[word] = index
def __len__(self):
        return len(self.id2word)
def obj_id2word(self, index):
return self.objects[index]
def attr_id2word(self, index):
return self.attributes[index]
def get_symbolic_list(self, tokenizer):
all_subwords = []
for word in self.id2word:
all_subwords.append(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(word)))
return all_subwords
def get_seg_id(self, word_id):
if word_id >= 3 and word_id < self.length_of_attribute + 3:
return 1
return 0
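# Minimal usage sketch, assuming the standard 1600-object / 400-attribute
# Visual Genome label files (the paths below are hypothetical):
#   vocab = SymbolicVocab("data/objects_vocab.txt", "data/attributes_vocab.txt")
# ids 0-2 are [CLS]/[SEP]/[MASK], ids 3-402 are the attributes (seg id 1), and
# ids 403-2002 are the objects (seg id 0), matching get_seg_id above.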
| 1,892 | 30.032787 | 130 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/optimization.py | # coding=utf-8
# Copyright 2019 project LXRT
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
import logging
logger = logging.getLogger(__name__)
def warmup_cosine(x, warmup=0.002):
    """ Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps,
    then follows a half-cosine decay. """
    if x < warmup:
        return x/warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
""" Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps.
Learning rate is 1. afterwards. """
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
""" Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step.
After `t_total`-th training step, learning rate is zero. """
if x < warmup:
return x/warmup
return max((x-1.)/(warmup-1.), 0)
SCHEDULES = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
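# Minimal sketch (not used elsewhere in this file) of how the schedules behave.
# With warmup=0.1, warmup_linear ramps from 0 to 1 over the first 10% of
# training, peaks at progress == warmup, then decays linearly to 0.
def _schedule_example():
    for progress in (0.05, 0.1, 0.5, 1.0):
        print(progress, warmup_linear(progress, warmup=0.1))
    # expected multipliers: 0.5, 1.0, ~0.556, 0.0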
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
## Print a summary of the optimizer
print("BERTAdam lr {} total_steps {} warmup {}".format(lr, t_total, warmup))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
warned_for_t_total = False
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# LXRT: grad is clipped outside.
# Add grad clipping
# if group['max_grad_norm'] > 0:
# clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
progress = state['step']/group['t_total']
lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])
                    # warning for exceeding t_total (only active with warmup_linear)
if group['schedule'] == "warmup_linear" and progress > 1. and not warned_for_t_total:
logger.warning(
"Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. "
"Please set 't_total' of {} correctly.".format(group['schedule'], lr_scheduled, self.__class__.__name__))
warned_for_t_total = True
# end warning
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
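# Hedged usage sketch mirroring how this repo constructs the optimizer in
# lxmert_pretrain.py; the model and step counts below are placeholders.
def _bert_adam_example(model, batches_per_epoch, epochs=4, lr=1e-4):
    t_total = batches_per_epoch * epochs
    optim = BertAdam(model.parameters(), lr=lr, warmup=0.05, t_total=t_total)
    # Per training step (gradients are clipped outside, see the LXRT note above):
    #   loss.backward(); clip_grad_norm_(model.parameters(), 1.)
    #   optim.step(); optim.zero_grad()
    return optim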
| 8,058 | 42.798913 | 141 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/entry.py | # coding=utf-8
# Copyright 2021 Project Unsupervised VisualBERT
# Copyright 2019 project LXRT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
import numpy as np
import numpy
from lxrt.tokenization import BertTokenizer
from collections import defaultdict
def get_padding_lengths(list_of_np_array):
return_dict = defaultdict(int)
for array in list_of_np_array:
for i, shape in enumerate(array.shape):
if return_dict["dimension_{}".format(i)] < shape:
return_dict["dimension_{}".format(i)] = shape
return return_dict
def pad_np_arrays(list_of_np_array, padding_value, dtype, cuda = True):
if isinstance(list_of_np_array[0], list):
list_of_np_array = [np.array(i, dtype=dtype) for i in list_of_np_array]
if list_of_np_array[0] is None:
return None
padding_lengths = get_padding_lengths(list_of_np_array)
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
    # Convert explicitly to an ndarray just in case it's a scalar (it'd end up not being an ndarray otherwise)
final_list = []
for array_index, array in enumerate(list_of_np_array):
return_array = numpy.asarray(numpy.ones(max_shape, dtype = dtype) * padding_value)
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(array.shape)
#if len(array.shape) < len(max_shape):
# slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(array.shape))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = array
final_list.append(return_array)
final_list = np.stack(final_list, 0)
tensor = torch.from_numpy(final_list)
if cuda:
return tensor.cuda()
else:
return tensor
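# Illustrative sketch: pad a ragged batch of 2-D arrays to a shared shape.
# The array sizes here are invented for the example.
def _pad_np_arrays_example():
    a = np.ones((2, 4), dtype=np.float32)
    b = np.ones((3, 2), dtype=np.float32)
    batch = pad_np_arrays([a, b], padding_value=0, dtype=np.float32, cuda=False)
    assert tuple(batch.shape) == (2, 3, 4)  # padded to the max along each dim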
#from param import args
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
def convert_sents_to_features(sents, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (i, sent) in enumerate(sents):
tokens_a = tokenizer.tokenize(sent.strip())
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# Keep segment id which allows loading BERT-weights.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids))
return features
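# Illustrative layout (sentence invented): with max_seq_length=8, a sentence
# tokenized to ["a", "dog", "runs"] yields
#   tokens:      [CLS]  a  dog  runs  [SEP]  pad  pad  pad
#   input_mask:    1    1   1    1      1     0    0    0
#   segment_ids:   0    0   0    0      0     0    0    0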
def convert_sents_to_features_tensors(sents, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (i, sent) in enumerate(sents):
tokens_a = tokenizer.tokenize(sent.strip())
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# Keep segment id which allows loading BERT-weights.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids))
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).cuda()
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).cuda()
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long).cuda()
return input_ids, input_mask, segment_ids
def convert_tags_to_tensorts(tags, cuda = True):
if tags[0] is None:
return None, None, None, None, None
visual_tags, visual_tags_mask, visual_tags_box, visual_tags_type, visual_tags_segment_ids = zip(*tags)
visual_tags = pad_np_arrays(visual_tags, padding_value=0, dtype=np.int64, cuda = cuda)
visual_tags_mask = pad_np_arrays(visual_tags_mask, padding_value=0, dtype=np.int64, cuda = cuda)
visual_tags_box = pad_np_arrays(visual_tags_box, padding_value=0, dtype=np.float32, cuda = cuda)
visual_tags_type = pad_np_arrays(visual_tags_type, padding_value=0, dtype=np.int64, cuda = cuda)
visual_tags_segment_ids = pad_np_arrays(visual_tags_segment_ids, padding_value=0, dtype=np.int64, cuda = cuda)
return visual_tags, visual_tags_mask, visual_tags_box, visual_tags_type, visual_tags_segment_ids
def convert_sent_features_to_features(sents, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (i, sent) in enumerate(sents):
tokens_a = tokenizer.tokenize(sent.strip())
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# Keep segment id which allows loading BERT-weights.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids))
return features
def set_visual_config(args, VISUAL_CONFIG):
VISUAL_CONFIG.l_layers = args.llayers
VISUAL_CONFIG.x_layers = args.xlayers
VISUAL_CONFIG.r_layers = args.rlayers
class LXRTEncoder(nn.Module):
def __init__(self, args, max_seq_length, mode='x'):
super().__init__()
self.max_seq_length = max_seq_length
from lxrt.modeling import LXRTFeatureExtraction as VisualBertForLXRFeature, VISUAL_CONFIG
set_visual_config(args, VISUAL_CONFIG)
# Using the bert tokenizer
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
# Build LXRT Model
self.model = VisualBertForLXRFeature.from_pretrained(
"bert-base-uncased",
mode=mode
)
if args.from_scratch:
print("Re-initializing all the weights")
self.model.apply(self.model.init_bert_weights)
self.load_pretrain_head = args.get("load_pretrain_head", False)
if self.load_pretrain_head:
from lxmert.src.lxrt.modeling import BertPreTrainingHeads
self.pretrained_head = BertPreTrainingHeads(self.model.config, self.model.bert.embeddings.word_embeddings.weight)
def multi_gpu(self):
self.model = nn.DataParallel(self.model)
@property
def dim(self):
return 768
def forward(self, sents, feats, visual_attention_mask=None, input_already_tokenized=False, visual_feats_seg_ids = None):
if not input_already_tokenized:
train_features = convert_sents_to_features(
sents, self.max_seq_length, self.tokenizer)
else:
train_features = sents
input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long).cuda()
input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long).cuda()
segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long).cuda()
output = self.model(input_ids, segment_ids, input_mask,
visual_feats=feats,
visual_attention_mask=visual_attention_mask,
visual_feats_seg_ids = visual_feats_seg_ids)
return output
def save(self, path):
torch.save(self.model.state_dict(),
os.path.join("%s_LXRT.pth" % path))
def load(self, path):
# Load state_dict from snapshot file
print("Load LXMERT pre-trained model from %s" % path)
state_dict = torch.load("%s_LXRT.pth" % path)
new_state_dict = {}
for key, value in state_dict.items():
if key.startswith("module."):
new_state_dict[key[len("module."):]] = value
else:
new_state_dict[key] = value
state_dict = new_state_dict
# Print out the differences of pre-trained and model weights.
load_keys = set(state_dict.keys())
model_keys = set(self.model.state_dict().keys())
print()
print("Weights in loaded but not in model:")
for key in sorted(load_keys.difference(model_keys)):
print(key)
print()
print("Weights in model but not in loaded:")
for key in sorted(model_keys.difference(load_keys)):
print(key)
print()
# Load weights to model
self.model.load_state_dict(state_dict, strict=False)
| 11,480 | 37.016556 | 125 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
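# Illustrative sketch: load_vocab maps each line of a one-token-per-line file
# to its zero-based line number. The three tokens below are examples only.
def _load_vocab_example():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("[PAD]\n[UNK]\nthe\n")
    vocab = load_vocab(f.name)
    assert vocab["the"] == 2 and vocab["[PAD]"] == 0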
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_wordpiece_only=False
"""
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
if self.do_basic_tokenize:
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because the English Wikipedia does
        # contain some Chinese words).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 15,388 | 38.560411 | 133 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/modeling.py | # coding=utf-8
# Copyright 2019 project LXRT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LXRT model."""
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
from copy import deepcopy
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
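# Illustrative values of the erf-based gelu above (not part of the original):
# gelu(x) = x * Phi(x) with Phi the standard normal CDF, so e.g.
#
#   gelu(torch.tensor([-1.0, 0.0, 1.0]))  # -> tensor([-0.1587, 0.0000, 0.8413])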
class GeLU(nn.Module):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
def __init__(self):
super().__init__()
def forward(self, x):
return gelu(x)
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class VisualConfig(object):
VISUAL_LOSSES = ['obj', 'attr', 'feat']
def __init__(self,
l_layers=12,
x_layers=5,
r_layers=0):
from param import args
self.l_layers = args.llayers
self.x_layers = args.xlayers
self.r_layers = args.rlayers
self.visual_feat_dim = 2048
self.visual_pos_dim = 4
'''if args.get("kl_divergence", False):
self.obj_id_num = 1601
self.attr_id_num = 401
else:'''
self.obj_id_num = 1600
self.attr_id_num = 400
self.visual_losses = self.VISUAL_LOSSES
weight = 1 / 0.15
if args.get("weight_disable", False):
weight = 1.0
ce_or_kl = "kl" if args.get("kl_divergence", False) else "ce"
self.visual_loss_config = {
'obj': (self.obj_id_num, ce_or_kl, (-1,), weight),
'attr': (self.attr_id_num, ce_or_kl, (-1,), weight),
'feat': (2048, 'l2', (-1, 2048), weight),
}
try:
from param import args
self.visualbert_style = args.get('visualbert_style', False)
self.symbolic_vocab_size = args.get('symbolic_vocab_size', 2632)
self.multi_choice = args.get("multi_choice", 0)
except:
self.visualbert_style = False
def set_visual_dims(self, feat_dim, pos_dim):
self.visual_feat_dim = feat_dim
self.visual_pos_dim = pos_dim
VISUAL_CONFIG = VisualConfig()
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
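# Illustrative round-trip (not part of the original module):
#
#   config = BertConfig(vocab_size_or_config_json_file=30522)
#   clone = BertConfig.from_dict(config.to_dict())
#   assert clone.vocab_size == 30522 and clone.hidden_size == 768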
BertLayerNorm = torch.nn.LayerNorm
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if VISUAL_CONFIG.visualbert_style:
self.symbolic_embedding = nn.Embedding(VISUAL_CONFIG.symbolic_vocab_size + 1, config.hidden_size) # The first is reserved for masking
def forward(self, input_ids, token_type_ids=None, attribute_ids=None, symbolic_embedding=False):
if symbolic_embedding:
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.symbolic_embedding(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + token_type_embeddings
if attribute_ids is not None:
attribute_mask = (attribute_ids != 0).float()
attribute_embedding = self.symbolic_embedding(attribute_ids)
# Average the attribute embeddings over the attribute axis, masking out padding
attribute_embedding = attribute_embedding * attribute_mask.unsqueeze(-1) # mask out paddings
attribute_embedding = attribute_embedding.sum(2)
length_attribute = attribute_mask.sum(2)
length_attribute[length_attribute == 0] = 1
attribute_embedding = attribute_embedding / length_attribute.unsqueeze(-1)
embeddings = embeddings + attribute_embedding
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def special_embedding(self, tokenized_words):
with torch.no_grad():
all_embeddings = []
for subwords in tokenized_words:
subwords = torch.LongTensor(subwords)
embedding = self.word_embeddings(subwords)
embedding = embedding.mean(dim=0)
all_embeddings.append(embedding)
all_embeddings = torch.stack( [torch.zeros_like(all_embeddings[0])] + all_embeddings, dim=0)
self.symbolic_embedding.weight = torch.nn.Parameter(all_embeddings)
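# Illustrative intent of special_embedding (not part of the original): each
# symbolic entry is initialized as the mean of its WordPiece subword
# embeddings, e.g. a tag tokenized into ids [w1, w2] gets
# (word_embeddings(w1) + word_embeddings(w2)) / 2, with row 0 reserved as an
# all-zeros masking entry.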
class BertAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
# visual_dim = 2048
if ctx_dim is None:
ctx_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(ctx_dim, self.all_head_size)
self.value = nn.Linear(ctx_dim, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
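# Shape sketch for transpose_for_scores (illustrative): with hidden_size=768
# and num_attention_heads=12, an input of shape (batch, seq, 768) is reshaped
# to (batch, seq, 12, 64) and permuted to (batch, 12, seq, 64), so each head
# attends over its own 64-dim slice.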
def forward(self, hidden_states, context, attention_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask (precomputed for all layers in the BertModel forward() function)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if args.get("output_attention", False):
return context_layer, attention_probs
return context_layer
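# The forward pass above is standard scaled dot-product attention,
#   Attention(Q, K, V) = softmax(Q K^T / sqrt(d_head) + mask) V,
# where `mask` holds 0 for visible positions and -10000 for padded ones
# (see the extended-mask construction in LXRTModel.forward below).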
class BertAttOutput(nn.Module):
def __init__(self, config):
super(BertAttOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertCrossattLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.att = BertAttention(config)
self.output = BertAttOutput(config)
def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None):
output = self.att(input_tensor, ctx_tensor, ctx_att_mask)
attention_output = self.output(output, input_tensor)
return attention_output
class BertSelfattLayer(nn.Module):
def __init__(self, config):
super(BertSelfattLayer, self).__init__()
self.self = BertAttention(config)
self.output = BertAttOutput(config)
def forward(self, input_tensor, attention_mask):
# Self-attention attends to itself, so keys and queries are the same (input_tensor).
self_output = self.self(input_tensor, input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertSelfattLayer(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
if args.get("output_attention", False):
attention_output, attention_weights = self.attention(hidden_states, attention_mask)
else:
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if args.get("output_attention", False):
return layer_output, attention_weights
return layer_output
"""
---------------------------------------------------------------------------------------
Above modules are copied from BERT (pytorch-transformer) with modifications.
---------------------------------------------------------------------------------------
"""
class BertEmbeddingsWithVisualEmbedding(nn.Module):
"""Construct the embeddings from word, position, token_type embeddings and visual embeddings.
"""
def __init__(self, config):
super(BertEmbeddingsWithVisualEmbedding, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.symbolic_embedding = nn.Embedding(2003, config.hidden_size)
#### Below are for encoding visual features
# Segment and position embedding for image features
self.token_type_embeddings_visual = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.position_embeddings_visual = nn.Embedding(config.max_position_embeddings, config.hidden_size)
feat_dim = VISUAL_CONFIG.visual_feat_dim
pos_dim = VISUAL_CONFIG.visual_pos_dim
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
# Box position encoding
self.box_fc = nn.Linear(pos_dim, config.hidden_size)
self.box_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.add_segment_embedding_to_visual = args.get("add_segment_embedding_to_visual", False)
self.add_segment_embedding_to_visual_tags = args.get("add_segment_embedding_to_visual_tags", False)
self.add_position_embedding_to_visual_tags = args.get("add_position_embedding_to_visual_tags", False)
self.tag_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.joint_layer_norm = args.get("joint_layer_norm", False)
self.use_segment_embedding_for_vision_and_tag = args.get("use_segment_embedding_for_vision_and_tag", False)
self.use_bert_input_for_tags = args.get('use_bert_input_for_tags', False)
self.disable_divide_2 = args.get("disable_divide_2", False)
def initialize_visual_position_type_embeddings(self):
### This is a bit unorthodox. The better way might be to add an initializer to AllenNLP.
# This function initializes token_type_embeddings_visual and position_embeddings_visual, just in case.
self.token_type_embeddings_visual.weight = torch.nn.Parameter(deepcopy(self.token_type_embeddings.weight.data), requires_grad = True)
self.position_embeddings_visual.weight = torch.nn.Parameter(deepcopy(self.position_embeddings.weight.data), requires_grad = True)
return
def initialize_symbolic_embeddings(self, tokenized_words):
with torch.no_grad():
all_embeddings = []
for subwords in tokenized_words:
subwords = torch.LongTensor(subwords)
embedding = self.word_embeddings(subwords)
embedding = embedding.mean(dim=0)
all_embeddings.append(embedding)
all_embeddings = torch.stack(all_embeddings, dim=0)
self.symbolic_embedding = nn.Embedding.from_pretrained(deepcopy(all_embeddings), freeze = False)
def forward(self, input_ids, token_type_ids=None, visual_embeddings=None, visual_embeddings_type=None, position_embeddings_visual=None, image_text_alignment=None, confidence=None, position_ids=None, boxes=None, visual_tags=None, visual_tags_box=None, visual_tags_type=None, visual_tags_segment_ids=None):
if input_ids is not None:
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
text_embeddings = words_embeddings + position_embeddings + token_type_embeddings
if not self.joint_layer_norm:
text_embeddings = self.LayerNorm(text_embeddings)
else:
text_embeddings = None
if visual_tags is not None:
if self.use_bert_input_for_tags:
tag_embeddings = self.word_embeddings(visual_tags)
else:
tag_embeddings = self.symbolic_embedding(visual_tags)
if args.get("oscar_style", False):
tag_position_ids = torch.arange(visual_tags.size(1), dtype=torch.long, device=visual_tags.device)
tag_position_ids = tag_position_ids.unsqueeze(0).expand_as(visual_tags)
tag_type_ids = torch.ones_like(visual_tags)
tag_position_embeddings = self.position_embeddings_visual(tag_position_ids)
tag_type_embeddings = self.token_type_embeddings_visual(tag_type_ids)
tag_embeddings = tag_embeddings + tag_position_embeddings + tag_type_embeddings
else:
y = self.box_fc(visual_tags_box)
if not self.joint_layer_norm:
y = self.box_layer_norm(y)
tag_embeddings = self.tag_layer_norm(tag_embeddings)
if not self.disable_divide_2:
tag_embeddings = (tag_embeddings + y) / 2 # + token_type_embeddings
else:
tag_embeddings = tag_embeddings + y
if visual_tags_segment_ids is not None:
assert(self.use_segment_embedding_for_vision_and_tag)
if self.use_segment_embedding_for_vision_and_tag:
if visual_tags_segment_ids is not None:
tag_type_ids = visual_tags_segment_ids
else:
tag_type_ids = torch.zeros_like(visual_tags) # Temporary
tag_type_embeddings = self.token_type_embeddings_visual(tag_type_ids)
tag_embeddings += tag_type_embeddings
else:
tag_embeddings = None
if visual_embeddings is not None:
x = self.visn_fc(visual_embeddings)
#x = self.visn_layer_norm(x)
y = self.box_fc(boxes)
#y = self.box_layer_norm(y)
if not self.joint_layer_norm:
x = self.visn_layer_norm(x)
y = self.box_layer_norm(y)
if not self.disable_divide_2:
v_embeddings = (x + y) / 2
else:
v_embeddings = x + y
#if visual_embeddings_type is not None:
# assert(self.use_segment_embedding_for_vision_and_tag)
if self.use_segment_embedding_for_vision_and_tag:
if visual_embeddings_type is None:
visual_embeddings_type = torch.zeros(*visual_embeddings.size()[:-1], dtype=torch.long).cuda()
token_type_embeddings_visual = self.token_type_embeddings_visual(visual_embeddings_type)
v_embeddings += token_type_embeddings_visual
else:
v_embeddings = None
if args.get("joint_layer_norm", False):
# Concatenate the two:
embeddings = torch.cat([i for i in [text_embeddings, tag_embeddings, v_embeddings] if i is not None] , dim = 1) # concat the visual embeddings after the attentions
embeddings = self.LayerNorm(embeddings)
else:
embeddings = torch.cat([i for i in [text_embeddings, tag_embeddings, v_embeddings] if i is not None], dim=1) # concat the visual embeddings after the attentions
embeddings = self.dropout(embeddings)
return embeddings
def unfreeze_obj_feat(self):
all_modules = [
self.token_type_embeddings_visual,
self.position_embeddings_visual,
# Object feature encoding
self.visn_fc,
self.visn_layer_norm,
# Box position encoding
self.box_fc,
self.box_layer_norm,
self.dropout]
for submodule in all_modules:
for p in submodule.parameters():
p.requires_grad = True
class LXRTXLayer(nn.Module):
def __init__(self, config):
super().__init__()
# The cross-attention Layer
self.visual_attention = BertCrossattLayer(config)
# Self-attention Layers
self.lang_self_att = BertSelfattLayer(config)
self.visn_self_att = BertSelfattLayer(config)
# Intermediate and Output Layers (FFNs)
self.lang_inter = BertIntermediate(config)
self.lang_output = BertOutput(config)
self.visn_inter = BertIntermediate(config)
self.visn_output = BertOutput(config)
def cross_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask):
# Cross Attention
lang_att_output = self.visual_attention(lang_input, visn_input, ctx_att_mask=visn_attention_mask)
visn_att_output = self.visual_attention(visn_input, lang_input, ctx_att_mask=lang_attention_mask)
return lang_att_output, visn_att_output
def self_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask):
# Self Attention
lang_att_output = self.lang_self_att(lang_input, lang_attention_mask)
visn_att_output = self.visn_self_att(visn_input, visn_attention_mask)
return lang_att_output, visn_att_output
def output_fc(self, lang_input, visn_input):
# FC layers
lang_inter_output = self.lang_inter(lang_input)
visn_inter_output = self.visn_inter(visn_input)
# Layer output
lang_output = self.lang_output(lang_inter_output, lang_input)
visn_output = self.visn_output(visn_inter_output, visn_input)
return lang_output, visn_output
def forward(self, lang_feats, lang_attention_mask,
visn_feats, visn_attention_mask):
lang_att_output = lang_feats
visn_att_output = visn_feats
lang_att_output, visn_att_output = self.cross_att(lang_att_output, lang_attention_mask,
visn_att_output, visn_attention_mask)
lang_att_output, visn_att_output = self.self_att(lang_att_output, lang_attention_mask,
visn_att_output, visn_attention_mask)
lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output)
return lang_output, visn_output
class VisualFeatEncoder(nn.Module):
def __init__(self, config):
super().__init__()
feat_dim = VISUAL_CONFIG.visual_feat_dim
pos_dim = VISUAL_CONFIG.visual_pos_dim
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
# Box position encoding
self.box_fc = nn.Linear(pos_dim, config.hidden_size)
self.box_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, visn_input):
# visn_input should be a (feats, boxes) pair; the bare-feature path below is disabled.
if isinstance(visn_input, tuple) or isinstance(visn_input, list):
feats, boxes = visn_input
x = self.visn_fc(feats)
x = self.visn_layer_norm(x)
y = self.box_fc(boxes)
y = self.box_layer_norm(y)
output = (x + y) / 2
output = self.dropout(output)
return output
else:
assert(0)
x = self.visn_fc(visn_input)
x = self.visn_layer_norm(x)
return x
def _cat_with_none(feat_1, feat_2, dim):
if feat_1 is None:
return feat_2
if feat_2 is None:
return feat_1
return torch.cat((feat_1, feat_2), dim=dim)
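# Illustrative behavior of _cat_with_none (not part of the original):
#
#   a, b = torch.zeros(2, 3, 8), torch.zeros(2, 5, 8)
#   _cat_with_none(a, b, dim=1).shape    # torch.Size([2, 8, 8])
#   _cat_with_none(a, None, dim=1) is a  # True; None operands pass through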
def _split_with_none(lang_feats, visn_feats, joint_feats):
if lang_feats is None:
assert(visn_feats.size(1) == joint_feats.size(1))
return None, joint_feats
if visn_feats is None:
assert(lang_feats.size(1) == joint_feats.size(1))
return joint_feats, None
return joint_feats[:, :lang_feats.size(1), :].contiguous(), joint_feats[:, lang_feats.size(1):, :].contiguous()
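# Illustrative behavior of _split_with_none (not part of the original): the
# joint tensor is split back at lang_feats.size(1), so lang (2, 3, 8) and
# visn (2, 5, 8) recover slices (2, 3, 8) and (2, 5, 8) from a (2, 8, 8)
# joint tensor; if either side was None, the joint tensor is returned whole
# in the other slot.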
class LXRTEncoder(nn.Module):
def __init__(self, config):
super().__init__()
# Obj-level image embedding layer
self.visn_fc = VisualFeatEncoder(config)
# Number of layers
self.num_l_layers = VISUAL_CONFIG.l_layers
self.num_x_layers = VISUAL_CONFIG.x_layers
self.num_r_layers = VISUAL_CONFIG.r_layers
print("LXRT encoder with %d l_layers, %d x_layers, and %d r_layers." %
(self.num_l_layers, self.num_x_layers, self.num_r_layers))
self.multi_choice = VISUAL_CONFIG.multi_choice
self.visualbert_style = VISUAL_CONFIG.visualbert_style
if self.visualbert_style:
layers = [BertLayer(config) for _ in range(self.num_l_layers)]
self.layer = nn.ModuleList(layers)
if args.get("additional_attention_layer", False):
_config = copy.deepcopy(config)
_config.intermediate_size = 768
_config.num_attention_heads = 1
#layers += [BertLayer(_config)]
self.additional_layer = BertLayer(_config)
print("\n\n!! Has {} layers".format(len(self.layer) + 1))
else:
print("\n\n!! Has {} layers".format(len(self.layer)))
return
# Layers
# Using self.layer instead of self.l_layer to support loading BERT weights.
'''self.layer = nn.ModuleList(
[BertLayer(config) for _ in range(self.num_l_layers)]
) '''
layers = [BertLayer(config) for _ in range(self.num_l_layers)]
print(args.additional_attention_layer)
assert(0)
if args.get("additional_attention_layer", False):
_config = copy.deepcopy(config)
_config.intermediate_size = 768
layers += [BertLayer(_config)]
self.layer = nn.ModuleList(layers)
print("\n\n!! Has {} layers".format(len(self.layer)))
self.x_layers = nn.ModuleList(
[LXRTXLayer(config) for _ in range(self.num_x_layers)]
)
self.r_layers = nn.ModuleList(
[BertLayer(config) for _ in range(self.num_r_layers)]
)
self.multi_choice = VISUAL_CONFIG.multi_choice
self.config = config
def forward(self,
lang_feats, lang_attention_mask,
visn_feats, visn_attention_mask=None,
bypass_visual_feat=None, bypass_mask=None,
layer_limit = -1):
# Run visual embedding layer
# Note: Word embedding layer was executed outside this module.
# Keep this design to allow loading BERT weights.
if not args.get("hybrid_embedding", False):
if args.get("symbolic", False):
visn_feats, adj = visn_feats
elif visn_feats[0] is not None:
visn_feats = self.visn_fc(visn_feats)
else:
visn_feats = None
if self.multi_choice != 0:
visn_feats = visn_feats.unsqueeze(1).expand( visn_feats.size(0), self.multi_choice, visn_feats.size(1), visn_feats.size(2))
visn_attention_mask = visn_attention_mask.unsqueeze(1).expand(visn_attention_mask.size(0), self.multi_choice, visn_attention_mask.size(1), visn_attention_mask.size(2), visn_attention_mask.size(3))
#print(visn_feats.size())
visn_feats = visn_feats.reshape((-1, visn_feats.size(2), visn_feats.size(3)))
visn_attention_mask = visn_attention_mask.reshape((-1, visn_attention_mask.size(-3), visn_attention_mask.size(-2), visn_attention_mask.size(-1)))
if self.visualbert_style:
if args.get("bypass_visual_feat", False):
joint_feats = _cat_with_none(lang_feats, visn_feats, dim=1) #torch.cat((lang_feats, visn_feats), dim=1)
joint_mask = _cat_with_none(lang_attention_mask, visn_attention_mask, dim=-1) #torch.cat((lang_attention_mask, visn_attention_mask), dim=-1)
if args.get("include_additional_layer", True):
for layer_module in self.layer[:-1]:
joint_feats = layer_module(joint_feats, joint_mask)
joint_feats = torch.cat((joint_feats, bypass_visual_feat), dim=1)
joint_feats = self.layer[-1](joint_feats, bypass_mask)
return _split_with_none(joint_feats, visn_feats, joint_feats)
else:
for layer_module in self.layer:
joint_feats = layer_module(joint_feats, joint_mask)
return torch.cat((joint_feats, bypass_visual_feat), dim = 1), None
if args.get("seperate_modeling", False):
#assert (args.get("additional_attention_layer", False))
joint_feats = _cat_with_none(lang_feats, visn_feats, dim=1) #torch.cat((lang_feats, visn_feats), dim=1)
joint_mask = _cat_with_none(lang_attention_mask, visn_attention_mask, dim=-1) #torch.cat((lang_attention_mask, visn_attention_mask), dim=-1)
if layer_limit != -1:
for layer_module in self.layer[:layer_limit]:
joint_feats = layer_module(joint_feats, joint_mask)
else:
for layer_module in self.layer:
joint_feats = layer_module(joint_feats, joint_mask)
return _split_with_none(lang_feats, visn_feats, joint_feats) #joint_feats[:, :lang_feats.size(1), :].contiguous(), joint_feats[:, lang_feats.size(1):, :].contiguous()
joint_feats = _cat_with_none(lang_feats, visn_feats, dim=1) #torch.cat((lang_feats, visn_feats), dim=1)
joint_mask = _cat_with_none(lang_attention_mask, visn_attention_mask, dim=-1) #torch.cat((lang_attention_mask, visn_attention_mask), dim=-1)
all_attention_weights = []
for layer_module in self.layer:
if args.get("output_attention", False):
joint_feats, attention_weights = layer_module(joint_feats, joint_mask)
all_attention_weights.append(attention_weights)
else:
joint_feats = layer_module(joint_feats, joint_mask)
if args.get("additional_attention_layer", False):
joint_feats = self.additional_layer(joint_feats, joint_mask)
if args.get("output_attention", False):
return _split_with_none(lang_feats, visn_feats, joint_feats), all_attention_weights
return _split_with_none(lang_feats, visn_feats, joint_feats) #joint_feats[:, :lang_feats.size(1), :].contiguous(), joint_feats[:, lang_feats.size(1):, :].contiguous()
# Run language layers
if lang_feats is not None:
for layer_module in self.layer:
lang_feats = layer_module(lang_feats, lang_attention_mask)
# Run relational layers
for layer_module in self.r_layers:
visn_feats = layer_module(visn_feats, visn_attention_mask)
# Run cross-modality layers
if lang_feats is not None:
for layer_module in self.x_layers:
lang_feats, visn_feats = layer_module(lang_feats, lang_attention_mask,
visn_feats, visn_attention_mask)
return lang_feats, visn_feats
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
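# Weight-tying note (illustrative): self.decoder.weight is the word embedding
# matrix itself, so the LM head projects hidden states back onto the
# vocabulary with the transposed input embeddings plus a learned bias:
#
#   logits = hidden @ word_embeddings.weight.T + bias  # (batch, seq, vocab)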
class BertVisualAnswerHead(nn.Module):
def __init__(self, config, num_answers):
super().__init__()
hid_dim = config.hidden_size
self.logit_fc = nn.Sequential(
nn.Linear(hid_dim, hid_dim * 2),
GeLU(),
BertLayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, num_answers)
)
def forward(self, hidden_states):
return self.logit_fc(hidden_states)
class BertVisualObjHead(nn.Module):
def __init__(self, config, visual_losses):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# Decide the use of visual losses
visual_losses = visual_losses.split(",")
for loss in visual_losses:
assert loss in VISUAL_CONFIG.VISUAL_LOSSES
self.visual_losses = visual_losses
sizes = {key: VISUAL_CONFIG.visual_loss_config[key][0] for key in self.visual_losses}
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder_dict = nn.ModuleDict({
key: nn.Linear(config.hidden_size, sizes[key])
for key in self.visual_losses
})
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
output = {}
for key in self.visual_losses:
output[key] = self.decoder_dict[key](hidden_states)
return output
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
if args.get("lxmert_style_nlvr", False):
self.seq_relationship_new = nn.Linear(config.hidden_size * 2, 2)
else:
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output, calculate_seq_score = True):
prediction_scores = self.predictions(sequence_output)
if not calculate_seq_score:
return prediction_scores, None
if args.get("lxmert_style_nlvr", False):
seq_relationship_score = self.seq_relationship_new(pooled_output)
else:
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
from_tf=False, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
cache_dir = args.get("cache_dir", "/local/harold/tmp/")
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path == 'bert-base-uncased':
try:
print("The BERT-weight-downloading query to AWS was time-out;"
"trying to download from UNC servers")
archive_file = "https://nlp.cs.unc.edu/data/bert/bert-base-uncased.tar.gz"
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
print("The weight-downloading still crashed with link: %s, "
"please check your network connection" % archive_file)
return None
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file) or from_tf:
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp(prefix="/local/harold/tmp/")
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
if from_tf:
# Directly load from a TensorFlow checkpoint
weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
from param import args
class LXRTModel(BertPreTrainedModel):
"""LXRT Model."""
def __init__(self, config):
super().__init__(config)
if args.get("hybrid_embedding", False):
self.embeddings = BertEmbeddingsWithVisualEmbedding(config)
else:
self.embeddings = BertEmbeddings(config)
self.encoder = LXRTEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self,
input_ids, token_type_ids=None, attention_mask=None,
visual_feats=None, visual_attention_mask=None, position_embeddings_visual=None,
visual_tags=None, visual_tags_mask=None, visual_tags_box=None, visual_tags_type=None, visual_tags_segment_ids=None,
visual_feats_seg_ids = None,
):
if visual_attention_mask is None and visual_feats[0] is not None:
if args.get("uneven_masks", False):
visual_attention_mask = 1 - (visual_feats[0] == 0.0).all(-1).float().to(next(self.parameters()).device)
else:
visual_attention_mask = torch.ones(visual_feats[0].size(0), visual_feats[0].size(1)).to(next(self.parameters()).device)
if attention_mask is None and input_ids is not None:
attention_mask = torch.ones_like(input_ids)
if visual_tags_mask is None and visual_tags is not None:
visual_tags_mask = torch.ones_like(visual_tags)
# Process masks
if visual_attention_mask is not None:
extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * -10000.0
else:
extended_visual_attention_mask = None
if attention_mask is not None:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
else:
extended_attention_mask = None
if visual_tags_mask is not None:
extended_visual_tags_mask = visual_tags_mask.unsqueeze(1).unsqueeze(2)
extended_visual_tags_mask = extended_visual_tags_mask.to(dtype=next(self.parameters()).dtype)
extended_visual_tags_mask = (1.0 - extended_visual_tags_mask) * -10000.0
else:
extended_visual_tags_mask = None
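# Additive-mask sketch (illustrative): a binary mask [1, 1, 0] becomes
# (1.0 - [1, 1, 0]) * -10000.0 = [0, 0, -10000] with shape (batch, 1, 1, seq);
# added to the raw attention scores before the softmax, the -10000 entries
# drive attention to padded positions to ~0.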
embedding_output = self.embeddings(
input_ids=input_ids,
token_type_ids=None,
position_ids=None,
visual_embeddings=visual_feats[0],
boxes=visual_feats[1],
visual_embeddings_type=visual_feats_seg_ids,
position_embeddings_visual=None,
image_text_alignment=None,
confidence=None,
visual_tags=visual_tags,
visual_tags_box=visual_tags_box,
visual_tags_type=visual_tags_type,
visual_tags_segment_ids = visual_tags_segment_ids
)
concated_mask = torch.cat([i for i in [extended_attention_mask, extended_visual_tags_mask, extended_visual_attention_mask] if i is not None], dim=-1)
# self.encoder will not distinguish between visual inputs, visual tag inputs or text inputs
if args.get("output_attention", False):
combined_feats, _, attention_weights = self.encoder(
embedding_output,
concated_mask,
visn_feats=None,
visn_attention_mask=None)
else:
combined_feats, _ = self.encoder(
embedding_output,
concated_mask,
visn_feats=None,
visn_attention_mask=None)
if attention_mask is not None:
lang_feats = combined_feats[:,:attention_mask.size(-1)]
else:
lang_feats = None
if visual_tags_mask is not None:
if attention_mask is None:
tag_feats = combined_feats[:,:visual_tags_mask.size(-1)]
else:
tag_feats = combined_feats[:, attention_mask.size(-1): attention_mask.size(-1) + visual_tags_mask.size(-1)]
else:
tag_feats = None
if visual_attention_mask is not None:
visn_feats = combined_feats[:, -visual_attention_mask.size(-1):]
else:
visn_feats = None
if lang_feats is not None:
pooled_output = self.pooler(lang_feats)
if args.get("output_attention", False):
return (lang_feats, tag_feats, visn_feats), pooled_output, attention_weights
return (lang_feats, tag_feats, visn_feats), pooled_output
else:
if args.get("output_attention", False):
return (lang_feats, tag_feats, visn_feats), None, attention_weights
return (lang_feats, tag_feats, visn_feats), None
class LXRTPretraining(BertPreTrainedModel):
def __init__(self,
config,
args=None,
task_mask_lm=True,
task_matched=True,
task_obj_predict=True,
visual_losses='',
task_qa=True,
num_answers=2):
super().__init__(config)
# Configuration
self.config = config
self.num_answers = num_answers
self.args = args
# Use of pre-training tasks
self.task_mask_lm = task_mask_lm
self.task_obj_predict = task_obj_predict
self.task_matched = task_matched
self.task_qa = task_qa
# LXRT backbone
self.bert = LXRTModel(config)
# Pre-training heads
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
if self.task_obj_predict:
self.obj_predict_head = BertVisualObjHead(config, visual_losses)
if self.task_qa:
self.answer_head = BertVisualAnswerHead(config, self.num_answers)
if args.get("use_tag_symbolic_embedding", False):
self.symbolic_head = deepcopy(self.cls)
# Weight initialization
self.apply(self.init_bert_weights)
def special_initialize_pretraining_head(self):
self.symbolic_head.predictions.decoder.weight = self.bert.embeddings.symbolic_embedding.weight
self.symbolic_head.predictions.bias = nn.Parameter(torch.zeros(self.symbolic_head.predictions.decoder.weight.size(0)))
def forward(self,
input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
visual_feats=None, pos=None, obj_labels=None,
matched_label=None, ans=None,
visual_tags=None, visual_tags_mask=None, visual_tags_box=None, visual_tags_type=None, visual_tags_objective=None, visual_tags_mismatch=None, visual_tags_segment_ids=None,
visual_feats_seg_ids=None,
return_cross_relationship_score = False
):
(lang_output, tags_output, visn_output), pooled_output = self.bert(
input_ids, token_type_ids, attention_mask,
visual_feats=(visual_feats, pos), visual_feats_seg_ids = visual_feats_seg_ids,
visual_tags=visual_tags, visual_tags_mask=visual_tags_mask, visual_tags_box=visual_tags_box, visual_tags_type = visual_tags_type, visual_tags_segment_ids = visual_tags_segment_ids
)
if input_ids is None:
answer_score = None
cross_relationship_score = None
else:
if args.get('lxmert_style_nlvr', False):
pooled_output = pooled_output.view(pooled_output.size(0) // 2, 2 * pooled_output.size(-1))
lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
if self.task_qa:
answer_score = self.answer_head(pooled_output)
else:
# This answer_score would not be used anywhere,
# just to keep a constant return function signature.
answer_score = pooled_output[0][0]
total_loss = 0.
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses = ()
losses_dict = {}
if masked_lm_labels is not None and self.task_mask_lm:
masked_lm_loss = loss_fct(
lang_prediction_scores.view(-1, self.config.vocab_size),
masked_lm_labels.view(-1)
)
total_loss += masked_lm_loss
losses += (masked_lm_loss.detach(),)
if visual_feats is not None:
losses_dict["Masked LM"] = masked_lm_loss.detach()
else:
losses_dict["Text Only Masked LM"] = masked_lm_loss.detach()
if matched_label is not None and self.task_matched and cross_relationship_score is not None:
matched_loss = loss_fct(
cross_relationship_score.view(-1, 2),
matched_label.view(-1)
)
total_loss += matched_loss
losses += (matched_loss.detach(),)
losses_dict["Matches"] = matched_loss.detach()
if obj_labels is not None and self.task_obj_predict and not args.get("disable_visual_and_tag_objective", False):
loss_fcts = {
'l2': SmoothL1Loss(reduction='none'),
'ce': CrossEntropyLoss(ignore_index=-1, reduction='none'),
"kl": torch.nn.KLDivLoss(reduction = "batchmean")
}
total_visn_loss = 0.
visn_prediction_scores_dict = self.obj_predict_head(visn_output)
for key in self.args.visual_losses.split(","):
label, mask_conf = obj_labels[key]
if key == "attr" or key == "obj":
label = label.long()
elif key == "feat":
label = label.float()
else:
assert(0)
output_dim, loss_fct_name, label_shape, weight = VISUAL_CONFIG.visual_loss_config[key]
visn_loss_fct = loss_fcts[loss_fct_name]
visn_prediction_scores = visn_prediction_scores_dict[key]
visn_loss = visn_loss_fct(
visn_prediction_scores.view(-1, output_dim),
label.view(*label_shape),
)
if visn_loss.dim() > 1: # Regression Losses
visn_loss = visn_loss.mean(1)
visn_loss = (visn_loss * mask_conf.view(-1)).mean() * weight
total_visn_loss += visn_loss
losses += (visn_loss.detach(),)
losses_dict[key] = visn_loss.detach()
total_loss += total_visn_loss
if ans is not None and self.task_qa and input_ids is not None:
answer_loss = loss_fct(
answer_score.view(-1, self.num_answers),
ans.view(-1)
)
# Since this Github version pre-trains with QA loss from the beginning,
# I exclude "*2" here to match the effect of QA losses.
# Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. (Used 10 instead of 6 in EMNLP paper)
# Now : (loss *1) for 12 epochs
#
# * 2 # Multiply by 2 because > half of the data will not have label
total_loss += answer_loss
losses += (answer_loss.detach(),)
losses_dict["qa"] = answer_loss.detach()
if visual_tags_objective is not None and not args.get("disable_visual_and_tag_objective", False):
if args.get("use_bert_input_for_tags", False):
tags_output, _ = self.cls(tags_output, tags_output[:, 0], calculate_seq_score = False)
masked_tag_loss = loss_fct(
tags_output.view(-1, self.config.vocab_size),
visual_tags_objective.view(-1)
)
else:
tags_output, _ = self.symbolic_head(tags_output, tags_output[:, 0])
masked_tag_loss = loss_fct(
tags_output.view(-1, 2003),
visual_tags_objective.view(-1)
)
total_loss += masked_tag_loss
losses_dict["Masked Tags"] = masked_tag_loss.detach()
if visual_tags_mismatch is not None:
matched_loss = loss_fct(
cross_relationship_score.view(-1, 2),
visual_tags_mismatch.view(-1)
)
total_loss += matched_loss
losses += (matched_loss.detach(),)
losses_dict["Tag mismatch"] = matched_loss.detach()
if answer_score is None:
return total_loss, torch.stack(losses).unsqueeze(0), answer_score, losses_dict
return total_loss, torch.stack(losses).unsqueeze(0) if len(losses) > 0 else (), answer_score.detach(), losses_dict
class LXRTFeatureExtraction(BertPreTrainedModel):
"""
BERT model for classification.
"""
def __init__(self, config, mode='lxr'):
"""
:param config:
:param mode: which outputs to return: a string containing 'l' (language feats), 'x' (pooled output), and/or 'r' (vision feats)
"""
super().__init__(config)
self.config = config
self.bert = LXRTModel(config)
self.mode = mode
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, visual_feats=None,
visual_attention_mask=None, return_both = False, visual_feats_seg_ids = None):
feat_seq, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
visual_feats=visual_feats,
visual_attention_mask=visual_attention_mask,
visual_feats_seg_ids = visual_feats_seg_ids)
if return_both:
return feat_seq, pooled_output
if 'x' == self.mode:
return pooled_output
elif 'x' in self.mode and ('l' in self.mode or 'r' in self.mode):
return feat_seq, pooled_output
elif 'l' in self.mode or 'r' in self.mode:
return feat_seq | 69,048 | 45.124916 | 308 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/h5_data.py | import h5py
from copy import deepcopy
import numpy as np
import json
from torch.utils.data import Dataset
import torch
import random
from param import args
from tqdm import tqdm
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import gc
from src.tools import sharearray
import os
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
all_ = []
for i in range(0, len(lst), n):
data_index = lst[i:i + n]
if len(data_index) == n:  # Drop any incomplete final batch
all_.append(data_index)
return all_
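# Illustrative behavior (not part of the original): incomplete tail batches
# are dropped, so
#
#   chunks([1, 2, 3, 4, 5], 2)  # -> [[1, 2], [3, 4]] (the trailing [5] is discarded)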
class CustomBatchSampler():
# We upsample certain datasets
def __init__(self, datasets, batch_size, upsample_ratios = [1, 1, 1], reduce_to_non_batch_sampler = False):
self.datasets = datasets
self.batch_size = batch_size
self.lengths = [len(i) for i in self.datasets]
self.upsample_ratios = upsample_ratios
self.rotate_index = [0] * len(self.upsample_ratios)
self.reduce_to_non_batch_sampler = reduce_to_non_batch_sampler
_flag = False
for i in self.upsample_ratios:
if i < 1:
_flag = True
self.all_indexes = [torch.randperm(i).tolist() for i in self.lengths]
assert(not args.get("old_sampler", False))
if args.get("gradient_accumulation_steps", None):
self.batch_size = batch_size * args.gradient_accumulation_steps
self.prepare_indexes()
def prepare_indexes(self):
self.all_batched_indexes = []
current_index = 0
for index, i in enumerate(self.lengths):
#if args.get("debug", False):
# random_indexes = list(range(i))
#else:
tmp_indexes = []
if self.upsample_ratios[index] < 1:
sample_num = int(1 / self.upsample_ratios[index])
random_indexes = self.all_indexes[index][self.rotate_index[index]:][::sample_num]
self.rotate_index[index] = self.rotate_index[index] + 1 #% sample_num
if self.rotate_index[index] == sample_num:
self.all_indexes[index] = torch.randperm(i).tolist()
self.rotate_index[index] = 0 # Reset rotate index
random.shuffle(random_indexes)
random_indexes = [j + current_index for j in random_indexes]
random_indexes = chunks(random_indexes, self.batch_size)
#self.all_batched_indexes.extend(random_indexes)
else:
random_indexes = torch.randperm(i).tolist()
random_indexes = [j + current_index for j in random_indexes]
random_indexes = chunks(random_indexes, self.batch_size)
#self.all_batched_indexes.extend(random_indexes)
random.shuffle(random_indexes)
self.all_batched_indexes.append(random_indexes)
if self.upsample_ratios[index] > 1:
for k in range(self.upsample_ratios[index] - 1):
#if args.get("debug", False):
# random_indexes = list(range(i))
#else:
random_indexes = torch.randperm(i).tolist()
random_indexes = [j + current_index for j in random_indexes]
random_indexes = chunks(random_indexes, self.batch_size)
#self.all_batched_indexes.extend(random_indexes)
random.shuffle(random_indexes)
self.all_batched_indexes[index].extend(random_indexes)
current_index += i
        all_flattened_batches = []
        original_recorder = [len(i) for i in self.all_batched_indexes]
        original_recorder = [i / sum(original_recorder) for i in original_recorder]
        index_recorder = np.array([len(i) - 1 for i in self.all_batched_indexes])
        while np.any(index_recorder >= 0):
            chosen_index = np.random.choice(len(original_recorder), p=original_recorder)
            if index_recorder[chosen_index] >= 0:
                all_flattened_batches.append(self.all_batched_indexes[chosen_index][index_recorder[chosen_index]])
                index_recorder[chosen_index] -= 1
        self.all_batched_indexes = all_flattened_batches
if self.reduce_to_non_batch_sampler:
new_ = []
for i in self.all_batched_indexes:
for j in i:
new_.append([j])
self.all_batched_indexes = new_
if args.get("gradient_accumulation_steps", None):
flattened_indexes = []
for indexes in self.all_batched_indexes:
flattened_indexes.extend(indexes)
self.all_batched_indexes = chunks(flattened_indexes, self.batch_size // args.gradient_accumulation_steps)
return current_index
def __iter__(self):
self.prepare_indexes()
return iter(self.all_batched_indexes)
def __len__(self):
return len(self.all_batched_indexes)
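# Minimal usage sketch (dataset names ds_a/ds_b are hypothetical): the sampler
# yields lists of indices into the concatenated dataset, so it plugs into a
# DataLoader as a batch_sampler rather than a sampler.
#   sampler = CustomBatchSampler([ds_a, ds_b], batch_size=32, upsample_ratios=[1, 2])
#   loader = DataLoader(ConcateDataset([ds_a, ds_b]), batch_sampler=sampler)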
class ConcateDataset(Dataset):
def __init__(self, datasets):
self.datasets = datasets
def __getitem__(self, index):
#return self.datasets[1][0]
len_of_datasets = [len(i) for i in self.datasets]
for i in range(0, len(len_of_datasets)):
'''if i == len(self.len_of_datasets) - 1 and index >= self.len_of_datasets[i]:
index = index % self.len_of_datasets[i]'''
if index < len_of_datasets[i]:
return self.datasets[i][index]
else:
index -= len_of_datasets[i]
def __len__(self):
return sum([len(i) for i in self.datasets])
class ConcateH5():
def __init__(self, list_of_h5):
self.list_of_h5 = list_of_h5
self.len_of_h5 = [len(i) for i in list_of_h5]
self.current_copy_index = None
self.current_copy = None
def __getitem__(self, index):
for i in range(0, len(self.len_of_h5)):
if index < self.len_of_h5[i]:
return self.list_of_h5[i][index]
else:
index -= self.len_of_h5[i]
def __len__(self):
return sum(self.len_of_h5)
class ImageFeatureDataset():
def __init__(self, h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, h5_wh, ids_to_index, h5_num_boxes = None, version_3 = False):
self.h5_features = h5_features
self.h5_boxes = h5_boxes
self.h5_objects_id = h5_objects_id
self.h5_objects_conf = h5_objects_conf
self.h5_attrs_id = h5_attrs_id
self.h5_attrs_conf = h5_attrs_conf
self.h5_wh = h5_wh
self.ids_to_index = ids_to_index
self.h5_num_boxes = h5_num_boxes
self.all_indexes = None
self.version_3 = version_3
def __getitem__(self, img_id):
image_index = self.ids_to_index[img_id]
if self.h5_num_boxes is not None:
obj_num = self.h5_num_boxes[image_index]
else:
obj_num = 36
feats = self.h5_features[image_index]
boxes = self.h5_boxes[image_index]
img_h = self.h5_wh[image_index][1]
img_w = self.h5_wh[image_index][0]
        # For VCR, we did not keep the labels; instead we kept the per-class confidences
if self.version_3:
obj_confs = np.array(self.h5_objects_conf[image_index][:, 1:])
attr_confs = np.array(self.h5_attrs_conf[image_index][:, 1:])
obj_labels = np.argmax(obj_confs, axis=1)
attr_labels = np.argmax(attr_confs, axis=1)
obj_confs = np.max(obj_confs, axis=1)
attr_confs = np.max(attr_confs, axis = 1)
else:
obj_labels = self.h5_objects_id[image_index]
obj_confs = self.h5_objects_conf[image_index]
attr_labels = self.h5_attrs_id[image_index]
attr_confs = self.h5_attrs_conf[image_index]
return image_index, obj_num, feats, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs
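    # Illustrative read (the img_id value is hypothetical): indexing by a string
    # image id returns a 10-tuple of per-image arrays, e.g.
    #   (idx, obj_num, feats, boxes, img_h, img_w,
    #    obj_labels, obj_confs, attr_labels, attr_confs) = image_feature_dataset["COCO_val2014_000000000042"]
    # With the 36-box extraction settings used elsewhere in this repo, feats is
    # typically (36, 2048) and boxes is (36, 4).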
def get_everything_except_features(self, img_id):
image_index = self.ids_to_index[img_id]
obj_num = 36
#feats = self.h5_features[image_index]
boxes = self.h5_boxes[image_index]
img_h = self.h5_wh[image_index][1]
img_w = self.h5_wh[image_index][0]
obj_labels = self.h5_objects_id[image_index]
obj_confs = self.h5_objects_conf[image_index]
attr_labels = self.h5_attrs_id[image_index]
attr_confs = self.h5_attrs_conf[image_index]
return image_index, obj_num, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs
@classmethod
def create(cls,
sources,
Split2ImgFeatPath_h5,
load_custom_h5_version2=False, load_custom_h5_version3=False,
text_only = False, on_memory=False):
current_counter = 0
ids_to_index = {}
h5_features_list = []
h5_boxes_list = []
h5_objects_id_list = []
h5_objects_conf_list = []
h5_attrs_id_list = []
h5_attrs_conf_list = []
h5_wh_list = []
h5_num_boxes_list = []
for split in sources:
if load_custom_h5_version2:
h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, wh_list, h5_num_boxes = cls.load_custom_h5_version2(Split2ImgFeatPath_h5[split], text_only = text_only, on_memory = on_memory)
elif load_custom_h5_version3:
h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, wh_list, h5_num_boxes = cls.load_custom_h5_version3(Split2ImgFeatPath_h5[split], on_memory = on_memory)
else:
h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf = cls.load_custom_h5(Split2ImgFeatPath_h5[split], on_memory=on_memory, text_only=text_only)
h5_num_boxes = [36] * len(h5_features)
print(Split2ImgFeatPath_h5[split], len(h5_boxes))
h5_features_list.append(h5_features)
h5_boxes_list.append(h5_boxes)
h5_objects_id_list.append(h5_objects_id)
h5_objects_conf_list.append(h5_objects_conf)
h5_attrs_id_list.append(h5_attrs_id)
h5_attrs_conf_list.append(h5_attrs_conf)
h5_num_boxes_list.append(h5_num_boxes)
if load_custom_h5_version2 or load_custom_h5_version3:
with open(Split2ImgFeatPath_h5[split].replace("h5", "txt").replace('no_features', "image_ids"), "r") as f:
image_ids = f.readlines()
for index, i in enumerate(image_ids):
                    # Skip images with no boxes; this may warrant a sanity check
if h5_num_boxes[index] == 0:
continue
ids_to_index[i.replace("\n", "")] = index + current_counter
current_counter += len(image_ids)
else:
with open(Split2ImgFeatPath_h5[split].replace("h5", "json"), "r") as f:
metadata = json.load(f)
wh_list = []
for index, i in enumerate(metadata):
ids_to_index[i["img_id"]] = index + current_counter
wh_list.append((i['img_w'], i['img_h']))
current_counter += len(metadata)
h5_wh_list.append(wh_list)
print("Created {}".format(sources))
h5_features = ConcateH5(h5_features_list)
h5_boxes = ConcateH5(h5_boxes_list)
h5_objects_id = ConcateH5(h5_objects_id_list)
h5_objects_conf = ConcateH5(h5_objects_conf_list)
h5_attrs_id = ConcateH5(h5_attrs_id_list)
h5_attrs_conf = ConcateH5(h5_attrs_conf_list)
h5_wh = ConcateH5(h5_wh_list)
h5_num_boxes_list = ConcateH5(h5_num_boxes_list)
return cls(h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, h5_wh, ids_to_index, h5_num_boxes = h5_num_boxes_list, version_3 = load_custom_h5_version3)
@staticmethod
def load_custom_h5(h5_file_name, on_memory=False, text_only = False):
h5_file = h5py.File(h5_file_name, "r")
if on_memory:
print("Reading h5 {}".format(h5_file))
h5_features = sharearray.cache(h5_file_name.split("/")[-1], lambda: h5_file['features'])
gc.collect()
else:
h5_features = h5_file['features']
h5_boxes = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "boxes"), np.array(h5_file['boxes']))
h5_objects_id = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "objects_id"), np.array(h5_file['objects_id']))
h5_objects_conf = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "objects_conf"), np.array(h5_file['objects_conf']))
h5_attrs_id = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "attrs_id"), np.array(h5_file['attrs_id']))
h5_attrs_conf = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "attrs_conf"), np.array(h5_file['attrs_conf']))
for index in range(len(h5_attrs_id)):
assert( np.all(h5_attrs_id[index] == np.array(h5_file['attrs_id'][index])))
if on_memory:
h5_file.close()
del h5_file
gc.collect()
return h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf
@staticmethod
    def load_custom_h5_version2(h5_file_name, on_memory=False, text_only=False):  # This version is used for Conceptual Captions
if not text_only:
h5_file_feature = h5py.File(h5_file_name.replace("no_features", "features"), "r")
h5_file = h5py.File(h5_file_name, "r")
        if on_memory and not text_only:  # the features file is only opened when text_only is False
print("Reading h5 {}".format(h5_file_name.replace("no_features", "features")))
h5_features = sharearray.cache(h5_file_name.replace("no_features", "features").split("/")[-1], lambda: h5_file_feature['image_features'])
gc.collect()
else:
if not text_only:
h5_features = h5_file_feature['image_features']
h5_boxes = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "boxes"), lambda: h5_file['boxes'])
h5_num_boxes = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "num_boxes"), lambda: h5_file['num_boxes'])
if not args.get("kl_divergence", False):
h5_objects_id = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "object_ids"), lambda: np.array(h5_file['object_ids'])[:, :, 0]) #deepcopy(np.array(h5_file['object_ids'])[:, :, 0])
h5_objects_conf = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "object_pro"), lambda: np.array(h5_file['object_pro'])[:, :, 0]) #deepcopy(np.array(h5_file['object_pro'])[:, :, 0])
h5_attrs_id = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "attribute_ids"), lambda: np.array(h5_file['attribute_ids'])[:, :, 0]) #deepcopy(np.array(h5_file['attribute_ids'])[:, :, 0])
h5_attrs_conf = sharearray.cache("{}_{}".format(h5_file_name.split("/")[-1], "attribute_pro"), lambda: np.array(h5_file['attribute_pro'])[:, :, 0]) #deepcopy(np.array(h5_file['attribute_pro'])[:, :, 0])
else:
h5_objects_id = deepcopy(np.array(h5_file['object_ids']))
h5_objects_conf = deepcopy(np.array(h5_file['object_pro']))
h5_attrs_id = deepcopy(np.array(h5_file['attribute_ids']))
h5_attrs_conf = deepcopy(np.array(h5_file['attribute_pro']))
gc.collect()
img_h = deepcopy(np.array(h5_file['img_h'])).tolist()
img_w = deepcopy(np.array(h5_file['img_w'])).tolist()
wh_list = []
for i in range(len(img_h)):
wh_list.append((img_w[i], img_h[i]))
h5_file.close()
del h5_file
gc.collect()
if text_only:
h5_features = [0] * len(h5_num_boxes)
return h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, wh_list, h5_num_boxes
@staticmethod
    def load_custom_h5_version3(h5_file_name, on_memory=False, keep_top_1=True):  # This version is used for Conceptual Captions
h5_file_feature = h5py.File(h5_file_name.replace("no_features", "features"), "r")
h5_file = h5py.File(h5_file_name, "r")
if on_memory:
print("Reading h5 {}".format(h5_file_name.replace("no_features", "features")))
h5_features = sharearray.cache(h5_file_name.replace("no_features", "features").split("/")[-1], lambda: h5_file_feature['image_features'])
gc.collect()
else:
h5_features = h5_file_feature['image_features']
h5_boxes = deepcopy(np.array(h5_file['boxes']))
h5_num_boxes = deepcopy(np.array(h5_file['num_boxes']))
h5_objects_conf = h5_file['object_pro']
h5_attrs_conf = h5_file['attribute_pro']
img_h = deepcopy(np.array(h5_file['img_h'])).tolist()
img_w = deepcopy(np.array(h5_file['img_w'])).tolist()
wh_list = []
for i in range(len(img_h)):
wh_list.append((img_w[i], img_h[i]))
        h5_objects_id = np.zeros(len(wh_list))  # placeholder
        h5_attrs_id = np.zeros(len(wh_list))  # placeholder
return h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf, wh_list, h5_num_boxes
| 17,660 | 44.518041 | 225 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
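# Illustrative behavior: the cache filename is sha256(url) in hex, with
# sha256(etag) appended after a period when an ETag is supplied.
#   >>> name = url_to_filename("https://example.com/model.bin", etag='"abc"')
#   >>> len(name.split("."))
#   2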
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
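# Illustrative behavior:
#   >>> split_s3_path("s3://my-bucket/models/weights.bin")
#   ('my-bucket', 'models/weights.bin')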
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w', encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| 8,209 | 32.104839 | 112 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tasks/vqa_data.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import json
import os
import pickle
import numpy as np
import torch
from torch.utils.data import Dataset
import h5py
from copy import deepcopy
from param import args
from utils import load_obj_tsv
from pretrain.tag_data_utilis import create_tags
from lxrt.tokenization import BertTokenizer
from lxrt.h5_data import ImageFeatureDataset
# Load part of the dataset for fast checking.
# Note that this is the number of images rather than the number of examples;
# all data associated with the kept images will be used.
TINY_IMG_NUM = 512
FAST_IMG_NUM = 5000
# The path to data and image features.
VQA_DATA_ROOT = 'data/vqa/'
MSCOCO_IMGFEAT_ROOT = 'data/mscoco_imgfeat/'
SPLIT2NAME = {
'train': 'train2014',
'valid': 'val2014',
'minival': 'val2014',
'nominival': 'val2014',
'test': 'test2015',
}
Split2ImgFeatPath = {
'train': 'data/mscoco_imgfeat/train2014_obj36.h5',
'valid': 'data/mscoco_imgfeat/val2014_obj36.h5',
'minival': 'data/mscoco_imgfeat/val2014_obj36.h5',
'nominival': 'data/mscoco_imgfeat/val2014_obj36.h5',
"test": 'data/mscoco_imgfeat/test2015_obj36.h5',
}
class VQADataset:
"""
A VQA data example in json file:
{
"answer_type": "other",
"img_id": "COCO_train2014_000000458752",
"label": {
"net": 1
},
"question_id": 458752000,
"question_type": "what is this",
"sent": "What is this photo taken looking through?"
}
"""
def __init__(self, splits: str):
self.name = splits
self.splits = splits.split(',')
# Loading datasets
self.data = []
for split in self.splits:
self.data.extend(json.load(open("data/vqa/%s.json" % split)))
print("Load %d data from split(s) %s." % (len(self.data), self.name))
# Convert list to dict (for evaluation)
self.id2datum = {
datum['question_id']: datum
for datum in self.data
}
# Answers
self.ans2label = json.load(open("data/vqa/trainval_ans2label.json"))
self.label2ans = json.load(open("data/vqa/trainval_label2ans.json"))
assert len(self.ans2label) == len(self.label2ans)
@property
def num_answers(self):
return len(self.ans2label)
def __len__(self):
return len(self.data)
class ConcateH5():
def __init__(self, list_of_h5):
self.list_of_h5 = list_of_h5
self.len_of_h5 = [len(i) for i in list_of_h5]
def __getitem__(self, index):
for i in range(0, len(self.len_of_h5)):
if index < self.len_of_h5[i]:
return self.list_of_h5[i][index]
else:
index -= self.len_of_h5[i]
def __len__(self):
return sum(self.len_of_h5)
"""
An example in obj36 tsv:
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
FIELDNAMES would be keys in the dict returned by load_obj_tsv.
"""
mapping_rawdataset_name_to_json = {
"train": "train",
"nominival": "val",
"minival": "val"
}
class VQATorchDataset(Dataset):
def __init__(self, dataset: VQADataset, args):
super().__init__()
self.raw_dataset = dataset
if args.tiny:
topk = TINY_IMG_NUM
elif args.fast:
topk = FAST_IMG_NUM
else:
topk = None
self.limit_to_symbolic_split = args.get("limit_to_symbolic_split", False)
if self.limit_to_symbolic_split:
dataDir = "/local/harold/ubert/bottom-up-attention/data/vg/"
coco_ids = set()
self.mapping_cocoid_to_imageid = {}
with open(os.path.join(dataDir, 'image_data.json')) as f:
metadata = json.load(f)
for item in metadata:
if item['coco_id']:
coco_ids.add(int(item['coco_id']))
self.mapping_cocoid_to_imageid[int(item['coco_id'])] = item["image_id"]
from lib.data.vg_gqa import vg_gqa
self.vg_gqa = vg_gqa(None, split = "val" if self.raw_dataset.name == "minival" else "train", transforms=None, num_im=-1)
self.custom_coco_data = args.get("custom_coco_data", False)
self.use_h5_file = args.get("use_h5_file", False)
if self.use_h5_file:
self.image_feature_dataset = ImageFeatureDataset.create(dataset.splits, Split2ImgFeatPath, on_memory = args.get("on_memory", False))
self.ids_to_index = self.image_feature_dataset.ids_to_index
            # Keep only the data whose image features were loaded
used_data = []
for datum in self.raw_dataset.data:
if datum['img_id'] in self.ids_to_index:
used_data.append(datum)
else:
# Loading detection features to img_data
img_data = []
for split in dataset.splits:
# Minival is 5K images in MS COCO, which is used in evaluating VQA/LXMERT-pre-training.
# It is saved as the top 5K features in val2014_***.tsv
load_topk = 5000 if (split == 'minival' and topk is None) else topk
img_data.extend(load_obj_tsv(
os.path.join(MSCOCO_IMGFEAT_ROOT, '%s_obj36.tsv' % (SPLIT2NAME[split])),
topk=load_topk))
# Convert img list to dict
self.imgid2img = {}
for img_datum in img_data:
self.imgid2img[img_datum['img_id']] = img_datum
used_data = self.raw_dataset.data
used_data = used_data[::args.get("partial_dataset", 1)]
self.data = used_data
        # Only keep the data with loaded image features
print("Use %d data in torch dataset" % (len(self.data)))
print()
if args.get("add_tags", False):
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
from lxrt.symbolic_vocabulary import SymbolicVocab
self.symbolic_vocab = SymbolicVocab(args.objects_vocab, args.attributes_vocab)
def load_custom_h5(self, h5_file):
h5_features = h5_file['features']
h5_boxes = deepcopy(np.array(h5_file['boxes']))
h5_objects_id = deepcopy(np.array(h5_file['objects_id']))
h5_objects_conf = deepcopy(np.array(h5_file['objects_conf']))
h5_attrs_id = deepcopy(np.array(h5_file['attrs_id']))
h5_attrs_conf = deepcopy(np.array(h5_file['attrs_conf']))
return h5_features, h5_boxes, h5_objects_id, h5_objects_conf, h5_attrs_id, h5_attrs_conf
def __len__(self):
return len(self.data)
def __getitem__(self, item: int):
datum = self.data[item]
img_id = datum['img_id']
ques_id = datum['question_id']
ques = datum['sent']
if self.custom_coco_data:
image_index = self.ids_to_index[img_id]
obj_num = None
feats = self.h5_features[image_index]
boxes = self.h5_boxes[image_index]
img_h = self.h5_wh[image_index][1]
img_w = self.h5_wh[image_index][0]
obj_confs = None
attr_labels = None
attr_confs = None
elif self.use_h5_file:
'''image_index = self.ids_to_index[img_id]
obj_num = 36
feats = self.h5_features[image_index]
boxes = self.h5_boxes[image_index]
img_h = self.h5_wh[image_index][1]
img_w = self.h5_wh[image_index][0] '''
image_index, obj_num, feats, boxes, img_h, img_w, obj_labels, obj_confs, attr_labels, attr_confs = self.image_feature_dataset[img_id]
else:
# Get image info
img_info = self.imgid2img[img_id]
obj_num = img_info['num_boxes']
feats = img_info['features'].copy()
boxes = img_info['boxes'].copy()
assert obj_num == len(boxes) == len(feats)
img_h, img_w = img_info['img_h'], img_info['img_w']
# Normalize the boxes (to 0 ~ 1)
boxes = boxes.copy()
boxes[:, (0, 2)] /= img_w
boxes[:, (1, 3)] /= img_h
np.testing.assert_array_less(boxes, 1+1e-5)
np.testing.assert_array_less(-boxes, 0+1e-5)
if args.get("add_tags", False):
tags = create_tags(obj_labels=obj_labels, attr_labels=attr_labels, obj_confs=None, attr_confs=None, tokenizer=self.tokenizer, symbolic_vocab = self.symbolic_vocab, visual_tags_box = boxes, use_bert_input=True)
else:
tags = None
# Provide label (target)
if 'label' in datum:
label = datum['label']
target = torch.zeros(self.raw_dataset.num_answers)
for ans, score in label.items():
target[self.raw_dataset.ans2label[ans]] = score
return ques_id, feats, boxes, ques, tags, target
else:
return ques_id, feats, boxes, ques, tags
class VQAEvaluator:
def __init__(self, dataset: VQADataset):
self.dataset = dataset
def evaluate(self, quesid2ans: dict):
score = 0.
for quesid, ans in quesid2ans.items():
datum = self.dataset.id2datum[quesid]
label = datum['label']
if ans in label:
score += label[ans]
return score / len(quesid2ans)
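    # Illustrative scoring (values hypothetical): with soft VQA labels, each
    # predicted answer contributes its annotator-agreement score, e.g.
    #   quesid2ans = {458752000: "net"} and label = {"net": 1.0}  ->  score 1.0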
def dump_result(self, quesid2ans: dict, path):
"""
Dump results to a json file, which could be submitted to the VQA online evaluation.
VQA json file submission requirement:
results = [result]
result = {
"question_id": int,
"answer": str
}
:param quesid2ans: dict of quesid --> ans
:param path: The desired path of saved file.
"""
with open(path, 'w') as f:
result = []
for ques_id, ans in quesid2ans.items():
result.append({
'question_id': ques_id,
'answer': ans
})
json.dump(result, f, indent=4, sort_keys=True)
| 10,280 | 34.329897 | 221 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tasks/vqa_model.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import torch.nn as nn
from param import args
from lxrt.entry import LXRTEncoder, convert_sents_to_features_tensors, convert_tags_to_tensorts, pad_np_arrays
from lxrt.modeling import BertLayerNorm, GeLU
from lxrt.tokenization import BertTokenizer
import numpy as np
# Max length including <bos> and <eos>
MAX_VQA_LENGTH = 20
class VQAModel(nn.Module):
def __init__(self, num_answers):
super().__init__()
# Build LXRT encoder
self.lxrt_encoder = LXRTEncoder(
args,
max_seq_length=MAX_VQA_LENGTH
)
hid_dim = self.lxrt_encoder.dim
# VQA Answer heads
self.logit_fc = nn.Sequential(
nn.Linear(hid_dim, hid_dim * 2),
GeLU(),
BertLayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, num_answers)
)
self.logit_fc.apply(self.lxrt_encoder.model.init_bert_weights)
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
def multi_gpu(self):
self.lxrt_encoder.model.module.bert = nn.DataParallel(self.lxrt_encoder.model.module.bert)
def forward(self, feat, pos, sent, tags):
"""
b -- batch_size, o -- object_number, f -- visual_feature_size
:param feat: (b, o, f)
:param pos: (b, o, 4)
        :param sent: (b,) Type -- list of string
        :param tags: (b,) Type -- list of per-example tag structures (entries may be None)
        :return: (b, num_answer) The logits over all answers.
"""
#x = self.lxrt_encoder(sent, (feat, pos))
input_ids, input_mask, segment_ids = convert_sents_to_features_tensors(sent, max_seq_length = MAX_VQA_LENGTH, tokenizer=self.tokenizer)
visual_tags, visual_tags_mask, visual_tags_box, visual_tags_type, visual_tags_segment_ids = convert_tags_to_tensorts(tags)
feat = pad_np_arrays(feat, padding_value=0, dtype=np.float32)
pos = pad_np_arrays(pos, padding_value=0, dtype=np.float32)
stuff, pooled_output = self.lxrt_encoder.model.module.bert(
input_ids, segment_ids, input_mask,
visual_feats=(feat, pos),
visual_attention_mask=None,
visual_feats_seg_ids=None,
visual_tags=visual_tags, visual_tags_mask=visual_tags_mask, visual_tags_box=visual_tags_box, visual_tags_type=visual_tags_type, visual_tags_segment_ids=visual_tags_segment_ids,
)
logit = self.logit_fc(pooled_output)
return logit
| 2,612 | 34.310811 | 192 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tasks/vqa.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import os
import collections
import torch
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import h5py
import pandas as pd
from param import args
from pretrain.qa_answer_table import load_lxmert_qa, load_lxmert_from_sgg_and_lxmert_pretrain, load_lxmert_from_pretrain_noqa
from tasks.vqa_model import VQAModel
from tasks.vqa_data import VQADataset, VQATorchDataset, VQAEvaluator
from utils import load_lxmert_sgg
DataTuple = collections.namedtuple("DataTuple", 'dataset loader evaluator')
def get_data_tuple(splits: str, bs:int, shuffle=False, drop_last=False) -> DataTuple:
dset = VQADataset(splits)
tset = VQATorchDataset(dset, args)
evaluator = VQAEvaluator(dset)
data_loader = DataLoader(
tset, batch_size=bs,
shuffle=shuffle, num_workers=args.num_workers,
drop_last=drop_last, pin_memory=True,
collate_fn=lambda x: x
)
return DataTuple(dataset=dset, loader=data_loader, evaluator=evaluator)
class VQA:
def __init__(self):
# Datasets
self.train_tuple = get_data_tuple(
args.train, bs=args.batch_size, shuffle=True, drop_last=True
)
if args.valid != "":
valid_bsize = args.get("valid_batch_size", 16)
self.valid_tuple = get_data_tuple(
args.valid, bs=valid_bsize,
shuffle=False, drop_last=False
)
else:
self.valid_tuple = None
# Model
self.model = VQAModel(self.train_tuple.dataset.num_answers)
# Load pre-trained weights
if args.load_lxmert is not None:
self.model.lxrt_encoder.load(args.load_lxmert)
if args.get("load_lxmert_pretrain", None) is not None:
load_lxmert_from_pretrain_noqa(args.load_lxmert_pretrain, self.model)
if args.load_lxmert_qa is not None:
load_lxmert_qa(args.load_lxmert_qa, self.model,
label2ans=self.train_tuple.dataset.label2ans)
# GPU options
self.model = self.model.cuda()
if args.multiGPU:
self.model.lxrt_encoder.multi_gpu()
self.model.multi_gpu()
# Loss and Optimizer
self.bce_loss = nn.BCEWithLogitsLoss()
if 'bert' in args.optim:
batch_per_epoch = len(self.train_tuple.loader)
t_total = int(batch_per_epoch * args.epochs)
print("BertAdam Total Iters: %d" % t_total)
from lxrt.optimization import BertAdam
self.optim = BertAdam(list(self.model.parameters()),
lr=args.lr,
warmup=0.1,
t_total=t_total)
else:
self.optim = args.optimizer(self.model.parameters(), args.lr)
# Output Directory
self.output = args.output
os.makedirs(self.output, exist_ok=True)
def train(self, train_tuple, eval_tuple):
dset, loader, evaluator = train_tuple
iter_wrapper = (lambda x: tqdm(x, total=len(loader))) if args.tqdm else (lambda x: x)
best_valid = 0.
train_results = []
report_every = args.get("report_every", 100)
for epoch in range(args.epochs):
quesid2ans = {}
for i, batch in iter_wrapper(enumerate(loader)):
ques_id, feats, boxes, sent, tags, target = zip(*batch)
self.model.train()
self.optim.zero_grad()
target = torch.stack(target).cuda()
logit = self.model(feats, boxes, sent, tags)
assert logit.dim() == target.dim() == 2
loss = self.bce_loss(logit, target)
loss = loss * logit.size(1)
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 5.)
self.optim.step()
train_results.append(pd.Series({"loss":loss.detach().mean().item()}))
score, label = logit.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid] = ans
if i % report_every == 0 and i > 0:
print("Epoch: {}, Iter: {}/{}".format(epoch, i, len(loader)))
print(" {}\n~~~~~~~~~~~~~~~~~~\n".format(pd.DataFrame(train_results[-report_every:]).mean()))
log_str = "\nEpoch %d: Train %0.2f\n" % (epoch, evaluator.evaluate(quesid2ans) * 100.)
if self.valid_tuple is not None: # Do Validation
valid_score = self.evaluate(eval_tuple)
if valid_score > best_valid and not args.get("special_test", False):
best_valid = valid_score
self.save("BEST")
log_str += "Epoch %d: Valid %0.2f\n" % (epoch, valid_score * 100.) + \
"Epoch %d: Best %0.2f\n" % (epoch, best_valid * 100.)
if epoch >= 5:
self.save("Epoch{}".format(epoch))
print(log_str, end='')
print(args.output)
self.save("LAST")
def predict(self, eval_tuple: DataTuple, dump=None):
"""
Predict the answers to questions in a data split.
:param eval_tuple: The data tuple to be evaluated.
:param dump: The path of saved file to dump results.
:return: A dict of question_id to answer.
"""
self.model.eval()
dset, loader, evaluator = eval_tuple
quesid2ans = {}
for i, batch in enumerate(tqdm(loader)):
            fields = list(zip(*batch))
            ques_id, feats, boxes, sent, tags = fields[:5]  # a target field, if present, is not needed at prediction time
with torch.no_grad():
#target = torch.stack(target).cuda()
logit = self.model(feats, boxes, sent, tags)
score, label = logit.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid] = ans
if dump is not None:
evaluator.dump_result(quesid2ans, dump)
return quesid2ans
def evaluate(self, eval_tuple: DataTuple, dump=None):
"""Evaluate all data in data_tuple."""
quesid2ans = self.predict(eval_tuple, dump)
return eval_tuple.evaluator.evaluate(quesid2ans)
@staticmethod
def oracle_score(data_tuple):
dset, loader, evaluator = data_tuple
quesid2ans = {}
for i, (ques_id, feats, boxes, sent, target) in enumerate(loader):
_, label = target.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid.item()] = ans
return evaluator.evaluate(quesid2ans)
def save(self, name):
torch.save(self.model.state_dict(),
os.path.join(self.output, "%s.pth" % name))
def load(self, path):
print("Load model from %s" % path)
state_dict = torch.load("%s.pth" % path)
self.model.load_state_dict(state_dict)
if __name__ == "__main__":
# Build Class
vqa = VQA()
# Load VQA model weights
# Note: It is different from loading LXMERT pre-trained weights.
if args.load is not None:
vqa.load(args.load)
# Test or Train
if args.test is not None:
args.fast = args.tiny = False # Always loading all data in test
if 'test' in args.test:
vqa.predict(
get_data_tuple(args.test, bs=64,
shuffle=False, drop_last=False),
dump=os.path.join(args.output, 'test_predict.json')
)
elif 'val' in args.test:
            # Since part of the validation data is used in pre-training/fine-tuning,
            # only validate on the minival set.
result = vqa.evaluate(
get_data_tuple('minival', bs=64,
shuffle=False, drop_last=False),
dump=os.path.join(args.output, 'minival_predict.json')
)
print(result)
else:
assert False, "No such test option for %s" % args.test
else:
print('Splits in Train data:', vqa.train_tuple.dataset.splits)
if vqa.valid_tuple is not None:
print('Splits in Valid data:', vqa.valid_tuple.dataset.splits)
#print("Valid Oracle: %0.2f" % (vqa.oracle_score(vqa.valid_tuple) * 100))
else:
print("DO NOT USE VALIDATION")
vqa.train(vqa.train_tuple, vqa.valid_tuple)
| 8,707 | 36.86087 | 125 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/vg_gqa_imgfeat/extract_gqa_image.py | # !/usr/bin/env python
# The root of the bottom-up-attention repo. No need to change this if using the provided docker file.
BUTD_ROOT = '/opt/butd/'
import os, sys
sys.path.insert(0, BUTD_ROOT + "/tools")
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.test import im_detect, _get_blobs
from fast_rcnn.nms_wrapper import nms
import caffe
import argparse
import pprint
import base64
import numpy as np
import cv2
import csv
from tqdm import tqdm
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
# Settings for the number of features per image. To re-create pretrained features with 36 features
# per image, set both values to 36.
MIN_BOXES = 36
MAX_BOXES = 36
def load_image_ids(img_root):
pathXid = []
for name in os.listdir(img_root):
idx = name.split(".")[0]
pathXid.append(
(
os.path.join(img_root, name),
idx))
return pathXid
def generate_tsv(prototxt, weights, image_ids, outfile):
# First check if file exists, and if it is complete
    # Note: do not rely on a set to preserve ordering.
wanted_ids = set([image_id[1] for image_id in image_ids])
found_ids = set()
if os.path.exists(outfile):
with open(outfile, "r") as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
found_ids.add(item['img_id'])
missing = wanted_ids - found_ids
if len(missing) == 0:
print('already completed {:d}'.format(len(image_ids)))
else:
print('missing {:d}/{:d}'.format(len(missing), len(image_ids)))
if len(missing) > 0:
caffe.set_mode_gpu()
caffe.set_device(0)
net = caffe.Net(prototxt, caffe.TEST, weights=weights)
with open(outfile, 'ab') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for im_file, image_id in tqdm(image_ids):
if image_id in missing:
try:
writer.writerow(get_detections_from_im(net, im_file, image_id))
except Exception as e:
print(e)
def get_detections_from_im(net, im_file, image_id, conf_thresh=0.2):
"""
:param net:
:param im_file: full path to an image
:param image_id:
:param conf_thresh:
:return: all information from detection and attr prediction
"""
im = cv2.imread(im_file)
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
    # Keep the original boxes, don't worry about the regression bbox outputs
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
blobs, im_scales = _get_blobs(im, None)
cls_boxes = rois[:, 1:5] / im_scales[0]
cls_prob = net.blobs['cls_prob'].data
attr_prob = net.blobs['attr_prob'].data
pool5 = net.blobs['pool5_flat'].data
# Keep only the best detections
max_conf = np.zeros((rois.shape[0]))
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes][:, 1:], axis=1)
objects_conf = np.max(cls_prob[keep_boxes][:, 1:], axis=1)
attrs = np.argmax(attr_prob[keep_boxes][:, 1:], axis=1)
attrs_conf = np.max(attr_prob[keep_boxes][:, 1:], axis=1)
return {
"img_id": image_id,
"img_h": np.size(im, 0),
"img_w": np.size(im, 1),
"objects_id": base64.b64encode(objects), # int64
"objects_conf": base64.b64encode(objects_conf), # float32
"attrs_id": base64.b64encode(attrs), # int64
"attrs_conf": base64.b64encode(attrs_conf), # float32
"num_boxes": len(keep_boxes),
"boxes": base64.b64encode(cls_boxes[keep_boxes]), # float32
"features": base64.b64encode(pool5[keep_boxes]) # float32
}
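# Sketch of decoding one TSV row on the consumer side (assumed reader code,
# matching the base64-encoded numpy buffers written above):
#   feats = np.frombuffer(base64.b64decode(item["features"]), dtype=np.float32)
#   feats = feats.reshape(int(item["num_boxes"]), -1)   # (36, 2048) with these settings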
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id(s) to use',
default='0', type=str)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--out', dest='outfile',
help='output filepath',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--imgroot', type=str, default='/workspace/images/')
parser.add_argument('--split', type=str, default='valid')
parser.add_argument('--caffemodel', type=str, default='./resnet101_faster_rcnn_final_iter_320000.caffemodel')
args = parser.parse_args()
return args
if __name__ == '__main__':
# Setup the configuration, normally do not need to touch these:
args = parse_args()
args.cfg_file = BUTD_ROOT + "experiments/cfgs/faster_rcnn_end2end_resnet.yml" # s = 500
args.prototxt = BUTD_ROOT + "models/vg/ResNet-101/faster_rcnn_end2end_final/test.prototxt"
args.outfile = "%s_obj36.tsv" % "vg_gqa"
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
assert cfg.TEST.HAS_RPN
# Load image ids, need modification for new datasets.
image_ids = load_image_ids(args.imgroot)
    # Generate TSV files; normally this does not need modification
generate_tsv(args.prototxt, args.caffemodel, image_ids, args.outfile)
| 6,511 | 35.58427 | 113 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/nlvr2/nlvr/nlvr2/eval/compute_category_accuracy.py | import json
import numpy as np
import sys
# Preds file
preds = dict()
with open(sys.argv[1]) as infile:
for line in infile:
identifier, assignment = line.strip().split(',')
preds[identifier] = assignment
# Annotations file
sent_to_annot = dict()
categories = set()
with open(sys.argv[2]) as infile:
examples = [example for example in infile.read().split('\n\n') if example.strip()]
print('Loaded %d annotated examples.' % len(examples))
for example in examples:
lines = example.split('\n')
sent = lines[0]
sent_to_annot[sent] = list()
for category in lines[1:]:
category = category[2:]
sent_to_annot[sent].append(category)
categories.add(category)
print('Found %d categories.' % len(categories))
category_corrects = dict()
for category in categories:
category_corrects[category] = list()
# Labels file
with open(sys.argv[3]) as infile:
for line in infile:
example = json.loads(line)
identifier = example["identifier"]
label = example["label"].lower()
sentence = example["sentence"]
assignment = preds[identifier].lower()
if assignment in {'true', 'false'}:
correct = int(assignment == label)
else:
raise ValueError('Assignment is not true/false: ' + assignment)
if sentence in sent_to_annot:
categories = sent_to_annot[sentence]
for category in categories:
category_corrects[category].append(correct)
print('Per-category accuracy:')
for category, corrects in sorted(category_corrects.items(), key = lambda x: x[0]):
print(category + ': ' + '{0:.2f}'.format(100. * np.mean(np.array(corrects))) + ' (of %d examples)' % len(corrects))
| 1,791 | 29.896552 | 119 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/nlvr2/nlvr/nlvr2/eval/metrics.py | import json
import sys
# Load the predictions file. Assume it is a CSV.
predictions = { }
for line in open(sys.argv[1]).readlines():
if line:
splits = line.strip().split(",")
# We assume identifiers are in the format "split-####-#-#.png".
identifier = splits[0]
prediction = splits[1]
predictions[identifier] = prediction
# Load the labeled examples.
labeled_examples = [json.loads(line) for line in open(sys.argv[2]).readlines() if line]
# Check that every example has a prediction. If not, identify the ones that
# are missing, and exit.
total_num = len(labeled_examples)
if len(predictions) < total_num:
print("Some predictions are missing!")
print("Got " + str(len(predictions)) + " predictions but expected " + str(total_num))
for example in labeled_examples:
lookup = example["identifier"]
if not lookup in predictions:
print("Missing prediction for item " + str(lookup))
exit()
# Compute accuracy by iterating through the examples and checking the value
# that was predicted.
# Also update the "consistency" dictionary that keeps track of whether all
# predictions for a given sentence were correct.
num_correct = 0.
consistency_dict = { }
for example in labeled_examples:
anon_label = example["identifier"].split("-")
anon_label[2] = ''
anon_label = '-'.join(anon_label)
if not anon_label in consistency_dict:
consistency_dict[anon_label] = True
lookup = example["identifier"]
prediction = predictions[lookup]
if prediction.lower() == example["label"].lower():
num_correct += 1.
else:
consistency_dict[anon_label] = False
# Calculate consistency.
num_consistent = 0.
unique_sentence = len(consistency_dict)
for identifier, consistent in consistency_dict.items():
if consistent:
num_consistent += 1
# Report values.
print("accuracy=" + str(num_correct / total_num))
print("consistency=" + str(num_consistent / unique_sentence))
| 1,887 | 30.466667 | 87 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/nlvr2/nlvr/nlvr2/eval/compute_filtered_accuracy.py | import json
import numpy as np
import sys
# Preds file
preds = dict()
with open(sys.argv[1]) as infile:
for line in infile:
identifier, assignment = line.strip().split(',')
preds[identifier] = assignment
# Labels file
corrects = list()
with open(sys.argv[2]) as infile:
for line in infile:
example = json.loads(line)
identifier = example["identifier"]
label = example["label"].lower()
assignment = preds[identifier].lower()
if assignment in {'true','false'}:
corrects.append(int(assignment == label))
else:
print(assignment)
print(100. * np.mean(np.array(corrects)))
| 666 | 24.653846 | 56 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/nlvr2/nlvr/nlvr2/util/download_images.py | import imagehash
import json
import os
import progressbar
import signal
import socket
import sys
import requests
from PIL import Image
json_file = sys.argv[1]
save_dir = sys.argv[2]
split_name = json_file.split(".")[0]
HEADER = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
TIMEOUT = 2 # Timeout of 2 sections.
examples = [json.loads(line) for line in open(json_file).readlines()]
class Timeout():
"""Timeout class using ALARM signal."""
class Timeout(Exception):
pass
def __init__(self, sec):
self.sec = sec
def __enter__(self):
signal.signal(signal.SIGALRM, self.raise_timeout)
signal.alarm(self.sec)
def __exit__(self, *args):
signal.alarm(0) # disable alarm
def raise_timeout(self, *args):
raise Timeout.Timeout()
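# Minimal usage sketch: a SIGALRM-based timeout around blocking I/O; note this
# only works in the main thread on Unix-like systems.
#   with Timeout(2):
#       response = requests.get(url, headers=HEADER)   # raises Timeout.Timeout after 2 s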
def save_image(filename, url, img_hash, wrong_hash_file):
save_path = os.path.join(save_dir, filename)
if not os.path.exists(save_path):
try:
with Timeout(TIMEOUT):
try:
request = requests.get(url, headers = HEADER, stream = True)
# Save the image to the specified directory
with open(save_path, 'wb') as f:
for chunk in request.iter_content(1024):
f.write(chunk)
# And make sure the hash is correct
try:
saved_hash = str(imagehash.average_hash(Image.open(save_path)))
if not saved_hash == img_hash:
wrong_hash_file.write(str(url) + "\t" + str(filename) + "\t" + str(saved_hash) + "\t" + str(img_hash) + "\n")
except OSError as e:
return e
except requests.exceptions.ConnectionError as e:
return e
except requests.exceptions.TooManyRedirects as e:
return e
except requests.exceptions.ChunkedEncodingError as e:
return e
except requests.exceptions.ContentDecodingError as e:
return e
return request.status_code
except Timeout.Timeout as e:
return e
pbar = progressbar.ProgressBar(maxval=len(examples))
hash_file = sys.argv[3]
hashes = json.loads(open(hash_file).read())
pbar.start()
with open(split_name + "_failed_imgs.txt", "a") as ofile, open(split_name + "_checked_imgs.txt", "a") as checked_file, open(split_name + "_failed_hashes.txt", "a") as failed_hash_file:
checked_urls = set([line.strip() for line in open(split_name + "_checked_imgs.txt").readlines()])
num_none = 0
num_total = 0
for i, example in enumerate(examples):
split_id = example["identifier"].split("-")
image_id = "-".join(split_id[:3])
left_image_name = image_id + "-img0.png"
right_image_name = image_id + "-img1.png"
left_url = example["left_url"]
right_url = example["right_url"]
if not left_url in checked_urls:
status_code = save_image(left_image_name, left_url, hashes[left_image_name], failed_hash_file)
if status_code != 200:
ofile.write(str(status_code) + "\t" + left_image_name + "\t" + left_url + "\n")
ofile.flush()
num_none += 1
checked_urls.add(left_url)
checked_file.write(left_url + "\n")
num_total += 1
if not right_url in checked_urls:
status_code = save_image(right_image_name, right_url, hashes[right_image_name], failed_hash_file)
if status_code != 200:
ofile.write(str(status_code) + "\t" + right_image_name + "\t" + right_url + "\n")
ofile.flush()
num_none += 1
checked_urls.add(right_url)
checked_file.write(right_url + "\n")
num_total += 1
pbar.update(i)
pbar.finish()
print("number of missing images: " + str(num_none))
print("total number of requests: " + str(num_total))
| 4,183 | 34.457627 | 184 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/nlvr2/nlvr/nlvr2/data/filter_data.py | import json
import os
def filter_examples(filename, balanced):
with open(filename) as infile:
original_examples = [json.loads(line) for line in infile if line]
pair_labels = dict()
for example in original_examples:
urls = example["left_url"], example["right_url"]
identifier = example["identifier"]
label = example["label"]
if urls not in pair_labels:
pair_labels[urls] = list()
pair_labels[urls].append((identifier, label))
filtered_ids = list()
num_appearing_more_than_once = 0
for urls, examples in pair_labels.items():
if len(examples) > 1:
num_appearing_more_than_once += len(examples)
if balanced and len(set([item[1] for item in examples])) > 1:
for item in examples:
filtered_ids.append(item)
elif not balanced and len(set(item[1] for item in examples)) == 1:
for item in examples:
filtered_ids.append(item)
print('Filtered dataset ' + str(filename) + ' with balanced=' + str(balanced))
print('A total of %d pairs occur more than once' % num_appearing_more_than_once)
print('Found %d valid examples' % len(filtered_ids))
percent_true = len([example for example in filtered_ids if example[1].lower() == "true"]) / float(len(filtered_ids))
print('Majority class: ' + '{0:.2f}'.format(100. * percent_true))
only_ids = [item[0] for item in filtered_ids]
bal_str = "balanced" if balanced else "unbalanced"
with open(os.path.join(bal_str, bal_str + '_' + filename), "w") as ofile:
for example in original_examples:
if example["identifier"] in only_ids:
ofile.write(json.dumps(example) + '\n')
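# Illustrative reading of the two modes: with balanced=True, only image pairs
# that receive both True and False labels across their examples are kept; with
# balanced=False, only pairs whose examples (one or more) all share a single
# label are kept.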
filter_examples("dev.json", True)
filter_examples("dev.json", False)
filter_examples("test1.json", True)
filter_examples("test1.json", False)
| 1,912 | 39.702128 | 120 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/nlvr2/process_raw_data_scripts/process_dataset.py | import json
import os
NLVR2_DATA_ROOT = '../nlvr/nlvr2/data'
split2fname = {
'train': 'train',
'valid': 'dev',
'test': 'test1',
#'hidden': 'test2'
}
for split, fname in split2fname.items():
with open(os.path.join(NLVR2_DATA_ROOT, fname + '.json')) as f:
new_data = []
for i, line in enumerate(f):
datum = json.loads(line)
id_stem = '-'.join(datum['identifier'].split('-')[:-1])
new_datum = {
'identifier': datum['identifier'],
'img0': '%s-img0' % id_stem,
'img1': '%s-img1' % id_stem,
'label': 1 if datum['label'] == 'True' else 0,
'sent': datum['sentence'],
'uid': 'nlvr2_%s_%d' % (split, i),
}
new_data.append(new_datum)
with open('../%s.json' % split, 'w') as g:
json.dump(new_data, g, sort_keys=True, indent=4)
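# Example output record (values hypothetical, fields per the mapping above):
#   {"identifier": "dev-850-0-1", "img0": "dev-850-0-img0", "img1": "dev-850-0-img1",
#    "label": 1, "sent": "...", "uid": "nlvr2_valid_0"}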
| 927 | 28.935484 | 67 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/gqa/process_raw_data_scripts/process_data.py | from pathlib import Path
import json
GQA_ROOT = '../'
path = Path(GQA_ROOT + 'data')
split2name = {
'train': 'train',
'valid': 'val',
'testdev': 'testdev',
'test': 'test',
'challenge': 'challenge'
}
for split, name in split2name.items():
with open(path / ("%s_balanced_questions.json" % name)) as f:
data = json.load(f)
new_data = []
for key, datum in data.items():
new_datum = {
'question_id': key,
'img_id': datum['imageId'],
'sent': datum['question'],
}
if 'answer' in datum:
new_datum['label'] = {datum['answer']: 1.}
new_data.append(new_datum)
json.dump(new_data, open("../%s.json" % split, 'w'),
indent=4, sort_keys=True)
| 822 | 25.548387 | 65 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/gqa/process_raw_data_scripts/process_submit_data.py | from pathlib import Path
import json
GQA_ROOT = '../'
path = Path(GQA_ROOT + 'data')
split2name = {
'submit': 'submission_all_questions.json'
}
for split, name in split2name.items():
with open(path / ("%s" % name)) as f:
data = json.load(f)
new_data = []
for key, datum in data.items():
new_datum = {
'question_id': key,
'img_id': datum['imageId'],
'sent': datum['question'],
}
if 'answer' in datum:
new_datum['label'] = {datum['answer']: 1.}
new_data.append(new_datum)
json.dump(new_data, open("../%s.json" % split, 'w'),
indent=4, sort_keys=True)
| 727 | 25.962963 | 60 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/gqa/process_raw_data_scripts/process_data_all.py | from pathlib import Path
import json
GQA_ROOT = '../'
path = Path(GQA_ROOT + 'data')
split2name = {
'train': 'train',
'valid': 'val',
'testdev': 'testdev',
}
for split, name in split2name.items():
new_data = []
if split == 'train':
paths = list((path / 'train_all_questions').iterdir())
else:
paths = [path / ("%s_all_questions.json" % name)]
print(split, paths)
for tmp_path in paths:
with tmp_path.open() as f:
data = json.load(f)
for key, datum in data.items():
new_datum = {
'question_id': key,
'img_id': datum['imageId'],
'sent': datum['question'],
}
if 'answer' in datum:
new_datum['label'] = {datum['answer']: 1.}
new_data.append(new_datum)
print(split, len(new_data))
json.dump(new_data, open("../%s_all.json" % split, 'w'),
indent=4, sort_keys=True)
| 1,014 | 26.432432 | 62 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/nlvr2_imgfeat/extract_nlvr2_image.py | # !/usr/bin/env python
# The root of the bottom-up-attention repo. No need to change this if using the provided docker file.
BUTD_ROOT = '/opt/butd/'
# SPLIT to its folder name under IMG_ROOT
SPLIT2DIR = {
'train': 'train',
'valid': 'dev',
'test': 'test1',
    'hidden': 'test2',   # Please verify whether the hidden split maps to test2
}
import os, sys
sys.path.insert(0, BUTD_ROOT + "/tools")
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.test import im_detect, _get_blobs
from fast_rcnn.nms_wrapper import nms
import caffe
import argparse
import pprint
import base64
import numpy as np
import cv2
import csv
from tqdm import tqdm
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
# Settings for the number of features per image. To re-create pretrained features with 36 features
# per image, set both values to 36.
MIN_BOXES = 36
MAX_BOXES = 36
def load_image_ids(img_root, split_dir):
"""images in the same directory are in the same sequential region,
but with no internal ordering"""
pathXid = []
if split_dir == 'train':
img_root = os.path.join(img_root, split_dir)
for d in os.listdir(img_root):
dir_path = os.path.join(img_root, d)
for name in os.listdir(dir_path):
idx = name.split(".")[0]
pathXid.append(
(
os.path.join(dir_path, name),
idx))
else:
img_root = os.path.join(img_root, split_dir)
for name in os.listdir(img_root):
idx = name.split(".")[0]
pathXid.append(
(
os.path.join(img_root, name),
idx))
return pathXid
def generate_tsv(prototxt, weights, image_ids, outfile):
# First check if file exists, and if it is complete
    # Note: do not rely on a set to preserve ordering.
wanted_ids = set([image_id[1] for image_id in image_ids])
found_ids = set()
if os.path.exists(outfile):
with open(outfile, "r") as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
found_ids.add(item['img_id'])
missing = wanted_ids - found_ids
if len(missing) == 0:
print('already completed {:d}'.format(len(image_ids)))
else:
print('missing {:d}/{:d}'.format(len(missing), len(image_ids)))
if len(missing) > 0:
caffe.set_mode_gpu()
caffe.set_device(0)
net = caffe.Net(prototxt, caffe.TEST, weights=weights)
with open(outfile, 'ab') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for im_file, image_id in tqdm(image_ids):
if image_id in missing:
try:
writer.writerow(get_detections_from_im(net, im_file, image_id))
except Exception as e:
print(e)
def get_detections_from_im(net, im_file, image_id, conf_thresh=0.2):
"""
    :param net: the Faster R-CNN detection network
    :param im_file: full path to an image
    :param image_id: unique id written to the "img_id" field
    :param conf_thresh: minimum class confidence for keeping a box
    :return: all information from detection and attr prediction
"""
im = cv2.imread(im_file)
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
    # Keep the original boxes, don't worry about the regression bbox outputs
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
blobs, im_scales = _get_blobs(im, None)
cls_boxes = rois[:, 1:5] / im_scales[0]
cls_prob = net.blobs['cls_prob'].data
attr_prob = net.blobs['attr_prob'].data
pool5 = net.blobs['pool5_flat'].data
# Keep only the best detections
max_conf = np.zeros((rois.shape[0]))
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes][:, 1:], axis=1)
objects_conf = np.max(cls_prob[keep_boxes][:, 1:], axis=1)
attrs = np.argmax(attr_prob[keep_boxes][:, 1:], axis=1)
attrs_conf = np.max(attr_prob[keep_boxes][:, 1:], axis=1)
return {
"img_id": image_id,
"img_h": np.size(im, 0),
"img_w": np.size(im, 1),
"objects_id": base64.b64encode(objects), # int64
"objects_conf": base64.b64encode(objects_conf), # float32
"attrs_id": base64.b64encode(attrs), # int64
"attrs_conf": base64.b64encode(attrs_conf), # float32
"num_boxes": len(keep_boxes),
"boxes": base64.b64encode(cls_boxes[keep_boxes]), # float32
"features": base64.b64encode(pool5[keep_boxes]) # float32
}
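# Illustrative: decoding one saved TSV row back into arrays. The reshape to
# (num_boxes, -1) reflects how downstream readers typically consume these
# fields (an assumption about the consumer, not part of this script):
#   feats = np.frombuffer(base64.b64decode(item["features"]), dtype=np.float32)
#   feats = feats.reshape(int(item["num_boxes"]), -1)
#   boxes = np.frombuffer(base64.b64decode(item["boxes"]), dtype=np.float32)
#   boxes = boxes.reshape(int(item["num_boxes"]), 4)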
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id(s) to use',
default='0', type=str)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--out', dest='outfile',
help='output filepath',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--imgroot', type=str, default='/workspace/images/')
parser.add_argument('--split', type=str, default='valid')
parser.add_argument('--caffemodel', type=str, default='./resnet101_faster_rcnn_final_iter_320000.caffemodel')
args = parser.parse_args()
return args
if __name__ == '__main__':
# Setup the configuration, normally do not need to touch these:
args = parse_args()
args.cfg_file = BUTD_ROOT + "experiments/cfgs/faster_rcnn_end2end_resnet.yml" # s = 500
args.prototxt = BUTD_ROOT + "models/vg/ResNet-101/faster_rcnn_end2end_final/test.prototxt"
args.outfile = "%s_obj36.tsv" % args.split
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
assert cfg.TEST.HAS_RPN
    # Load image ids; this needs modification for new datasets.
image_ids = load_image_ids(args.imgroot, SPLIT2DIR[args.split])
    # Generate TSV files; normally no modification is needed.
generate_tsv(args.prototxt, args.caffemodel, image_ids, args.outfile)
| 7,358 | 35.430693 | 113 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/mscoco_imgfeat/extract_coco_image.py | #!/usr/bin/env python
# The root of bottom-up-attention repo. Do not need to change if using provided docker file.
BUTD_ROOT = '/opt/butd/'
# SPLIT to its folder name under IMG_ROOT
SPLIT2DIR = {
'train': 'train2014',
'valid': 'val2014',
'test': 'test2015',
}
import os, sys
sys.path.insert(0, BUTD_ROOT + "/tools")
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.test import im_detect, _get_blobs
from fast_rcnn.nms_wrapper import nms
import caffe
import argparse
import pprint
import base64
import numpy as np
import cv2
import csv
from tqdm import tqdm
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
# Settings for the number of features per image. To re-create pretrained features with 36 features
# per image, set both values to 36.
MIN_BOXES = 36
MAX_BOXES = 36
def load_image_ids(img_root, split_dir):
"""images in the same directory are in the same split"""
pathXid = []
img_root = os.path.join(img_root, split_dir)
for name in os.listdir(img_root):
idx = name.split(".")[0]
pathXid.append(
(
os.path.join(img_root, name),
idx))
return pathXid
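# Illustrative return value for the val2014 split (COCO file-naming pattern;
# the image root is hypothetical):
#   [('/workspace/images/val2014/COCO_val2014_000000391895.jpg',
#     'COCO_val2014_000000391895'), ...]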
def generate_tsv(prototxt, weights, image_ids, outfile):
# First check if file exists, and if it is complete
    # Note: do not use a set for image_ids; sets do not preserve order.
wanted_ids = set([image_id[1] for image_id in image_ids])
found_ids = set()
if os.path.exists(outfile):
with open(outfile, "r") as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
found_ids.add(item['img_id'])
missing = wanted_ids - found_ids
if len(missing) == 0:
print('already completed {:d}'.format(len(image_ids)))
else:
print('missing {:d}/{:d}'.format(len(missing), len(image_ids)))
if len(missing) > 0:
caffe.set_mode_gpu()
caffe.set_device(0)
net = caffe.Net(prototxt, caffe.TEST, weights=weights)
with open(outfile, 'ab') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for im_file, image_id in tqdm(image_ids):
if image_id in missing:
try:
writer.writerow(get_detections_from_im(net, im_file, image_id))
except Exception as e:
print(e)
def get_detections_from_im(net, im_file, image_id, conf_thresh=0.2):
"""
    :param net: the Faster R-CNN detection network
    :param im_file: full path to an image
    :param image_id: unique id written to the "img_id" field
    :param conf_thresh: minimum class confidence for keeping a box
    :return: all information from detection and attr prediction
"""
im = cv2.imread(im_file)
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
    # Keep the original boxes, don't worry about the regression bbox outputs
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
blobs, im_scales = _get_blobs(im, None)
cls_boxes = rois[:, 1:5] / im_scales[0]
cls_prob = net.blobs['cls_prob'].data
attr_prob = net.blobs['attr_prob'].data
pool5 = net.blobs['pool5_flat'].data
# Keep only the best detections
max_conf = np.zeros((rois.shape[0]))
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes][:, 1:], axis=1)
objects_conf = np.max(cls_prob[keep_boxes][:, 1:], axis=1)
attrs = np.argmax(attr_prob[keep_boxes][:, 1:], axis=1)
attrs_conf = np.max(attr_prob[keep_boxes][:, 1:], axis=1)
return {
"img_id": image_id,
"img_h": np.size(im, 0),
"img_w": np.size(im, 1),
"objects_id": base64.b64encode(objects), # int64
"objects_conf": base64.b64encode(objects_conf), # float32
"attrs_id": base64.b64encode(attrs), # int64
"attrs_conf": base64.b64encode(attrs_conf), # float32
"num_boxes": len(keep_boxes),
"boxes": base64.b64encode(cls_boxes[keep_boxes]), # float32
"features": base64.b64encode(pool5[keep_boxes]) # float32
}
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id(s) to use',
default='0', type=str)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--out', dest='outfile',
help='output filepath',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--imgroot', type=str, default='/workspace/images/')
parser.add_argument('--split', type=str, default='valid')
parser.add_argument('--caffemodel', type=str, default='./resnet101_faster_rcnn_final_iter_320000.caffemodel')
args = parser.parse_args()
return args
if __name__ == '__main__':
# Setup the configuration, normally do not need to touch these:
args = parse_args()
args.cfg_file = BUTD_ROOT + "experiments/cfgs/faster_rcnn_end2end_resnet.yml" # s = 500
args.prototxt = BUTD_ROOT + "models/vg/ResNet-101/faster_rcnn_end2end_final/test.prototxt"
args.outfile = "%s_obj36.tsv" % args.split
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
assert cfg.TEST.HAS_RPN
    # Load image ids; this needs modification for new datasets.
image_ids = load_image_ids(args.imgroot, SPLIT2DIR[args.split])
    # Generate TSV files; normally no modification is needed.
generate_tsv(args.prototxt, args.caffemodel, image_ids, args.outfile)
| 6,810 | 35.42246 | 113 | py |
visualbert | visualbert-master/visualbert/models/model_wrapper.py | # Handles model training (optimizer), loading, saving
import argparse
import os
import shutil
from copy import deepcopy
import multiprocessing
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
from torch.nn import DataParallel
from torch.nn.modules import BatchNorm2d
from tqdm import tqdm
from allennlp.nn.util import device_mapping
from utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \
restore_checkpoint, print_para, restore_best_checkpoint, load_state_dict_flexible
from visualbert.pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
from allennlp.models import Model
class ModelWrapper():
def __init__(self, args, train_dataset_length):
self.scheduler = None
self.args = args
self.args.gradient_accumulation_steps = args.get("gradient_accumulation_steps", 1)
self.args.fp16 = args.get("fp16", False)
self.initialize_model(args)
        self.initialize_optimizer(args, train_dataset_length)
self.global_step = 0
self.called_time = 0
def train(self):
self.model.train()
def eval(self):
self.model.eval()
def step(self, batch, eval_mode=False):
if eval_mode:
with torch.no_grad():
output_dict = self.model(**batch)
if output_dict['loss'] is not None:
loss = output_dict['loss'].mean()
output_dict['loss'] = loss
return output_dict
self.optimizer.zero_grad()
output_dict = self.model(**batch)
loss = output_dict['loss']
cnn_loss = output_dict.get("cnn_regularization_loss", None)
if cnn_loss is not None and self.model.module.cnn_loss_ratio != 0:
loss = loss + cnn_loss * self.model.module.cnn_loss_ratio
output_dict['cnn_regularization_loss'] = cnn_loss.mean().item()
loss = loss.mean() # This is because on MultiGPU, loss is a tensor of size GPU_NUM
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.args.get("fp16", False):
self.optimizer.backward(loss)
else:
loss.backward()
if (self.called_time + 1) % self.args.gradient_accumulation_steps == 0:
if self.args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used and handles this automatically
lr_this_step = self.args.learning_rate * self.warmup_linear.get_lr(self.global_step, self.args.warmup_proportion)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr_this_step
self.optimizer.step()
self.global_step += 1
self.called_time += 1
return output_dict
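    # Worked example (hypothetical numbers): with gradient_accumulation_steps=4,
    # the loss above is scaled by 1/4 and optimizer.step() fires on every 4th
    # call to step(), so the effective batch size is 4 * train_batch_size.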
    def initialize_optimizer(self, args, train_dataset_length):
param_optimizer = list(self.model.named_parameters())
        # Hack: remove the pooler, which is not used;
        # otherwise it produces None grads that break apex.
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ]
num_train_optimization_steps = int(
train_dataset_length / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
self.num_train_optimization_steps = num_train_optimization_steps
if args.get("fp16", False):
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
self.optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
self.optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
self.warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
else:
self.optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
def initialize_model(self, args):
model = Model.from_params(vocab=None, params=Params(args.model))
if args.get("fp16", False):
model.half()
print("Using FP 16, Model Halfed")
self.model = DataParallel(model).cuda()
def load_state_dict(self, state_dict_to_load):
if isinstance(self.model, DataParallel):
load_state_dict_flexible(self.model, state_dict_to_load["model"])
load_state_dict_flexible(self.optimizer, state_dict_to_load["optimizer"])
def state_dict(self):
if isinstance(self.model, DataParallel):
save_dict = {"model":self.model.module.state_dict(),
"optimizer":self.optimizer.state_dict()}
else:
save_dict = {"model":self.model.state_dict(),
"optimizer":self.optimizer.state_dict()}
return save_dict
def save_checkpoint(self, serialization_dir, epoch, val_metric_per_epoch, is_best = False):
assert(serialization_dir)
model_path = os.path.join(serialization_dir, "model_state_epoch_{}.th".format(epoch))
model_state = self.model.module.state_dict() if isinstance(self.model, DataParallel) else self.model.state_dict()
torch.save(model_state, model_path)
training_state = {'epoch': epoch,
'val_metric_per_epoch': val_metric_per_epoch,
'optimizer': self.optimizer.state_dict()
}
training_path = os.path.join(serialization_dir,
"training_state_epoch_{}.th".format(epoch))
torch.save(training_state, training_path)
if is_best:
print("Best validation performance so far. Copying weights to '{}/best.th'.".format(serialization_dir))
shutil.copyfile(model_path, os.path.join(serialization_dir, "best.th"))
def save_checkpoint_step(self, serialization_dir, step, epoch, is_best = False):
assert(serialization_dir)
model_path = os.path.join(serialization_dir, "model_step_{}_epoch_{}.th".format(step, epoch))
model_state = self.model.module.state_dict() if isinstance(self.model, DataParallel) else self.model.state_dict()
torch.save(model_state, model_path)
training_state = {'step': step,
'epoch': epoch,
'val_metric_per_epoch': None,
'optimizer': self.optimizer.state_dict()
}
training_path = os.path.join(serialization_dir,
"training_step_{}_epoch_{}.th".format(step, epoch))
torch.save(training_state, training_path)
def restore_checkpoint(self, serialization_dir, epoch_to_load):
# Restore from a training dir
return restore_checkpoint(self.model, self.optimizer, serialization_dir, epoch_to_load)
def restore_checkpoint_pretrained(self, restore_bin):
# Restore from a given model path
state_dict = torch.load(restore_bin, map_location=device_mapping(-1))
if isinstance(self.model, DataParallel):
model_to_load = self.model.module
else:
model_to_load = self.model
own_state = model_to_load.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Skipped:" + name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
print("Successfully loaded: "+name)
except:
print("Part load failed: " + name)
def freeze_detector(self):
if hasattr(self.model.module, "detector"):
detector = self.model.module.detector
for submodule in detector.backbone.modules():
if isinstance(submodule, BatchNorm2d):
submodule.track_running_stats = False
for p in submodule.parameters():
p.requires_grad = False
else:
print("No detector found.")
@staticmethod
def read_and_insert_args(args, confg):
import commentjson
from attrdict import AttrDict
with open(confg) as f:
config_json = commentjson.load(f)
dict_args = vars(args)
config_json.update(dict_args)
args = AttrDict(config_json)
args.model.bert_model_name = args.bert_model_name
return args
| 10,127 | 39.674699 | 134 | py |
visualbert | visualbert-master/visualbert/models/model.py | # Modified from VCR.
from typing import Dict, List, Any
import os
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.parallel
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, FeedForward, InputVariationalDropout, TimeDistributed
from allennlp.training.metrics import CategoricalAccuracy, Average
from allennlp.modules.matrix_attention import BilinearMatrixAttention
from allennlp.nn.util import masked_softmax, weighted_sum, replace_masked_values
from allennlp.nn import InitializerApplicator
from pytorch_pretrained_bert.modeling import BertForMultipleChoice, TrainVisualBERTObjective #BertForMultipleChoice, BertForVisualMultipleChoice, BertForVisualPreTraining, BertForPreTraining, BertForVisualQA
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
@Model.register("VisualBERTDetector")
class VisualBERTDetector(Model):
def __init__(self,
vocab: Vocabulary,
class_embs: bool=True,
bert_model_name: str="bert-base-uncased",
cnn_loss_ratio: float=0.0,
special_visual_initialize: bool=False,
text_only: bool=False,
visual_embedding_dim: int=512,
hard_cap_seq_len: int=None,
cut_first: str='text',
embedding_strategy: str='plain',
random_initialize: bool=False,
training_head_type: str="pretraining",
bypass_transformer: bool=False,
pretrained_detector: bool=True,
output_attention_weights: bool=False
):
super(VisualBERTDetector, self).__init__(vocab)
from utils.detector import SimpleDetector
self.detector = SimpleDetector(pretrained=pretrained_detector, average_pool=True, semantic=class_embs, final_dim=512)
##################################################################################################
self.bert = TrainVisualBERTObjective.from_pretrained(
bert_model_name,
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(-1)),
training_head_type = training_head_type,
visual_embedding_dim = visual_embedding_dim,
hard_cap_seq_len = hard_cap_seq_len,
cut_first = cut_first,
embedding_strategy = embedding_strategy,
bypass_transformer = bypass_transformer,
random_initialize = random_initialize,
output_attention_weights = output_attention_weights)
if special_visual_initialize:
self.bert.bert.embeddings.special_intialize()
self.training_head_type = training_head_type
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
self.cnn_loss_ratio = cnn_loss_ratio
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
        :return: [batch_size, ..leading_dims.., L, obj_dim]
"""
span_tags_fixed = torch.clamp(span_tags, min=0) # In case there were masked values here
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(0, row_id.shape[0], step=1, device=row_id.device)[:, None]
        # Add extra dimensions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
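    # Shape example: span_tags of shape [B, 4, L] indexing object_reps of shape
    # [B, O, D] yields a tensor of shape [B, 4, L, D] (e.g. 4 answer choices in VCR).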
def embed_span(self, span, span_tags, span_mask, object_reps):
"""
:param span: Thing that will get embed and turned into [batch_size, ..leading_dims.., L, word_dim]
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
        :param span_mask: [batch_size, ..leading_dims.., L]
        :return: (encoded span representations, retrieved object features)
"""
retrieved_feats = self._collect_obj_reps(span_tags, object_reps)
span_rep = torch.cat((span['bert'], retrieved_feats), -1)
# add recurrent dropout here
if self.rnn_input_dropout:
span_rep = self.rnn_input_dropout(span_rep)
return self.span_encoder(span_rep, span_mask), retrieved_feats
def forward(self,
images: torch.Tensor = None,
objects: torch.LongTensor = None,
segms: torch.Tensor = None,
boxes: torch.Tensor = None,
box_mask: torch.LongTensor = None,
question: Dict[str, torch.Tensor] = None,
question_tags: torch.LongTensor = None,
question_mask: torch.LongTensor = None,
answers: Dict[str, torch.Tensor] = None,
answer_tags: torch.LongTensor = None,
answer_mask: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
label: torch.LongTensor = None,
bert_input_ids: torch.LongTensor = None,
bert_input_mask: torch.LongTensor = None,
bert_input_type_ids: torch.LongTensor = None,
masked_lm_labels: torch.LongTensor = None,
is_random_next: torch.LongTensor= None,
image_text_alignment: torch.LongTensor = None,
output_all_encoded_layers = False) -> Dict[str, torch.Tensor]:
        # Trim off boxes that are too long. This is an issue because DataParallel
        # would otherwise pad with more zeros than needed.
max_len = int(box_mask.sum(1).max().item())
objects = objects[:, :max_len]
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
segms = segms[:, :max_len]
'''for tag_type, the_tags in (('question', question_tags), ('answer', answer_tags)):
if int(the_tags.max()) > max_len:
raise ValueError("Oh no! {}_tags has maximum of {} but objects is of dim {}. Values are\n{}".format(
tag_type, int(the_tags.max()), objects.shape, the_tags
))'''
obj_reps = self.detector(images=images, boxes=boxes, box_mask=box_mask, classes=objects, segms=segms)
#print("obj_reps", obj_reps['obj_reps'].size())
#print("bert_input_ids", bert_input_ids.size())
#print("box_mask", box_mask.size())
if len(bert_input_ids.size()) == 2: # Using complete shuffle mode
obj_reps_expanded = obj_reps['obj_reps']
box_mask_expanded = box_mask
else:
obj_reps_expanded = obj_reps['obj_reps'].unsqueeze(1).expand(box_mask.size(0), bert_input_mask.size(1), box_mask.size(-1), obj_reps['obj_reps'].size(-1))
box_mask_expanded = box_mask.unsqueeze(1).expand(box_mask.size(0), bert_input_mask.size(1), box_mask.size(-1))
#bert_input_mask = torch.cat((bert_input_mask, box_mask_expanded), dim = -1)
output_dict = self.bert(
input_ids = bert_input_ids,
token_type_ids = bert_input_type_ids,
input_mask = bert_input_mask,
visual_embeddings = obj_reps_expanded,
position_embeddings_visual = None,
image_mask = box_mask_expanded,
visual_embeddings_type = None,
image_text_alignment = image_text_alignment,
label = label,
masked_lm_labels = masked_lm_labels,
is_random_next = is_random_next,
output_all_encoded_layers = output_all_encoded_layers)
#class_probabilities = F.softmax(logits, dim=-1)
cnn_loss = obj_reps['cnn_regularization_loss']
if self.cnn_loss_ratio == 0.0:
output_dict["cnn_regularization_loss"] = None
else:
output_dict["cnn_regularization_loss"] = cnn_loss * self.cnn_loss_ratio
# Multi-process safe??
if label is not None and self.training_head_type != "pretraining":
logits = output_dict["logits"]
logits = logits.detach().float()
label = label.float()
self._accuracy(logits, label)
if self.training_head_type == "pretraining":
output_dict["logits"] = None # Because every image may has different number of image features, the lengths of the logits on different GPUs will be different. This will cause DataParallel to throw errors.
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {'accuracy': self._accuracy.get_metric(reset)}
@Model.register("VisualBERTFixedImageEmbedding")
class VisualBERTFixedImageEmbedding(Model):
def __init__(self,
vocab: Vocabulary,
class_embs: bool=True,
bert_model_name: str="bert-base-uncased",
cnn_loss_ratio: float=0.0,
special_visual_initialize: bool=False,
text_only: bool=False,
training_head_type: str='',
visual_embedding_dim: int=512,
hard_cap_seq_len: int=None,
cut_first: str='text',
embedding_strategy: str='plain',
random_initialize: bool=False,
bypass_transformer: bool=False,
output_attention_weights: bool=False
):
super(VisualBERTFixedImageEmbedding, self).__init__(vocab)
self.text_only = text_only
self.training_head_type = training_head_type
self.bert = TrainVisualBERTObjective.from_pretrained(
bert_model_name,
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(-1)),
training_head_type = training_head_type,
visual_embedding_dim = visual_embedding_dim,
hard_cap_seq_len = hard_cap_seq_len,
cut_first = cut_first,
embedding_strategy = embedding_strategy,
bypass_transformer = bypass_transformer,
random_initialize = random_initialize,
output_attention_weights = output_attention_weights)
if special_visual_initialize:
self.bert.bert.embeddings.special_intialize()
if self.training_head_type == "nlvr" or self.training_head_type == "multichoice":
self._accuracy = CategoricalAccuracy()
if "vqa" in self.training_head_type:
self._accuracy = Average()
if self.training_head_type == "flickr":
self._accuracy = Average()
def forward(self,
#bert text input
bert_input_ids,
bert_input_mask,
bert_input_type_ids,
# image input
image_dim_variable = None,
image_feat_variable = None,
#
image_text_alignment = None,
visual_embeddings_type = None,
# fine-tuning label
label = None,
flickr_position = None, # For flickr we also need to provide the position
# pretraining lables
masked_lm_labels = None,
is_random_next = None,
output_all_encoded_layers = False
) -> Dict[str, torch.Tensor]:
# image_feat_variable = batch x ( num_choice x ) image_feature_length x dim
# Prepare Mask
if image_feat_variable is not None:
image_mask = torch.arange(image_feat_variable.size(-2)).expand(*image_feat_variable.size()[:-1]).cuda()
if len(image_dim_variable.size()) < len(image_mask.size()):
image_dim_variable = image_dim_variable.unsqueeze(-1)
assert(len(image_dim_variable.size()) == len(image_mask.size()))
image_mask = image_mask < image_dim_variable
image_mask = image_mask.long()
else:
image_mask = None
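        # Example: with image_feat_variable of shape [B, 36, D] and
        # image_dim_variable = [[20], ...], image_mask is 1 for the first 20
        # feature slots of that example and 0 for the remaining 16.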
output_dict = self.bert(
input_ids = bert_input_ids,
token_type_ids = bert_input_type_ids,
input_mask = bert_input_mask,
visual_embeddings = image_feat_variable,
position_embeddings_visual = None,
image_mask = image_mask,
visual_embeddings_type = visual_embeddings_type,
image_text_alignment = image_text_alignment,
label = label,
flickr_position = flickr_position,
masked_lm_labels = masked_lm_labels,
is_random_next = is_random_next,
output_all_encoded_layers = output_all_encoded_layers)
if self.training_head_type == "nlvr" or self.training_head_type == "multichoice":
logits = output_dict["logits"]
self._accuracy(logits, label)
# Multi-process safe??
if "vqa" in self.training_head_type or self.training_head_type == "flickr":
if output_dict["accuracy"] is not None:
self._accuracy(output_dict["accuracy"])
output_dict["cnn_regularization_loss"] = None
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
if self.training_head_type == "nlvr" or self.training_head_type == "multichoice" or "vqa" in self.training_head_type or self.training_head_type == "flickr":
return {'accuracy': self._accuracy.get_metric(reset)}
return {'accuracy': 0.0}
@staticmethod
def compute_score_with_logits(logits, labels):
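        # Note: masked_unk_softmax and use_cuda are assumed to be defined
        # elsewhere in the codebase; they are not imported in this file.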
logits = masked_unk_softmax(logits, 1, 0)
logits = torch.max(logits, 1)[1].data # argmax
one_hots = torch.zeros(*labels.size())
one_hots = one_hots.cuda() if use_cuda else one_hots
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
class SimpleReportMetric():
def __init__(self):
self.total = 0.0
self.called_time = 0
def __call__(self, number, *args):
if isinstance(number, torch.Tensor):
number = number.item()
self.total += number
self.called_time += 1
    def get_metric(self, reset):
        # Assumed completion: the original body was truncated at a bare return.
        return self.total / max(self.called_time, 1)
| 14,578 | 42.912651 | 215 | py |
visualbert | visualbert-master/visualbert/models/__init__.py |
# You can add more models in this folder, like:
# from models.no_question import model
# from models.no_vision_at_all import model
# from models.old_model import model
# from models.bottom_up_top_down import model
# from models.revisiting_vqa_baseline import model
# from models.mlb import model | 295 | 36 | 50 | py |
visualbert | visualbert-master/visualbert/models/train.py | """
Training script. Should be pretty adaptable to whatever.
"""
import argparse
import os
import shutil
from copy import deepcopy
import multiprocessing
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
from torch.nn import DataParallel
from torch.nn.modules import BatchNorm2d
from tqdm import tqdm
from allennlp.nn.util import device_mapping
from visualbert.utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \
restore_checkpoint, print_para, restore_best_checkpoint, restore_checkpoint_flexible, load_state_dict_flexible, compute_score_with_logits
from visualbert.dataloaders.vcr import VCR, VCRLoader
try:
from visualbert.dataloaders.coco_dataset import COCODataset
except:
print("Import COCO dataset failed.")
try:
from visualbert.dataloaders.nlvr_dataset import NLVRDataset
except:
print("Import NLVR2 dataset failed.")
try:
from visualbert.dataloaders.vqa_dataset import VQADataset
except:
print("Import VQA dataset failed.")
try:
from visualbert.dataloaders.flickr_dataset import Flickr30kFeatureDataset
except:
print("Import Flickr30K dataset failed.")
from pytorch_pretrained_bert.optimization import BertAdam
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
'''import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (40960, rlimit[1]))
print("Setting to 40960")
except:
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))'''
from allennlp.models import Model
from visualbert.models.model_wrapper import ModelWrapper
from visualbert.models import model
#################################
from attrdict import AttrDict
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
'-folder',
dest='folder',
help='folder location',
type=str,
)
parser.add_argument(
'-no_tqdm',
dest='no_tqdm',
action='store_true',
)
parser.add_argument(
'-config',
dest='config',
help='config location',
type=str,
)
args = parser.parse_args()
args = ModelWrapper.read_and_insert_args(args, args.config)
#####################################################
if os.path.exists(args.folder):
create_flag = 0
else:
create_flag = 1
print("Making directories")
os.makedirs(args.folder, exist_ok=True)
import sys
run_log_counter = 0
while(os.path.exists(args.folder + '/run_{}.log'.format(run_log_counter))):
run_log_counter += 1
file_log = open(args.folder + '/run_{}.log'.format(run_log_counter),'w') # File where you need to keep the logs
file_log.write("")
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
file_log.write(data) # Write the data of stdout here to a text file as well
def flush(self):
pass
sys.stdout = Unbuffered(sys.stdout)
NUM_GPUS = torch.cuda.device_count()
NUM_CPUS = multiprocessing.cpu_count()
if NUM_GPUS == 0:
raise ValueError("you need gpus!")
def _to_gpu(td):
if args.get("fp16", False):
_to_fp16(td)
if NUM_GPUS > 1:
return td
for k in td:
if k != 'metadata':
if td[k] is not None:
td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[k].cuda(non_blocking=True)
return td
def _to_fp16(td):
for k in td:
if isinstance(td[k], torch.FloatTensor):
td[k] = td[k].to(dtype=torch.float16)
num_workers = args.get("num_workers", 2)
val_workers = args.get("val_workers", 0)
TEST_DATA_READING = False
if TEST_DATA_READING:
num_workers = 0
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
loader_params = {'batch_size': args.train_batch_size // NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}
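# e.g. with train_batch_size=64 and NUM_GPUS=4, batch_size here is 16
# (the per-GPU share of each training batch).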
def get_dataset_loader(args, dataset_name):
    # Build train/val/test splits; VCR uses its own splits interface below.
if dataset_name == "vcr":
train, val, test = VCR.splits(
mode='rationale' if args.rationale else 'answer',
only_use_relevant_dets = args.get('only_use_relevant_dets', True),
do_lower_case = args.do_lower_case,
bert_model_name = args.bert_model_name,
max_seq_length = args.max_seq_length,
pretraining = args.pretraining,
pretraining_include_qa_and_qar = args.pretraining_include_qa_and_qar,
complete_shuffle = args.get("complete_shuffle", False),
use_alignment = args.get('use_alignment', False),
add_all_features = args.add_all_features,
answer_labels_path = args.get("answer_labels_path", None),
vcr_annots_dir = args.vcr_annots_dir,
vcr_image_dir = args.vcr_image_dir
)
elif dataset_name == "coco":
train, val, test = COCODataset.splits(args)
elif dataset_name == "nlvr":
train, val, test = NLVRDataset.splits(args)
elif dataset_name == "vqa":
train, val, test = VQADataset.splits(args)
elif dataset_name == "wiki":
train, val, test = WikiDataset.splits(args)
elif dataset_name == "flickr":
train, val, test = Flickr30kFeatureDataset.splits(args)
else:
assert(0)
loader_params = {'batch_size': args.train_batch_size // NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}
train_loader_params = deepcopy(loader_params)
val_loader_params = deepcopy(loader_params)
val_loader_params["num_workers"] = val_workers
test_loader_params = deepcopy(loader_params)
test_loader_params["num_workers"] = val_workers
train_loader = VCRLoader.from_dataset(train, **train_loader_params)
val_loader = VCRLoader.from_dataset(val, **val_loader_params)
test_loader = VCRLoader.from_dataset(test, **test_loader_params)
train_set_size = len(train)
return train_loader, val_loader, test_loader, train_set_size
train_loader, val_loader, test_loader, train_set_size = get_dataset_loader(args, args.dataset)
ARGS_RESET_EVERY = args.get("print_every", 100)
train_model = ModelWrapper(args, train_set_size)
# Loading from pre-trained model
if args.restore_bin:
train_model.restore_checkpoint_pretrained(args.restore_bin)
# Loading from previous checkpoint
if create_flag == 0:
start_epoch, val_metric_per_epoch = train_model.restore_checkpoint(serialization_dir=args.folder, epoch_to_load = args.get("epoch_to_load", None))
if val_metric_per_epoch is None:
val_metric_per_epoch = []
else:
create_flag = 1
start_epoch, val_metric_per_epoch = 0, []
shutil.copy2(args.config, args.folder) # Always copy the config
if args.get("freeze_detector", True):
train_model.freeze_detector()
param_shapes = print_para(train_model.model)
print(args)
print("########### Starting from {}".format(start_epoch))
num_batches = 0
stop_epoch = args.num_train_epochs
save_every = args.get("save_every", None)
for epoch_num in range(start_epoch, stop_epoch):
train_results = []
norms = []
train_model.model.train()
if not args.get("skip_training", False):
for b, (time_per_batch, batch) in enumerate(time_batch(tqdm(train_loader), reset_every=ARGS_RESET_EVERY)):
batch = _to_gpu(batch)
output_dict = train_model.step(batch)
num_batches += 1
train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(),
'crl': output_dict.get("cnn_regularization_loss", 0.0),
'next_sentence_loss': output_dict["next_sentence_loss"].mean().item() if "next_sentence_loss" in output_dict else 0.0,
'masked_lm_loss': output_dict["masked_lm_loss"].mean().item() if "masked_lm_loss" in output_dict else 0.0,
'accuracy': (train_model.model.module).get_metrics(
reset=(b % ARGS_RESET_EVERY) == 0)[
'accuracy'],
'sec_per_batch': time_per_batch,
'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
}))
if b % ARGS_RESET_EVERY == 0 and b > 0:
print("e{:2d}b{:5d}/{:5d}. \nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(),
), flush=True)
if save_every is not None and b % save_every == 0 and b != 0:
train_model.save_checkpoint_step(args.folder, b, epoch_num)
print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
try:
### This is the eval part
val_probs = []
val_labels = []
val_size = 0.0
val_loss_sum = 0.0
val_acc = 0.0
val_acc_upper = 0.0
val_instance_counter = 0.0
val_next_sentence_loss_sum = 0.0
train_model.eval()
val_counter = 0
############ Different reporting parameters
# for vqa, nlvr, flickr
do_test = args.get("do_test", False) ## This one is for vqa
if do_test:
val_loader = test_loader
val_dataset = val_loader.dataset
vcr_save_result = args.get("vcr_save_result", False) # This one is for vcr
for b, (time_per_batch, batch) in enumerate(time_batch(val_loader if args.no_tqdm else tqdm(val_loader), reset_every=ARGS_RESET_EVERY)):
with torch.no_grad():
batch = _to_gpu(batch)
output_dict = train_model.step(batch, eval_mode = True)
if not args.pretraining:
# Pretty clumsy code
if args.model.training_head_type == "vqa":
val_probs.append(output_dict['logits'].detach().cpu())
if not do_test:
val_labels.append(batch['label'].detach().cpu())
elif args.model.training_head_type == "flickr":
# This is because of multi-GPU
val_acc += (output_dict["accuracy"] * output_dict["entity_num"].float()).sum(-1).item()
val_acc_upper += (output_dict["upperbound_accuracy"] * output_dict["entity_num"].float()).sum(-1).item()
val_instance_counter += output_dict["entity_num"].sum(-1).item()
elif args.model.training_head_type == "multichoice":
val_probs.append(output_dict['logits'].detach().cpu().numpy())
if not do_test:
val_labels.append(batch['label'].detach().cpu().numpy())
elif args.model.training_head_type == "nlvr":
val_probs.append(output_dict['logits'].detach().cpu().numpy())
val_labels.append(batch['label'].detach().cpu().numpy())
else:
val_labels.append(batch['label'].detach().cpu().numpy())
if not do_test:
val_loss_sum += output_dict['loss'].mean().item() * batch['label'].size(0)
val_counter += batch['label'].size(0)
if "next_sentence_loss" in output_dict:
val_next_sentence_loss_sum += output_dict['next_sentence_loss'].mean().item() * batch['label'].size(0)
if not args.pretraining:
if args.model.training_head_type == "vqa":
if do_test:
val_probs = np.concatenate(val_probs, 0)
val_probs = torch.Tensor(val_probs)
val_probs = val_probs.squeeze(1)
val_dataset.generate_test_file(val_probs, os.path.join(args.folder, "result.json"))
print("Finished testing")
assert(0)
else:
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
val_probs = torch.Tensor(val_probs)
val_labels = torch.Tensor(val_labels)
val_probs = val_probs.squeeze(1)
acc = torch.sum(compute_score_with_logits(val_probs, val_labels)) / val_labels.size(0)
acc = acc.squeeze(-1).item()
elif args.model.training_head_type == "flickr":
acc = val_acc / val_instance_counter
val_acc_upper = val_acc_upper / val_instance_counter
print("Upper bound: {:.5f}".format(val_acc_upper))
elif args.model.training_head_type == "multichoice": #VCR
if not do_test:
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
if vcr_save_result:
if do_test:
file_name = "test"
else:
file_name = "val"
save_file_name = os.path.join(args.folder, file_name + "_qa.np")
if args.rationale:
save_file_name = os.path.join(args.folder, file_name + "_qar.np")
if do_test:
np.save(save_file_name, val_probs)
else:
np.savez(save_file_name+'z', val_probs=val_probs, val_labels=val_labels)
#np.save(save_file_name, (val_probs, val_labels))
print("Saved result to {}".format(save_file_name))
assert(0)
acc = float(np.mean(val_labels == val_probs.argmax(1)))
elif args.model.training_head_type == "nlvr":
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
if args.get("report", False):
val_probs = val_probs.argmax(1)
assert(val_probs.shape[0]) == len(val_dataset)
result = []
for index, i in enumerate(val_dataset.items):
label = "True" if val_probs[index] == 1 else "False"
result.append(i["identifier"] + "," + label)
with open(os.path.join(args.folder, "results.csv"), "w") as f:
f.write("\n".join(result))
assert(0)
acc = float(np.mean(val_labels == val_probs.argmax(1)))
if not do_test:
val_loss_avg = val_loss_sum / val_counter
print("Val epoch {} has acc {:.5f} and loss {:.5f}".format(epoch_num, acc, val_loss_avg), flush=True)
else:
print("Val epoch {} has acc {:.5f}".format(epoch_num, acc), flush=True)
assert(0)
val_metric_per_epoch.append(acc)
else:
val_loss_avg = val_loss_sum / val_counter
val_next_sentence_loss_avg = val_next_sentence_loss_sum / val_counter
print("Val epoch {} has loss {:.5f}, next sentence loss {:.5f}".format(epoch_num, val_loss_avg, val_next_sentence_loss_avg), flush=True)
val_metric_per_epoch.append(-val_loss_avg)
if int(np.argmax(val_metric_per_epoch)) < (len(val_metric_per_epoch) - 1 - args.patience):
print("Stopping at epoch {:2d}".format(epoch_num))
break
############### Save model
if not args.get("skip_training", False):
train_model.save_checkpoint(args.folder, epoch_num, val_metric_per_epoch, is_best=int(np.argmax(val_metric_per_epoch)) == (len(val_metric_per_epoch) - 1))
except KeyboardInterrupt:
if not args.get("skip_training", False):
train_model.save_checkpoint(args.folder, epoch_num, None, is_best=False)
print("Something Went Wrong with Evaluation. Stopped.")
assert(0)
except:
if not args.get("skip_training", False):
train_model.save_checkpoint(args.folder, epoch_num, None, is_best=False)
print("Something Went Wrong with Evaluation. Ignored.")
if args.get("skip_training", False):
assert(0)
| 16,973 | 39.901205 | 166 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
# Consistent with HuggingFace BERT version 3fc63f126ddf883ba9659f13ec046c3639db7b7e
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
import abc
import sys
logger = logging.getLogger(__name__)
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
class _LRSchedule(ABC):
""" Parent of all LRSchedules here. """
warn_t_total = False # is set to True for schedules where progressing beyond t_total steps doesn't make sense
def __init__(self, warmup=0.002, t_total=-1, **kw):
"""
:param warmup: what fraction of t_total steps will be used for linear warmup
:param t_total: how many training steps (updates) are planned
:param kw:
"""
super(_LRSchedule, self).__init__(**kw)
if t_total < 0:
logger.warning("t_total value of {} results in schedule not being applied".format(t_total))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
warmup = max(warmup, 0.)
self.warmup, self.t_total = float(warmup), float(t_total)
self.warned_for_t_total_at_progress = -1
def get_lr(self, step, nowarn=False):
"""
:param step: which of t_total steps we're on
:param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
:return: learning rate multiplier for current update
"""
if self.t_total < 0:
return 1.
progress = float(step) / self.t_total
ret = self.get_lr_(progress)
        # warning for exceeding t_total (only active for schedules with warn_t_total set)
if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
logger.warning(
"Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
.format(ret, self.__class__.__name__))
self.warned_for_t_total_at_progress = progress
# end warning
return ret
@abc.abstractmethod
def get_lr_(self, progress):
"""
:param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
:return: learning rate multiplier for current update
"""
return 1.
class ConstantLR(_LRSchedule):
def get_lr_(self, progress):
return 1.
class WarmupCosineSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve.
If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
"""
warn_t_total = True
def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
"""
:param warmup: see LRSchedule
:param t_total: see LRSchedule
:param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
:param kw:
"""
super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
self.cycles = cycles
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress))
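# Worked example: with warmup=0.1 and cycles=0.5 (the default), at progress
# 0.55 the post-warmup progress is (0.55 - 0.1) / 0.9 = 0.5, so the
# multiplier is 0.5 * (1 + cos(pi * 0.5 * 2 * 0.5)) = 0.5.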
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying
learning rate (with hard restarts).
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
assert(cycles >= 1.)
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1)))
return ret
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
"""
All training progress is divided in `cycles` (default=1.) parts of equal length.
Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
followed by a learning rate decreasing from 1. to 0. following a cosine curve.
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
assert(warmup * cycles < 1.)
warmup = warmup * cycles if warmup >= 0 else warmup
super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
def get_lr_(self, progress):
progress = progress * self.cycles % 1.
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * progress))
return ret
class WarmupConstantSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Keeps learning rate equal to 1. after warmup.
"""
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return 1.
class WarmupLinearSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
"""
warn_t_total = True
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return max((progress - 1.) / (self.warmup - 1.), 0.)
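# Worked example: with warmup=0.1, get_lr_(0.05) = 0.05 / 0.1 = 0.5 while
# warming up, and get_lr_(0.55) = (0.55 - 1) / (0.1 - 1) = 0.5 while decaying.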
SCHEDULES = {
None: ConstantLR,
"none": ConstantLR,
"warmup_cosine": WarmupCosineSchedule,
"warmup_constant": WarmupConstantSchedule,
"warmup_linear": WarmupLinearSchedule
}
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
schedule: schedule to use for the warmup (see above).
Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
If `None` or `'none'`, learning rate is always kept constant.
Default : `'warmup_linear'`
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
# initialize schedule object
if not isinstance(schedule, _LRSchedule):
schedule_type = SCHEDULES[schedule]
schedule = schedule_type(warmup=warmup, t_total=t_total)
else:
if warmup != -1 or t_total != -1:
logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
"Please specify custom warmup and t_total in _LRSchedule object.")
defaults = dict(lr=lr, schedule=schedule,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
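                # Net update (AdamW-style decoupled weight decay):
                #   p <- p - lr * (m / (sqrt(v) + eps) + weight_decay * p)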
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss | 13,112 | 42.134868 | 139 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/__main__.py | # coding: utf8
def main():
import sys
try:
from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ModuleNotFoundError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if len(sys.argv) != 5:
# pylint: disable=line-too-long
print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
else:
PYTORCH_DUMP_OUTPUT = sys.argv.pop()
TF_CONFIG = sys.argv.pop()
TF_CHECKPOINT = sys.argv.pop()
convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
if __name__ == '__main__':
main()
| 932 | 39.565217 | 137 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import os
import logging
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
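# Illustrative example of the mapping produced above: a vocab file containing the
# lines "[PAD]", "[UNK]", "the" loads as
# OrderedDict([('[PAD]', 0), ('[UNK]', 1), ('the', 2)]).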
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
raise ValueError(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_tokens_to_ids_no_warning(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
"""
        Instantiate a BertTokenizer from a pre-trained model vocabulary file.
        Download and cache the vocabulary file if needed.
"""
if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
else:
vocab_file = pretrained_model_name
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
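# Minimal usage sketch (not part of the original file); `vocab_file` below is a
# hypothetical local path to a WordPiece vocabulary.
def _example_tokenize_roundtrip(vocab_file="vocab.txt"):
    tokenizer = BertTokenizer(vocab_file, do_lower_case=True)
    tokens = tokenizer.tokenize("unaffable behavior")
    ids = tokenizer.convert_tokens_to_ids(tokens)
    # convert_ids_to_tokens inverts convert_tokens_to_ids, so this returns `tokens`.
    return tokenizer.convert_ids_to_tokens(ids)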
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
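# Worked trace of the greedy longest-match loop above for the token "unaffable",
# assuming the vocab contains "un", "##aff" and "##able" (illustrative):
#   start=0: "unaffable", "unaffabl", ... not in vocab; "un" is -> emit "un", start=2
#   start=2: "##affable", ... not in vocab; "##aff" is -> emit "##aff", start=5
#   start=5: "##able" is in vocab -> emit "##able", start=9 == len(chars), done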
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 14,261 | 37.13369 | 133 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/modeling.py | # coding=utf-8
# Modified by Harold. Added VisualBERT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from copy import deepcopy
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
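# Illustrative only (the name `gelu_approx` is not part of the original file):
# the tanh approximation mentioned in the docstring above, for comparison with
# the exact erf form used by this module.
def gelu_approx(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))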
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
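# Illustrative round-trip using only the methods above:
#   config = BertConfig(vocab_size_or_config_json_file=32000)
#   clone = BertConfig.from_dict(json.loads(config.to_json_string()))
# `clone` reproduces every field of `config`.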
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
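            # i.e. y = weight * (x - mean(x)) / sqrt(var(x) + eps) + bias, with
            # statistics over the last dimension and eps inside the sqrt (TF style).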
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.output_attention_weights = config.output_attention_weights
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
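    # Shape trace for transpose_for_scores (illustrative):
    #   [batch, seq_len, all_head_size]
    #   -> view:    [batch, seq_len, num_heads, head_size]
    #   -> permute: [batch, num_heads, seq_len, head_size]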
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if self.output_attention_weights:
return context_layer, attention_probs
else:
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.output_attention_weights = config.output_attention_weights
def forward(self, input_tensor, attention_mask):
if self.output_attention_weights:
self_output, attention_weights = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output, attention_weights
else:
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
self.output_attention_weights = config.output_attention_weights
def forward(self, hidden_states, attention_mask):
if self.output_attention_weights:
attention_output, attention_weights = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, attention_weights
else:
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
self.output_attention_weights = config.output_attention_weights
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
if self.output_attention_weights:
attn_data_list = []
all_encoder_layers = []
for layer_module in self.layer:
hidden_states, attention_weights = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
attn_data_list.append(attention_weights)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers, attn_data_list
else:
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
""" An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedBertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, random_initialize = False, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
else:
archive_file = pretrained_model_name
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if random_initialize:
return model
if state_dict is None:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path)
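        # Older TF-converted checkpoints store LayerNorm parameters as
        # gamma/beta; rename them to this module's weight/bias below.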
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
return model
class BertModel(PreTrainedBertModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated with the first token of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # This attention mask is simpler than the triangular masking of causal attention
        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
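        # e.g. attention_mask [1, 1, 0] becomes [0.0, 0.0, -10000.0] here, which
        # effectively removes the padded position from the softmax.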
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
elif masked_lm_labels is not None and next_sentence_label is None: # If we did not specify the next_sentence_label
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
total_loss = masked_lm_loss
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertForMaskedLM(PreTrainedBertModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
class BertForNextSentencePrediction(PreTrainedBertModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `next_sentence_label` is not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
else:
return seq_relationship_score
class BertForSequenceClassification(PreTrainedBertModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels - 1].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForMultipleChoice(PreTrainedBertModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices=2):
super(BertForMultipleChoice, self).__init__(config)
self.num_choices = num_choices
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
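        # Flatten [batch, num_choices, seq_len] -> [batch * num_choices, seq_len]
        # so each choice runs through BERT as an independent sequence; the
        # per-choice scores are reshaped back to [batch, num_choices] below.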
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
_, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
else:
return reshaped_logits
class BertForTokenClassification(PreTrainedBertModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels - 1].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForQuestionAnswering(PreTrainedBertModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
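# Illustrative sketch (not part of the original module): decoding one answer
# span per example from the start/end logits above. A full SQuAD decoder also
# enforces start <= end and searches the top-k joint scores; this minimal
# version just takes the argmax of each distribution independently.
def _demo_decode_span(start_logits, end_logits):
    # start_logits, end_logits: [batch_size, sequence_length]
    start_index = start_logits.argmax(dim=-1)
    end_index = end_logits.argmax(dim=-1)
    return start_index, end_index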
#########################
#### Added by Harold ####
#########################
class BertEmbeddingsWithVisualEmbedding(nn.Module):
"""Construct the embeddings from word, position, token_type embeddings and visual embeddings.
"""
def __init__(self, config):
super(BertEmbeddingsWithVisualEmbedding, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
        #### Below are specific to encoding visual features
# Segment and position embedding for image features
self.token_type_embeddings_visual = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.position_embeddings_visual = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.projection = nn.Linear(config.visual_embedding_dim, config.hidden_size)
def special_intialize(self, method_type = 0):
        ### This is a bit unorthodox. The better way might be to add an initializer to AllenNLP.
        # This function is used to initialize token_type_embeddings_visual and position_embeddings_visual, just in case.
self.token_type_embeddings_visual.weight = torch.nn.Parameter(deepcopy(self.token_type_embeddings.weight.data), requires_grad = True)
self.position_embeddings_visual.weight = torch.nn.Parameter(deepcopy(self.position_embeddings.weight.data), requires_grad = True)
return
def forward(self, input_ids, token_type_ids=None, visual_embeddings=None, visual_embeddings_type=None, position_embeddings_visual=None, image_text_alignment = None, confidence = None):
'''
input_ids = [batch_size, sequence_length]
token_type_ids = [batch_size, sequence_length]
visual_embedding = [batch_size, image_feature_length, image_feature_dim]
image_text_alignment = [batch_size, image_feature_length, alignment_dim]
confidence = [batch_size, image_feature_length] of type LongTensor
'''
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
if visual_embeddings is not None:
visual_embeddings = self.projection(visual_embeddings)
token_type_embeddings_visual = self.token_type_embeddings_visual(visual_embeddings_type)
if image_text_alignment is not None:
# image_text_alignment = Batch x image_length x alignment_number. Each element denotes the position of the word corresponding to the image feature. -1 is the padding value.
image_text_alignment_mask = (image_text_alignment != -1).long()
# Get rid of the -1.
image_text_alignment = image_text_alignment_mask * image_text_alignment
# position_embeddings_visual = Batch x image_length x alignment length x dim
position_embeddings_visual = self.position_embeddings(image_text_alignment) * image_text_alignment_mask.to(dtype=next(self.parameters()).dtype).unsqueeze(-1)
position_embeddings_visual = position_embeddings_visual.sum(2)
                # We want to average along the alignment_number dimension.
image_text_alignment_mask = image_text_alignment_mask.to(dtype=next(self.parameters()).dtype).sum(2)
                image_text_alignment_mask[image_text_alignment_mask == 0] = 1  # avoid divide-by-zero errors
position_embeddings_visual = position_embeddings_visual / image_text_alignment_mask.unsqueeze(-1)
position_ids_visual = torch.zeros(*visual_embeddings.size()[:-1], dtype = torch.long).cuda()
                # When fine-tuning the detector, the image_text_alignment is sometimes padded too long.
if position_embeddings_visual.size(1) != visual_embeddings.size(1):
assert(position_embeddings_visual.size(1) >= visual_embeddings.size(1))
position_embeddings_visual = position_embeddings_visual[:, :visual_embeddings.size(1), :]
position_embeddings_visual = position_embeddings_visual + self.position_embeddings_visual(position_ids_visual)
else:
position_ids_visual = torch.zeros(*visual_embeddings.size()[:-1], dtype = torch.long).cuda()
position_embeddings_visual = self.position_embeddings_visual(position_ids_visual)
v_embeddings = visual_embeddings + position_embeddings_visual + token_type_embeddings_visual
            # Concatenate the two:
embeddings = torch.cat((embeddings, v_embeddings), dim = 1) # concat the visual embeddings after the attentions
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
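# Illustrative sketch (not part of the original module): how the
# image_text_alignment branch above averages word position embeddings. Each
# image region lists the word positions it is aligned to, padded with -1; the
# mask removes the padding and the sum is divided by the number of real
# alignments per region. The helper name and toy shapes are assumptions.
def _demo_alignment_average(position_table, image_text_alignment):
    # position_table: [num_positions, dim], e.g. position_embeddings.weight
    # image_text_alignment: [batch, image_length, alignment_num], -1 = padding
    mask = (image_text_alignment != -1).float()
    safe_alignment = image_text_alignment.clamp(min=0)   # drop the -1 padding
    gathered = position_table[safe_alignment] * mask.unsqueeze(-1)
    counts = mask.sum(-1).clamp(min=1)                   # avoid division by zero
    return gathered.sum(2) / counts.unsqueeze(-1)        # [batch, image_length, dim]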
class BertVisualModel(PreTrainedBertModel):
def __init__(self, config):
super(BertVisualModel, self).__init__(config)
self.embeddings = BertEmbeddingsWithVisualEmbedding(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.bypass_transformer = config.bypass_transformer
if self.bypass_transformer:
self.additional_layer = BertLayer(config)
self.output_attention_weights = config.output_attention_weights
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids, attention_mask, visual_embeddings, position_embeddings_visual, visual_embeddings_type, image_text_alignment, confidence, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids, visual_embeddings = visual_embeddings, position_embeddings_visual = position_embeddings_visual, visual_embeddings_type = visual_embeddings_type, image_text_alignment = image_text_alignment,
confidence = confidence)
if self.bypass_transformer and visual_embeddings is not None:
            assert(not output_all_encoded_layers)  # output_all_encoded_layers is not supported for the bypass model
text_length = input_ids.size(1)
text_embedding_output = embedding_output[:, :text_length, :]
visual_part = embedding_output[:, text_length:, :]
text_extended_attention_mask = extended_attention_mask[:, :, :text_length, :text_length]
encoded_layers = self.encoder(text_embedding_output,
text_extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
new_input = torch.cat((sequence_output, visual_part), dim = 1)
final_sequence_output = self.additional_layer(new_input, extended_attention_mask)
pooled_output = self.pooler(final_sequence_output)
return final_sequence_output, pooled_output
if self.output_attention_weights:
encoded_layers, attn_data_list = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output, attn_data_list
else:
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
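# Illustrative sketch (not part of the original module): the 2D -> 4D
# attention-mask transform used in BertVisualModel.forward above. Positions
# with mask value 1 contribute 0 to the attention scores; padded positions
# contribute -10000, which the softmax turns into (effectively) zero weight.
def _demo_extended_attention_mask(attention_mask, dtype=torch.float32):
    # attention_mask: [batch_size, seq_length] of 0/1
    extended = attention_mask.unsqueeze(1).unsqueeze(2).to(dtype)
    return (1.0 - extended) * -10000.0  # [batch_size, 1, 1, seq_length]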
class TrainVisualBERTObjective(PreTrainedBertModel):
def __init__(self, config, training_head_type, visual_embedding_dim = 512, hard_cap_seq_len = None, cut_first = "text", embedding_strategy = "plain", bypass_transformer = False, output_attention_weights= False):
super(TrainVisualBERTObjective, self).__init__(config)
config.visual_embedding_dim = visual_embedding_dim
config.embedding_strategy = embedding_strategy
config.bypass_transformer = bypass_transformer
config.output_attention_weights = output_attention_weights
self.output_attention_weights = output_attention_weights
self.cut_first = cut_first
self.hard_cap_seq_len = hard_cap_seq_len
self.bert = BertVisualModel(config)
self.training_head_type = training_head_type
if self.training_head_type == "pretraining":
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
elif self.training_head_type == "multichoice":
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.num_choices = 4 # For VCR
elif self.training_head_type == "vqa":
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 3129)
elif self.training_head_type == "vqa_advanced":
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
elif self.training_head_type == "nlvr":
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 2)
elif self.training_head_type == "flickr":
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.flickr_attention = FlickrAttention(config)
self.apply(self.init_bert_weights)
def forward(
self,
input_ids,
token_type_ids,
input_mask,
visual_embeddings,
position_embeddings_visual,
image_mask,
image_text_alignment = None,
confidence = None,
visual_embeddings_type=None,
label=None,
flickr_position = None,
masked_lm_labels=None,
image_lm_lables=None,
is_random_next=None,
output_all_encoded_layers = False):
# We want to convert everything into: batch x sequence_length x (dim).
flat_input_ids = transform_to_batch_sequence(input_ids)
flat_token_type_ids = transform_to_batch_sequence(token_type_ids)
flat_input_mask = transform_to_batch_sequence(input_mask)
flat_image_mask = transform_to_batch_sequence(image_mask)
flat_masked_lm_labels = transform_to_batch_sequence(masked_lm_labels)
flat_position_embeddings_visual = transform_to_batch_sequence(position_embeddings_visual)
flat_confidence = transform_to_batch_sequence(confidence)
flat_image_text_alignment = transform_to_batch_sequence_dim(image_text_alignment)
flat_visual_embeddings = transform_to_batch_sequence_dim(visual_embeddings)
if visual_embeddings_type is not None:
visual_embeddings_type = transform_to_batch_sequence(visual_embeddings_type)
else:
if flat_image_mask is not None:
visual_embeddings_type = torch.zeros_like(flat_image_mask, dtype = torch.long)
else:
visual_embeddings_type = None
if flat_image_mask is not None:
flat_attention_mask = torch.cat((flat_input_mask, flat_image_mask), dim = -1)
            assert(image_lm_lables is None)  # not supported yet
if flat_masked_lm_labels is not None:
assert(flat_masked_lm_labels.size(-1) == flat_input_mask.size(-1))
new_lm_labels = torch.ones_like(flat_attention_mask) * -1
size_masked_lm_labels = flat_masked_lm_labels.size()
assert(len(size_masked_lm_labels) == 2)
new_lm_labels[:size_masked_lm_labels[0], :size_masked_lm_labels[1]] = flat_masked_lm_labels
flat_masked_lm_labels = new_lm_labels
else:
flat_attention_mask = flat_input_mask
if self.output_attention_weights:
sequence_output, pooled_output, attention_weights = self.bert(
flat_input_ids,
flat_token_type_ids,
flat_attention_mask,
visual_embeddings = flat_visual_embeddings,
position_embeddings_visual = flat_position_embeddings_visual,
visual_embeddings_type = visual_embeddings_type,
image_text_alignment = flat_image_text_alignment,
confidence = flat_confidence,
output_all_encoded_layers=output_all_encoded_layers)
output_dict = {}
output_dict["attention_weights"] = attention_weights
output_dict['loss'] = None
return output_dict
sequence_output, pooled_output = self.bert(
flat_input_ids,
flat_token_type_ids,
flat_attention_mask,
visual_embeddings = flat_visual_embeddings,
position_embeddings_visual = flat_position_embeddings_visual,
visual_embeddings_type = visual_embeddings_type,
image_text_alignment = flat_image_text_alignment,
confidence = flat_confidence,
output_all_encoded_layers=output_all_encoded_layers)
output_dict = {}
if output_all_encoded_layers:
output_dict["sequence_output"] = sequence_output
output_dict["pooled_output"] = pooled_output
output_dict["loss"] = None
return output_dict
if self.training_head_type == "pretraining":
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
output_dict["logits"] = prediction_scores
output_dict["seq_relationship_score"] = seq_relationship_score
output_dict["loss"] = None
if flat_masked_lm_labels is not None and is_random_next is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.contiguous().view(-1, self.config.vocab_size), flat_masked_lm_labels.contiguous().view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.contiguous().view(-1, 2), is_random_next.contiguous().view(-1))
output_dict["next_sentence_loss"] = next_sentence_loss
output_dict["masked_lm_loss"] = masked_lm_loss
output_dict["loss"] = masked_lm_loss + next_sentence_loss
if flat_masked_lm_labels is not None and is_random_next is None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.contiguous().view(-1, self.config.vocab_size), flat_masked_lm_labels.contiguous().view(-1))
#output_dict["next_sentence_loss"] = None
output_dict["masked_lm_loss"] = masked_lm_loss
output_dict["loss"] = masked_lm_loss
return output_dict
elif self.training_head_type == "multichoice":
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, self.num_choices)
output_dict["logits"] = reshaped_logits
output_dict["loss"] = None
if label is not None:
loss_fct = CrossEntropyLoss()
output_dict["loss"] = loss_fct(reshaped_logits, label.contiguous())
return output_dict
elif self.training_head_type == "vqa":
index_to_gather = flat_input_mask.sum(1) - 2
pooled_output = torch.gather(sequence_output, 1, index_to_gather.unsqueeze(-1).unsqueeze(-1).expand(index_to_gather.size(0), 1, sequence_output.size(-1)))
flat_input_ids = torch.gather(flat_input_ids, 1, index_to_gather.unsqueeze(-1).expand(index_to_gather.size(0), 1))
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, 3129)
output_dict["logits"] = logits
output_dict["loss"] = None
output_dict["accuracy"] = None
if label is not None:
loss_fct = torch.nn.KLDivLoss(reduction = "batchmean")
log_softmax = torch.nn.LogSoftmax(dim=-1)
reshaped_logits = log_softmax(reshaped_logits)
output_dict["loss"] = loss_fct(reshaped_logits, label.contiguous())
output_dict["accuracy"] = torch.sum(compute_score_with_logits(reshaped_logits, label)) / label.size(0)
return output_dict
elif self.training_head_type == "vqa_advanced":
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
output_dict["logits"] = prediction_scores
output_dict["seq_relationship_score"] = seq_relationship_score
output_dict["loss"] = None
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.contiguous().view(-1, self.config.vocab_size), flat_masked_lm_labels.contiguous().view(-1))
output_dict["masked_lm_loss"] = masked_lm_loss
output_dict["loss"] = masked_lm_loss
            prediction_tokens = torch.max(prediction_scores, -1)[1].view(input_ids.size(0), -1).cpu().numpy()  # batch x sequence_length; records the predicted words
lm_labels = flat_masked_lm_labels.view(input_ids.size(0), -1).cpu().numpy()
counter = 0.0
flags = []
for i in range(lm_labels.shape[0]):
flag = True
for j in range(lm_labels.shape[1]):
if lm_labels[i][j] != -1 and prediction_tokens[i][j] != lm_labels[i][j]:
flag = False
break
if flag:
counter += 1
flags.append(flag)
output_dict["accuracy"] = counter / prediction_tokens.shape[0]
return output_dict
elif self.training_head_type == "nlvr":
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous()
output_dict["logits"] = logits
output_dict["loss"] = None
if label is not None:
loss_fct = CrossEntropyLoss()
output_dict["loss"] = loss_fct(reshaped_logits, label.contiguous())
return output_dict
elif self.training_head_type == "flickr":
if flickr_position is not None:
entities_num = (flickr_position != -1).long().view(-1).sum(-1)
flickr_position_mask = (flickr_position != -1).long()
# Make the -1 become 0
flickr_position = flickr_position * flickr_position_mask
# Selected_positions = batch x selected position x dim
selected_positions = batched_index_select(sequence_output, 1, flickr_position)
# Visual Features = batch x visual_feature_length x dim
visual_features = sequence_output[:, flat_input_mask.size(1): ,:]
assert(visual_features.size(1) == flat_image_mask.size(1))
scores = self.flickr_attention(selected_positions, visual_features, flat_image_mask)
# scores = batch x selected position x visual_feature
# scores = selected_positions.bmm(visual_features.transpose(1,2))
loss_fct = torch.nn.KLDivLoss(reduction = "batchmean")
log_softmax = torch.nn.LogSoftmax(dim=-1)
scores = log_softmax(scores)
label = label.contiguous()
# label = batch x selected_postion x needed position
output_dict["loss"] = loss_fct(scores, label)
acc, upper_acc = compute_score_with_logits_flickr(scores, label)
output_dict["accuracy"] = acc / entities_num
output_dict["upperbound_accuracy"] = upper_acc / entities_num
output_dict["entity_num"] = entities_num
return output_dict
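# Illustrative sketch (not part of the original module): constructing the
# multi-task wrapper above for VQA. The config path and the 2048-dim visual
# features are assumptions here; the actual values come from the experiment
# configuration being run.
def _demo_build_vqa_model(config_path="bert_config.json"):
    config = BertConfig.from_json_file(config_path)
    return TrainVisualBERTObjective(config,
                                    training_head_type="vqa",
                                    visual_embedding_dim=2048)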
class FlickrAttention(nn.Module):
def __init__(self, config):
super(FlickrAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = 1  # config.num_attention_heads; a single head is used here
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, query, key, attention_mask):
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype)
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = (1.0 - attention_mask) * -10000.0
mixed_query_layer = self.query(query)
mixed_key_layer = self.key(key)
# We don't need value layers
#mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
#value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_scores = attention_scores.squeeze(1)
return attention_scores
def compute_score_with_logits_flickr(logits, labels, recall = 1):
    # The recall value is changed manually when evaluating, which is a bit clumsy.
labels_mask = (labels != 0.0).float()
upper_bound_labels = labels.sum(-1).view(-1).sum(-1)
labels = torch.ones_like(labels) * labels_mask
    if recall != 1:
        # Evaluation mode; slower code is acceptable here.
# labels = batch x seq x target length
logits = logits.topk(k=recall, dim = -1)[1].data.cpu().numpy()
counter = 0.0
labels = labels.data.cpu().numpy()
for i in range(logits.shape[0]):
for j in range(logits.shape[1]):
possibles = logits[i][j]
current_label = labels[i][j][possibles]
if current_label.sum(-1) != 0:
counter += 1
counter = torch.Tensor([counter]).cuda()
return counter, upper_bound_labels
logits = torch.max(logits, -1)[1].data # argmax
logits = logits.unsqueeze(-1)
scores = torch.gather(input = labels, dim = 2, index = logits)
scores = scores.view(-1).sum(-1)
return scores, upper_bound_labels
def transform_to_batch_sequence(tensor):
if tensor is not None:
if len(tensor.size()) == 2:
return tensor
else:
assert(len(tensor.size()) == 3)
return tensor.contiguous().view(-1, tensor.size(-1))
else:
return None
def transform_to_batch_sequence_dim(tensor):
if tensor is not None:
if len(tensor.size()) == 3:
return tensor
else:
assert(len(tensor.size()) == 4)
return tensor.contiguous().view(-1, tensor.size(-2), tensor.size(-1))
else:
return None
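# Illustrative sketch (not part of the original module): the two transforms
# above flatten multiple-choice inputs, e.g. VCR tensors shaped
# [batch, num_choices, seq_length] become [batch * num_choices, seq_length]
# so every choice is scored as an independent sequence. Shapes are toy values.
def _demo_flatten_choices():
    input_ids = torch.zeros(2, 4, 128, dtype=torch.long)    # 2 examples, 4 choices
    features = torch.zeros(2, 4, 36, 2048)                  # 36 image regions each
    flat_ids = transform_to_batch_sequence(input_ids)       # -> [8, 128]
    flat_feats = transform_to_batch_sequence_dim(features)  # -> [8, 36, 2048]
    return flat_ids.shape, flat_feats.shape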
def masked_unk_softmax(x, dim, mask_idx):
x1 = F.softmax(x, dim=dim)
x1[:, mask_idx] = 0
x1_sum = torch.sum(x1, dim=1, keepdim=True)
y = x1 / x1_sum
return y
def compute_score_with_logits(logits, labels):
logits = masked_unk_softmax(logits, 1, 0)
logits = torch.max(logits, 1)[1].data # argmax
one_hots = torch.zeros_like(labels)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
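# Illustrative sketch (not part of the original module): how the VQA accuracy
# above is typically reduced. `labels` holds soft scores in [0, 1] per answer
# (the VQA evaluation convention), so the score gathered at each argmax
# prediction is summed and averaged over the batch.
def _demo_vqa_accuracy(logits, labels):
    scores = compute_score_with_logits(logits, labels)  # one-hot picks * soft scores
    return scores.sum() / labels.size(0)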
def batched_index_select(t, dim, inds):
dummy = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), t.size(2))
out = t.gather(dim, dummy) # b x e x f
return out | 84,216 | 48.077506 | 259 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/fine_tuning.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_pretrained_bert.modeling import BertForPreTraining
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class BERTDataset(Dataset):
def __init__(self, corpus_path, tokenizer, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True):
self.vocab = tokenizer.vocab
self.tokenizer = tokenizer
self.seq_len = seq_len
self.on_memory = on_memory
self.corpus_lines = corpus_lines # number of non-empty lines in input corpus
self.corpus_path = corpus_path
self.encoding = encoding
self.current_doc = 0 # to avoid random sentence from same doc
# for loading samples directly from file
self.sample_counter = 0 # used to keep track of full epochs on file
self.line_buffer = None # keep second sentence of a pair in memory and use as first sentence in next pair
# for loading samples in memory
self.current_random_doc = 0
self.num_docs = 0
self.sample_to_doc = [] # map sample index to doc and line
# load samples into memory
if on_memory:
self.all_docs = []
doc = []
self.corpus_lines = 0
with open(corpus_path, "r", encoding=encoding) as f:
for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
line = line.strip()
if line == "":
self.all_docs.append(doc)
doc = []
#remove last added sample because there won't be a subsequent line anymore in the doc
self.sample_to_doc.pop()
else:
#store as one sample
sample = {"doc_id": len(self.all_docs),
"line": len(doc)}
self.sample_to_doc.append(sample)
doc.append(line)
self.corpus_lines = self.corpus_lines + 1
# if last row in file is not empty
if self.all_docs[-1] != doc:
self.all_docs.append(doc)
self.sample_to_doc.pop()
self.num_docs = len(self.all_docs)
# load samples later lazily from disk
else:
if self.corpus_lines is None:
with open(corpus_path, "r", encoding=encoding) as f:
self.corpus_lines = 0
for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
if line.strip() == "":
self.num_docs += 1
else:
self.corpus_lines += 1
# if doc does not end with empty line
if line.strip() != "":
self.num_docs += 1
self.file = open(corpus_path, "r", encoding=encoding)
self.random_file = open(corpus_path, "r", encoding=encoding)
def __len__(self):
# last line of doc won't be used, because there's no "nextSentence". Additionally, we start counting at 0.
return self.corpus_lines - self.num_docs - 1
def __getitem__(self, item):
cur_id = self.sample_counter
self.sample_counter += 1
if not self.on_memory:
# after one epoch we start again from beginning of file
if cur_id != 0 and (cur_id % len(self) == 0):
self.file.close()
self.file = open(self.corpus_path, "r", encoding=self.encoding)
t1, t2, is_next_label = self.random_sent(item)
# tokenize
tokens_a = self.tokenizer.tokenize(t1)
tokens_b = self.tokenizer.tokenize(t2)
# combine to one sample
cur_example = InputExample(guid=cur_id, tokens_a=tokens_a, tokens_b=tokens_b, is_next=is_next_label)
# transform sample to features
cur_features = convert_example_to_features(cur_example, self.seq_len, self.tokenizer)
cur_tensors = (torch.tensor(cur_features.input_ids),
torch.tensor(cur_features.input_mask),
torch.tensor(cur_features.segment_ids),
torch.tensor(cur_features.lm_label_ids),
torch.tensor(cur_features.is_next))
return cur_tensors
def random_sent(self, index):
"""
Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
from one doc. With 50% the second sentence will be a random one from another doc.
:param index: int, index of sample.
:return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
"""
t1, t2 = self.get_corpus_line(index)
if random.random() > 0.5:
label = 0
else:
t2 = self.get_random_line()
label = 1
assert len(t1) > 0
assert len(t2) > 0
return t1, t2, label
def get_corpus_line(self, item):
"""
Get one sample from corpus consisting of a pair of two subsequent lines from the same doc.
:param item: int, index of sample.
:return: (str, str), two subsequent sentences from corpus
"""
t1 = ""
t2 = ""
assert item < self.corpus_lines
if self.on_memory:
sample = self.sample_to_doc[item]
t1 = self.all_docs[sample["doc_id"]][sample["line"]]
t2 = self.all_docs[sample["doc_id"]][sample["line"]+1]
# used later to avoid random nextSentence from same doc
self.current_doc = sample["doc_id"]
return t1, t2
else:
if self.line_buffer is None:
# read first non-empty line of file
while t1 == "" :
t1 = next(self.file).strip()
t2 = next(self.file).strip()
else:
# use t2 from previous iteration as new t1
t1 = self.line_buffer
t2 = next(self.file).strip()
# skip empty rows that are used for separating documents and keep track of current doc id
while t2 == "" or t1 == "":
t1 = next(self.file).strip()
t2 = next(self.file).strip()
self.current_doc = self.current_doc+1
self.line_buffer = t2
assert t1 != ""
assert t2 != ""
return t1, t2
def get_random_line(self):
"""
Get random line from another document for nextSentence task.
:return: str, content of one line
"""
# Similar to original tf repo: This outer loop should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document we're processing.
for _ in range(10):
if self.on_memory:
rand_doc_idx = random.randint(0, len(self.all_docs)-1)
rand_doc = self.all_docs[rand_doc_idx]
line = rand_doc[random.randrange(len(rand_doc))]
else:
rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)
#pick random line
for _ in range(rand_index):
line = self.get_next_line()
#check if our picked random line is really from another doc like we want it to be
if self.current_random_doc != self.current_doc:
break
return line
def get_next_line(self):
""" Gets next line of random_file and starts over when reaching end of file"""
try:
line = next(self.random_file).strip()
#keep track of which document we are currently looking at to later avoid having the same doc as t1
if line == "":
self.current_random_doc = self.current_random_doc + 1
line = next(self.random_file).strip()
except StopIteration:
self.random_file.close()
self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
line = next(self.random_file).strip()
return line
class InputExample(object):
"""A single training/test example for the language model."""
def __init__(self, guid, tokens_a, tokens_b=None, is_next=None, lm_labels=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
tokens_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
tokens_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.tokens_a = tokens_a
self.tokens_b = tokens_b
self.is_next = is_next # nextSentence
self.lm_labels = lm_labels # masked words for language model
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, is_next, lm_label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.is_next = is_next
self.lm_label_ids = lm_label_ids
def random_word(tokens, tokenizer, probability = 0.15):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
    :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
"""
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
# mask token with 15% probability
if prob < probability:
prob /= probability
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = "[MASK]"
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
try:
output_label.append(tokenizer.vocab[token])
except KeyError:
# For unknown words (should not occur with BPE vocab)
output_label.append(tokenizer.vocab["[UNK]"])
logger.warning("Cannot find token '{}' in vocab. Using [UNK] insetad".format(token))
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
return tokens, output_label
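# Illustrative sketch (not part of the original module): masking a toy
# sentence with the helper above. The tokenizer name is an assumption; any
# BertTokenizer with a loaded vocab behaves the same way.
def _demo_random_word():
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    tokens = tokenizer.tokenize("the quick brown fox")
    masked_tokens, lm_labels = random_word(tokens, tokenizer)
    # lm_labels[i] is the original vocab id where a mask was applied, -1 elsewhere
    return masked_tokens, lm_labels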
def convert_example_to_features(example, max_seq_length, tokenizer):
"""
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param example: InputExample, containing sentence input as strings and is_next label
:param max_seq_length: int, maximum length of sequence.
:param tokenizer: Tokenizer
:return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
"""
tokens_a = example.tokens_a
tokens_b = example.tokens_b
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
tokens_a, t1_label = random_word(tokens_a, tokenizer)
tokens_b, t2_label = random_word(tokens_b, tokenizer)
# concatenate lm labels and account for CLS, SEP, SEP
lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
assert len(tokens_b) > 0
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(lm_label_ids) == max_seq_length
if example.guid < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("LM label: %s " % (lm_label_ids))
logger.info("Is next sentence label: %s " % (example.is_next))
features = InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
is_next=example.is_next)
return features
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_file",
default=None,
type=str,
required=True,
help="The input train corpus.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--on_memory",
action='store_true',
help="Whether to load train samples into memory or use disk")
parser.add_argument("--do_lower_case",
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type = float, default = 0,
help = "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train:
raise ValueError("Training is currently the only implemented execution option. Please set `do_train`.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
#train_examples = None
num_train_optimization_steps = None
if args.do_train:
print("Loading Train Dataset", args.train_file)
train_dataset = BERTDataset(args.train_file, tokenizer, seq_len=args.max_seq_length,
corpus_lines=None, on_memory=args.on_memory)
num_train_optimization_steps = int(
len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare model
model = BertForPreTraining.from_pretrained(args.bert_model)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
global_step = 0
if args.do_train:
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
            #TODO: check if this works with the current data generator that reads from disk via next(file)
            # (it does not retrieve items by index)
train_sampler = DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Save a trained model
logger.info("** ** * Saving fine - tuned model ** ** * ")
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
if args.do_train:
torch.save(model_to_save.state_dict(), output_model_file)
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
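# Illustrative sketch (not part of the original module): the truncation
# heuristic above always trims the currently longer sequence, so a 6-token and
# a 2-token pair capped at 5 total become 3 + 2 rather than an even split.
def _demo_truncate():
    tokens_a = ["a", "b", "c", "d", "e", "f"]
    tokens_b = ["x", "y"]
    _truncate_seq_pair(tokens_a, tokens_b, 5)
    return tokens_a, tokens_b  # (['a', 'b', 'c'], ['x', 'y'])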
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
if __name__ == "__main__":
main() | 27,941 | 42.187017 | 139 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
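# Illustrative sketch (not part of the original module): the same URL always
# hashes to the same cache filename, and the ETag (when the server provides
# one) is hashed into a suffix so stale cache entries are not reused. The URL
# here is a placeholder.
def _demo_url_to_filename():
    name = url_to_filename("https://example.com/model.bin", etag="abc123")
    url_hash, _, etag_hash = name.partition(".")
    return url_hash, etag_hash  # two sha256 hex digests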
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename: str) -> Set[str]:
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| 8,021 | 32.425 | 98 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/__init__.py | __version__ = "0.4.0"
from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .modeling import (BertConfig, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering)
from .optimization import BertAdam
from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE
| 478 | 52.222222 | 76 | py |
visualbert | visualbert-master/visualbert/dataloaders/vcr.py | # Modifed from R2C
"""
Dataloaders for VCR
"""
import json
import pickle
import os
from collections import defaultdict
import numpy as np
import numpy
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask
from torch.utils.data import Dataset
from dataloaders.box_utils import load_image, resize_image, to_tensor_and_normalize
from dataloaders.mask_utils import make_mask
from dataloaders.bert_field import BertField
import h5py
from copy import deepcopy
from tqdm import tqdm
from .vcr_data_utils import data_iter, data_iter_test, data_iter_item
from .bert_data_utils import InputExample, InputFeatures, get_one_image_feature_npz_screening_parameters, get_image_feat_reader, faster_RCNN_feat_reader, screen_feature
from .bert_field import IntArrayField
from visualbert.pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
GENDER_NEUTRAL_NAMES = ['Casey', 'Riley', 'Jessie', 'Jackie', 'Avery', 'Jaime', 'Peyton', 'Kerry', 'Jody', 'Kendall',
'Peyton', 'Skyler', 'Frankie', 'Pat', 'Quinn']
# Here's an example jsonl
# {
# "movie": "3015_CHARLIE_ST_CLOUD",
# "objects": ["person", "person", "person", "car"],
# "interesting_scores": [0],
# "answer_likelihood": "possible",
# "img_fn": "lsmdc_3015_CHARLIE_ST_CLOUD/3015_CHARLIE_ST_CLOUD_00.23.57.935-00.24.00.783@0.jpg",
# "metadata_fn": "lsmdc_3015_CHARLIE_ST_CLOUD/3015_CHARLIE_ST_CLOUD_00.23.57.935-00.24.00.783@0.json",
# "answer_orig": "No she does not",
# "question_orig": "Does 3 feel comfortable?",
# "rationale_orig": "She is standing with her arms crossed and looks disturbed",
# "question": ["Does", [2], "feel", "comfortable", "?"],
# "answer_match_iter": [3, 0, 2, 1],
# "answer_sources": [3287, 0, 10184, 2260],
# "answer_choices": [
# ["Yes", "because", "the", "person", "sitting", "next", "to", "her", "is", "smiling", "."],
# ["No", "she", "does", "not", "."],
# ["Yes", ",", "she", "is", "wearing", "something", "with", "thin", "straps", "."],
# ["Yes", ",", "she", "is", "cold", "."]],
# "answer_label": 1,
# "rationale_choices": [
# ["There", "is", "snow", "on", "the", "ground", ",", "and",
# "she", "is", "wearing", "a", "coat", "and", "hate", "."],
# ["She", "is", "standing", "with", "her", "arms", "crossed", "and", "looks", "disturbed", "."],
# ["She", "is", "sitting", "very", "rigidly", "and", "tensely", "on", "the", "edge", "of", "the",
# "bed", ".", "her", "posture", "is", "not", "relaxed", "and", "her", "face", "looks", "serious", "."],
# [[2], "is", "laying", "in", "bed", "but", "not", "sleeping", ".",
# "she", "looks", "sad", "and", "is", "curled", "into", "a", "ball", "."]],
# "rationale_sources": [1921, 0, 9750, 25743],
# "rationale_match_iter": [3, 0, 2, 1],
# "rationale_label": 1,
# "img_id": "train-0",
# "question_number": 0,
# "annot_id": "train-0",
# "match_fold": "train-0",
# "match_index": 0,
# }
class VCR(Dataset):
def __init__(self,
split,
mode,
only_use_relevant_dets=True,
add_image_as_a_box=True,
conditioned_answer_choice=0,
do_lower_case = True,
bert_model_name = "",
max_seq_length = 128,
pretraining = False,
pretraining_include_qa_and_qar = False,
complete_shuffle = False,
use_alignment = False,
add_all_features = False,
answer_labels_path = None,
vcr_annots_dir = None,
vcr_image_dir = None
):
# Should clean this mess when I find the time...
self.split = split
self.mode = mode
self.only_use_relevant_dets = only_use_relevant_dets
self.pretraining_include_qa_and_qar = pretraining_include_qa_and_qar
self.add_all_features = add_all_features
self.use_alignment = use_alignment
self.add_image_as_a_box = add_image_as_a_box
self.conditioned_answer_choice = conditioned_answer_choice
self.vcr_annots_dir = vcr_annots_dir
self.vcr_image_dir = vcr_image_dir
with open(os.path.join(self.vcr_annots_dir, '{}.jsonl'.format(split)), 'r') as f:
self.items = [json.loads(s) for s in f]
        if split not in ('test', 'train', 'val'):
            raise ValueError("split must be in test, train, or val. Supplied {}".format(split))
        if mode not in ('answer', 'rationale'):
            raise ValueError("mode must be answer or rationale. Supplied {}".format(mode))
self.vocab = Vocabulary()
with open(os.path.join(os.path.dirname(self.vcr_annots_dir), 'dataloaders', 'cocoontology.json'), 'r') as f:
coco = json.load(f)
self.coco_objects = ['__background__'] + [x['name'] for k, x in sorted(coco.items(), key=lambda x: int(x[0]))]
self.coco_obj_to_ind = {o: i for i, o in enumerate(self.coco_objects)}
self.do_lower_case = do_lower_case
self.bert_model_name = bert_model_name
self.max_seq_length = max_seq_length
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name, do_lower_case=self.do_lower_case)
self.pretraining = pretraining
# This is for pretraining
self.masked_lm_prob = 0.15
self.max_predictions_per_seq = 20
self.complete_shuffle = complete_shuffle
##########
        self.only_qar = self.mode == 'rationale'
if answer_labels_path is not None:
# Only when we are testing rationale...
assert(self.only_qar)
            if answer_labels_path in (0, 1, 2, 3):
                # Condition every item on one fixed answer choice.
                for i in self.items:
                    i["answer_label"] = answer_labels_path
            else:
self.answer_labels = np.load(answer_labels_path)
self.answer_labels = self.answer_labels.argmax(1)
if self.split == "test":
assert(self.answer_labels.shape[0] == len(self))
for index, i in enumerate(self.items):
i["answer_label"] = self.answer_labels[index]
else:
self.answer_labels = None
@property
def is_train(self):
return self.split == 'train'
@classmethod
def splits(cls, **kwargs):
""" Helper method to generate splits of the dataset"""
kwargs_copy = {x: y for x, y in kwargs.items()}
if 'mode' not in kwargs:
kwargs_copy['mode'] = 'answer'
train = cls(split='train', **kwargs_copy)
val = cls(split='val', **kwargs_copy)
test = cls(split='test', **kwargs_copy)
return train, val, test
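    # A minimal usage sketch (hypothetical paths; assumes a local VCR download):
    #   train, val, test = VCR.splits(mode='answer',
    #                                 bert_model_name='bert-base-uncased',
    #                                 vcr_annots_dir='data/vcr/annots',
    #                                 vcr_image_dir='data/vcr/images')
    #   image, instance = train[0]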
def __len__(self):
if self.complete_shuffle:
if self.pretraining_include_qa_and_qar:
return len(self.items) * 8
else:
return len(self.items) * 4
return len(self.items)
def _get_dets_to_use(self, item, only_use_answer = False, only_use_qar = False): # Need to fix this match
"""
We might want to use fewer detectiosn so lets do so.
:param item:
:param question:
:param answer_choices:
:return:
"""
# Load questions and answers
question = item['question']
answer_choices = item['{}_choices'.format(self.mode)]
if self.mode == "answer":
question = item['question']
answer_choices = item['{}_choices'.format(self.mode)]
elif self.mode == "rationale":
question = item['question'] + item['answer_choices'][item['answer_label']]
answer_choices = item['{}_choices'.format(self.mode)]
if self.pretraining_include_qa_and_qar:
answer_choices = item['answer_choices'] + item['rationale_choices']
if self.add_all_features:
question = item['question']
answer_choices = item['answer_choices'] + item['rationale_choices']
if self.only_use_relevant_dets:
dets2use = np.zeros(len(item['objects']), dtype=bool)
people = np.array([x == 'person' for x in item['objects']], dtype=bool)
for sent in answer_choices + [question]:
for possibly_det_list in sent:
if isinstance(possibly_det_list, list):
for tag in possibly_det_list:
if tag >= 0 and tag < len(item['objects']): # sanity check
dets2use[tag] = True
elif possibly_det_list.lower() in ('everyone', 'everyones'):
dets2use |= people
if not dets2use.any():
dets2use |= people
else:
dets2use = np.ones(len(item['objects']), dtype=bool)
# we will use these detections
dets2use = np.where(dets2use)[0]
old_det_to_new_ind = np.zeros(len(item['objects']), dtype=np.int32) - 1
old_det_to_new_ind[dets2use] = np.arange(dets2use.shape[0], dtype=np.int32)
# If we add the image as an extra box then the 0th will be the image.
if self.add_image_as_a_box:
old_det_to_new_ind[dets2use] += 1
old_det_to_new_ind = old_det_to_new_ind.tolist()
return dets2use, old_det_to_new_ind
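    # A small worked example (hypothetical item, assuming add_image_as_a_box=True):
    # with objects ['person', 'person', 'car'] and only detection 1 referenced in the text,
    # dets2use == [1] and old_det_to_new_ind == [-1, 1, -1], since index 0 is reserved
    # for the whole-image box.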
def __getitem__(self, index):
if self.complete_shuffle:
            if self.pretraining_include_qa_and_qar:
                # Read off the sub-example id before collapsing down to the item index.
                which = index % 8
                index = index // 8
            else:
                which = index % 4
                index = index // 4
else:
which = None
item = deepcopy(self.items[index])
###################################################################
# Load questions and answers
answer_choices = item['{}_choices'.format(self.mode)]
        only_use_answer = self.complete_shuffle and which < 4
        only_use_qar = self.complete_shuffle and which >= 4
dets2use, old_det_to_new_ind = self._get_dets_to_use(item, only_use_answer = only_use_answer, only_use_qar = only_use_qar)
        # The only_use_qar flag is ambiguous...
instance_dict = {}
if self.split != 'test':
instance_dict['label'] = LabelField(item['{}_label'.format(self.mode)], skip_indexing=True)
instance_dict['metadata'] = MetadataField({'annot_id': item['annot_id'], 'ind': index, 'movie': item['movie'],
'img_fn': item['img_fn'],
'question_number': item['question_number']})
###################################################################
# Load image now and rescale it. Might have to subtract the mean and whatnot here too.
image = load_image(os.path.join(self.vcr_image_dir, item['img_fn']))
#image = self.imagedatas(item['img_fn'])
image, window, img_scale, padding = resize_image(image, random_pad=self.is_train)
image = to_tensor_and_normalize(image)
c, h, w = image.shape
###################################################################
# Load boxes.
with open(os.path.join(self.vcr_image_dir, item['metadata_fn']), 'r') as f:
metadata = json.load(f)
# [nobj, 14, 14]
segms = np.stack([make_mask(mask_size=14, box=metadata['boxes'][i], polygons_list=metadata['segms'][i]) for i in dets2use])
# Chop off the final dimension, that's the confidence
boxes = np.array(metadata['boxes'])[dets2use, :-1]
# Possibly rescale them if necessary
boxes *= img_scale
boxes[:, :2] += np.array(padding[:2])[None]
boxes[:, 2:] += np.array(padding[:2])[None]
obj_labels = [self.coco_obj_to_ind[item['objects'][i]] for i in dets2use.tolist()]
if self.add_image_as_a_box:
boxes = np.row_stack((window, boxes))
segms = np.concatenate((np.ones((1, 14, 14), dtype=np.float32), segms), 0)
obj_labels = [self.coco_obj_to_ind['__background__']] + obj_labels
examples = data_iter_item(item, tokenizer=self.tokenizer,
max_seq_length=self.max_seq_length,
endingonly=False,
include_qar = self.pretraining_include_qa_and_qar,
only_qar = self.only_qar)
self.getitem_bert_part(examples, item, instance_dict, which)
if self.use_alignment: # Alignment between objects and text
######################
            examples_alignment_pack = []
for i in range(len(examples)):
if self.pretraining_include_qa_and_qar:
if i < 4:
raw_text_a = item["question"]
raw_text_b = item['answer_choices'][i]
else:
raw_text_a = item["question"] + item['answer_choices'][item['answer_label']]
raw_text_b = item['rationale_choices'][i - 4]
elif self.only_qar:
raw_text_a = item["question"] + item['answer_choices'][item['answer_label']] # This is the correct alignment right now.
raw_text_b = item['rationale_choices'][i]
else:
raw_text_a = item["question"]
raw_text_b = item['answer_choices'][i]
true_text_a = examples[i][0].text_a
true_text_b = examples[i][0].text_b
text_alignment_a = examples[i][1]
text_alignment_b = examples[i][2]
                examples_alignment_pack.append((raw_text_a, raw_text_b, true_text_a, true_text_b, text_alignment_a, text_alignment_b))
image_box_position = []
if which is not None:
                raw_text_a, raw_text_b, true_text_a, true_text_b, text_alignment_a, text_alignment_b = examples_alignment_pack[which]
box_record = defaultdict(list)
self.get_alignment_original(raw_text_a, text_alignment_a, old_det_to_new_ind, box_record, offset = 1)
self.get_alignment_original(raw_text_b, text_alignment_b, old_det_to_new_ind, box_record, offset = 1 + len(text_alignment_a) + 1)
image_text_alignment = ListField([IntArrayField(np.array(box_record[i]), padding_value = -1) for i in range(len(boxes))])
else:
                for raw_text_a, raw_text_b, true_text_a, true_text_b, text_alignment_a, text_alignment_b in examples_alignment_pack:
box_record = defaultdict(list)
self.get_alignment_original(raw_text_a, text_alignment_a, old_det_to_new_ind, box_record, offset = 1)
self.get_alignment_original(raw_text_b, text_alignment_b, old_det_to_new_ind, box_record, offset = 1 + len(text_alignment_a) + 1)
image_box_position.append(ListField([IntArrayField(np.array(box_record[i]), padding_value = -1) for i in range(len(boxes))]))
image_text_alignment = ListField(image_box_position)
######################
instance_dict["image_text_alignment"] = image_text_alignment
instance_dict['segms'] = ArrayField(segms, padding_value=0)
instance_dict['objects'] = ListField([LabelField(x, skip_indexing=True) for x in obj_labels])
        # Sanity-check the box coordinates (this branch used to drop into ipdb for debugging).
        assert np.all((boxes[:, 0] >= 0.) & (boxes[:, 0] < boxes[:, 2]))
assert np.all((boxes[:, 1] >= 0.) & (boxes[:, 1] < boxes[:, 3]))
assert np.all((boxes[:, 2] <= w))
assert np.all((boxes[:, 3] <= h))
instance_dict['boxes'] = ArrayField(boxes, padding_value=-1)
instance = Instance(instance_dict)
instance.index_fields(self.vocab)
return image, instance
def get_alignment_original(self, raw_text_mixed, text_alignment, old_det_to_new_ind, box_record, offset):
# raw_text_mixed is the raw text information in VCR dataset
# text_alignment is the result from BERT tokenizer recording the alignment between raw tokens and subword tokens.
counter = 0
for i in raw_text_mixed:
if isinstance(i, list):
for box_index in i:
new_box_index = old_det_to_new_ind[box_index]
assert(new_box_index != -1)
# Need to record which box corresponds to which person.
for i in text_alignment:
if i == counter:
box_record[new_box_index].append(i + offset)
break
counter += 1
else:
counter += 1
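    # A worked example (hypothetical values): with raw_text_mixed = ['Does', [2], 'feel'],
    # text_alignment = [0, 1, 1, 2], offset = 1 and old_det_to_new_ind[2] == 1, the loop
    # records box_record == {1: [2]}: new box index 1 maps to alignment value 1 plus the
    # offset of 1 that accounts for the leading [CLS] token.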
def getitem_bert_part(self, examples, item, instance_dict, which = None):
        # In examples, each element: InputExample, alignment for context, alignment for answer
if self.pretraining:
if self.complete_shuffle:
assert(which is not None)
feature = InputFeatures.convert_one_example_to_features_pretraining(
example = examples[which][0],
tokenizer=self.tokenizer,
probability = self.masked_lm_prob)
feature.insert_field_into_dict(instance_dict)
return
features = []
for i in examples:
inputexample_instance = i[0]
example = InputFeatures.convert_one_example_to_features_pretraining(
example = inputexample_instance,
tokenizer=self.tokenizer,
probability = self.masked_lm_prob)
features.append(example)
InputFeatures.convert_list_features_to_allennlp_list_feild(features, instance_dict)
else:
features = InputFeatures.convert_examples_to_features(
examples=[x[0] for x in examples],
tokenizer=self.tokenizer)
InputFeatures.convert_list_features_to_allennlp_list_feild(features, instance_dict)
@staticmethod
def collate_fn(data):
if isinstance(data[0], Instance):
batch = Batch(data)
td = batch.as_tensor_dict()
return td
else:
images, instances = zip(*data)
images = torch.stack(images, 0)
batch = Batch(instances)
td = batch.as_tensor_dict()
if 'question' in td:
td['question_mask'] = get_text_field_mask(td['question'], num_wrapping_dims=1)
td['question_tags'][td['question_mask'] == 0] = -2 # Padding
if "answer" in td:
td['answer_mask'] = get_text_field_mask(td['answers'], num_wrapping_dims=1)
td['answer_tags'][td['answer_mask'] == 0] = -2
td['box_mask'] = torch.all(td['boxes'] >= 0, -1).long()
td['images'] = images
return td
class VCRLoader(torch.utils.data.DataLoader):
"""
Iterates through the data, filtering out None,
but also loads everything as a (cuda) variable
"""
@classmethod
def from_dataset(cls, data, batch_size=3, num_workers=6, num_gpus=3, **kwargs):
loader = cls(
dataset=data,
batch_size=batch_size * num_gpus,
shuffle=data.is_train,
num_workers=num_workers,
collate_fn=data.collate_fn,
drop_last=False,
pin_memory=False,
**kwargs,
)
return loader | 20,515 | 42.191579 | 168 | py |
visualbert | visualbert-master/visualbert/dataloaders/flickr_dataset.py | import os
from torch.utils.data import Dataset
import numpy as np
import numpy
import random
import json
from collections import defaultdict
from tqdm import tqdm
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask
from torch.utils.data.dataloader import default_collate
from dataloaders.box_utils import load_image, resize_image, to_tensor_and_normalize
from dataloaders.mask_utils import make_mask
from dataloaders.bert_field import BertField, IntArrayField
from pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from copy import deepcopy
import _pickle as cPickle
import utils
import warnings
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
    import h5py
from xml.etree.ElementTree import parse
import itertools
import re
COUNTING_ONLY = False
from .flickr_ban.dataset import _load_flickr30k, _load_flickr30k_our
from .bert_data_utils import *
from .vcr_data_utils import retokenize_with_alignment
class Flickr30kFeatureDataset(Dataset):
def __init__(self, name, args, dictionary = None, data_root='data/flickr30k/', chunk = None, entries = None):
super(Flickr30kFeatureDataset, self).__init__()
self.add_spatial_features = args.add_spatial_features
self.dictionary = dictionary
self.use_visual_genome = args.get("use_visual_genome", True)
if self.use_visual_genome:
self.img_id2idx = cPickle.load(
open(os.path.join(data_root, '%s_imgid2idx.pkl' % name), 'rb'))
h5_path = os.path.join(data_root, '%s.hdf5' % name)
with h5py.File(h5_path, 'r') as hf:
self.features = np.array(hf.get('image_features'))
self.spatials = np.array(hf.get('spatial_features'))
self.bbox = np.array(hf.get('image_bb'))
self.pos_boxes = np.array(hf.get('pos_boxes'))
self.entries = _load_flickr30k(data_root, self.img_id2idx, self.bbox, self.pos_boxes, limit = None, cache_name = name)
else:
self.features_chunk = chunk
self.entries = entries
self.pretraining = args.pretraining
from pytorch_pretrained_bert.tokenization import BertTokenizer
self.do_lower_case = args.do_lower_case
self.bert_model_name = args.bert_model_name
self.max_seq_length = args.max_seq_length
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name, do_lower_case=self.do_lower_case)
self.masked_lm_prob = args.get("masked_lm_prob", 0.15)
@classmethod
def splits(cls, args):
data_root = args.data_root
if args.get("use_visual_genome", True):
chunk = None
train_entries = None
val_entries = None
test_entries = None
else:
assert(0)
'''
chunk = torch.load(chunk_data)
image_screening_parameters = args.image_screening_parameters
for image_id in chunk.keys():
image_feat_variable, image_boxes, confidence, image_h, image_w = chunk[image_id]
image_feat_variable, image_boxes, confidence = screen_feature(image_feat_variable, image_boxes, confidence, image_screening_parameters)
chunk[image_id] = (image_feat_variable, image_boxes, confidence, image_h, image_w)
train_ids = cPickle.load(
open(os.path.join(data_root, '%s_imgid2idx.pkl' % "train"), 'rb'))
val_ids = cPickle.load(
open(os.path.join(data_root, '%s_imgid2idx.pkl' % "val"), 'rb'))
test_ids = cPickle.load(
open(os.path.join(data_root, '%s_imgid2idx.pkl' % "test"), 'rb'))
val_ids = list(val_ids.keys())
train_ids = list(train_ids.keys())
test_ids = list(test_ids.keys())
entities_data_path = os.path.join(data_root, args.entries_path)
if not os.path.exists(entities_data_path):
entries = _load_flickr30k_our(data_root, chunk)
with open(entities_data_path, 'wb') as f:
cPickle.dump(entries, f)
else:
entries = cPickle.load(open(entities_data_path, "rb"))
train_entries = [i for i in entries if int(i["image"][:-4]) in train_ids]
val_entries = [i for i in entries if int(i["image"][:-4]) in val_ids]
test_entries = [i for i in entries if int(i["image"][:-4]) in test_ids]
'''
train = cls(name = "train", args = args, data_root = data_root, chunk = chunk, entries = train_entries)
train.is_train = True
val = cls(name = "val", args = args, data_root = data_root, chunk = chunk, entries = val_entries)
val.is_train = False
test = cls(name = "test", args = args, data_root = data_root, chunk = chunk, entries = test_entries)
test.is_train = False
return train, val, test
def tokenize(self, max_length=82):
"""Tokenizes the questions.
This will add q_token in each entry of the dataset.
-1 represent nil, and should be treated as padding_idx in embedding
"""
for entry in self.entries:
tokens = self.dictionary.tokenize(entry['sentence'], False)
tokens = tokens[:max_length]
if len(tokens) < max_length:
                # Note: the padding is appended to the end of the sentence
padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
tokens = tokens + padding
utils.assert_eq(len(tokens), max_length)
entry['p_token'] = tokens
def tensorize(self, max_box=100, max_entities=16, max_length=82):
self.features = torch.from_numpy(self.features)
self.spatials = torch.from_numpy(self.spatials)
for entry in self.entries:
phrase = torch.from_numpy(np.array(entry['p_token']))
entry['p_token'] = phrase
assert len(entry['target_indices']) == entry['entity_num']
assert len(entry['entity_indices']) == entry['entity_num']
target_tensors = []
for i in range(entry['entity_num']):
target_tensor = torch.zeros(1, max_box)
if len(entry['target_indices'][i]) > 0:
target_idx = torch.from_numpy(np.array(entry['target_indices'][i]))
target_tensor = torch.zeros(max_box).scatter_(0, target_idx, 1).unsqueeze(0)
target_tensors.append(target_tensor)
assert len(target_tensors) <= max_entities, '> %d entities!' % max_entities
for i in range(max_entities - len(target_tensors)):
target_tensor = torch.zeros(1, max_box)
target_tensors.append(target_tensor)
entry['entity_ids'].append(0)
# padding entity_indices with non-overlapping indices
entry['entity_indices'] += [x for x in range(max_length) if x not in entry['entity_indices']]
entry['entity_indices'] = entry['entity_indices'][:max_entities]
entry['target'] = torch.cat(target_tensors, 0)
# entity positions in (e) tensor
entry['e_pos'] = torch.LongTensor(entry['entity_indices'])
entry['e_num'] = torch.LongTensor([entry['entity_num']])
entry['entity_ids'] = torch.LongTensor(entry['entity_ids'])
entry['entity_types'] = torch.LongTensor(entry['entity_types'])
def __getitem__(self, index):
entry = self.entries[index]
sentence = entry['sentence']
e_pos = entry['entity_indices']
e_num = entry['entity_num']
target = entry['target_indices']
entity_ids = entry['entity_ids']
entity_types = entry['entity_types']
#v, b, p, e, n, a, idx, types
if self.use_visual_genome:
features = self.features[self.pos_boxes[entry['image']][0]:self.pos_boxes[entry['image']][1], :]
spatials = self.spatials[self.pos_boxes[entry['image']][0]:self.pos_boxes[entry['image']][1], :]
else:
image_id = entry["image"]
features, cls_boxes, max_conf, image_h, image_w = self.features_chunk[image_id]
if self.add_spatial_features:
features = np.concatenate((features, spatials), axis=1)
else:
spatials = None
sample = {}
image_feat_variable = ArrayField(features)
image_dim_variable = IntArrayField(np.array(len(features)))
sample["image_feat_variable"] = image_feat_variable
sample["image_dim_variable"] = image_dim_variable
tokenized_sentence, alignment = retokenize_with_alignment(sentence.split(" "), self.tokenizer)
e_pos_after_subword = []
current_index = 0
for position in e_pos:
for index, i in enumerate(alignment):
if i == position:
if index == len(alignment) - 1 or alignment[index+1] != i:
                        e_pos_after_subword.append(index + 1) # +1 because of the leading [CLS] token
        assert len(e_pos_after_subword) == len(e_pos) and len(e_pos_after_subword) == len(target)
# Need to convert target into soft scores:
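        # For instance (hypothetical values), with 4 boxes and target == [[0, 2], []],
        # new_target becomes [[0.5, 0.0, 0.5, 0.0], [0.0, 0.0, 0.0, 0.0]]:
        # the probability mass is split evenly across the matched boxes.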
target_len = features.shape[0]
new_target = []
for i in target:
new_i = [0.0] * target_len
if len(i) != 0:
score = 1.0 / len(i)
for j in i:
new_i[j] = score
new_target.append(new_i)
# target = entity_num x v_feature_size
target = ArrayField(np.array(new_target, dtype="float"), padding_value = 0.0)
original_position = IntArrayField(np.array(e_pos_after_subword, dtype="int"), padding_value = -1)
sample["label"] = target # Remember that sometimes that label is empty for certain entities, that's because the boxes we provided do not have a match.
sample["flickr_position"] = original_position
bert_example = InputExample(unique_id = -1, text_a = tokenized_sentence, text_b = None, is_correct = None, max_seq_length = self.max_seq_length)
if self.pretraining:
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = self.masked_lm_prob)
bert_feature.insert_field_into_dict(sample)
else:
bert_feature = InputFeatures.convert_one_example_to_features(
example = bert_example,
tokenizer=self.tokenizer)
bert_feature.insert_field_into_dict(sample)
return Instance(sample)
def __len__(self):
return len(self.entries)
@staticmethod
def collate_fn(data):
batch = Batch(data)
td = batch.as_tensor_dict()
return td
| 11,696 | 40.626335 | 158 | py |
visualbert | visualbert-master/visualbert/dataloaders/box_utils.py | import os
import random
import numpy as np
import scipy
import warnings
from torchvision.datasets.folder import default_loader
from torchvision.transforms import functional
USE_IMAGENET_PRETRAINED = True
##### Image
def load_image(img_fn):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
return default_loader(img_fn)
# # Load image
# image = skimage.io.imread(img_fn)
# # If grayscale. Convert to RGB for consistency.
# if image.ndim != 3:
# image = skimage.color.gray2rgb(image)
# # If has an alpha channel, remove it for consistency
# if image.shape[-1] == 4:
# image = image[..., :3]
# return image
# Aspect-ratio notes: let's do 16x9. Two common resolutions are 16x9 and 16x6, so go to 16x8 as that's simple.
# Say the width is 576: for Neural Motifs it was 576*576 = 331776 pixels; here 2x*x = 331776 gives x ~ 408,
# and the closest nice value divisible by 4 is 384.
def resize_image(image, desired_width=768, desired_height=384, random_pad=False):
"""Resizes an image keeping the aspect ratio mostly unchanged.
Returns:
image: the resized image
window: (x1, y1, x2, y2). If max_dim is provided, padding might
be inserted in the returned image. If so, this window is the
coordinates of the image part of the full image (excluding
the padding). The x2, y2 pixels are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [left, top, right, bottom]
"""
# Default window (x1, y1, x2, y2) and default scale == 1.
w, h = image.size
width_scale = desired_width / w
height_scale = desired_height / h
scale = min(width_scale, height_scale)
# Resize image using bilinear interpolation
if scale != 1:
image = functional.resize(image, (round(h * scale), round(w * scale)))
w, h = image.size
y_pad = desired_height - h
x_pad = desired_width - w
top_pad = random.randint(0, y_pad) if random_pad else y_pad // 2
left_pad = random.randint(0, x_pad) if random_pad else x_pad // 2
padding = (left_pad, top_pad, x_pad - left_pad, y_pad - top_pad)
assert all([x >= 0 for x in padding])
image = functional.pad(image, padding)
window = [left_pad, top_pad, w + left_pad, h + top_pad]
return image, window, scale, padding
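# A quick sanity check (hypothetical input): a 1536x768 image scales by 0.5 to 768x384,
# so resize_image returns window == [0, 0, 768, 384], scale == 0.5 and padding == (0, 0, 0, 0).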
if USE_IMAGENET_PRETRAINED:
def to_tensor_and_normalize(image):
return functional.normalize(functional.to_tensor(image), mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
else:
# For COCO pretrained
def to_tensor_and_normalize(image):
tensor255 = functional.to_tensor(image) * 255
return functional.normalize(tensor255, mean=(102.9801, 115.9465, 122.7717), std=(1, 1, 1))
| 2,765 | 35.88 | 119 | py |
visualbert | visualbert-master/visualbert/dataloaders/nlvr_dataset.py | import os
from torch.utils.data import Dataset
import numpy as np
import numpy
import random
import json
from collections import defaultdict
from tqdm import tqdm
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask
from dataloaders.box_utils import load_image, resize_image, to_tensor_and_normalize
from dataloaders.mask_utils import make_mask
from dataloaders.bert_field import BertField, IntArrayField
import h5py
from copy import deepcopy
from torch.utils.data.dataloader import default_collate
from pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
from .bert_data_utils import *
class NLVRDataset(Dataset):
def __init__(self, args):
super(NLVRDataset, self).__init__()
self.args = args
self.annots_path = args.annots_path
self.split = args.split
self.text_only = args.get("text_only", False)
self.no_next_sentence = args.get("no_next_sentence", False)
with open(self.annots_path, 'r') as f:
self.items = [json.loads(s) for s in f]
self.image_feat_reader = faster_RCNN_feat_reader()
self.image_screening_parameters = self.args.image_screening_parameters
if args.get("chunk_path", None) is not None:
self.chunk = torch.load(args.chunk_path)
average = 0.0
new_chunk = {}
for image_id in self.chunk.keys():
image_feat_variable, image_boxes, confidence = self.chunk[image_id]
if "npz" not in image_id:
new_chunk[image_id+".npz"] = screen_feature(image_feat_variable, image_boxes,confidence, self.image_screening_parameters)
average += new_chunk[image_id+".npz"][2]
else:
new_chunk[image_id] = screen_feature(image_feat_variable, image_boxes,confidence, self.image_screening_parameters)
average += new_chunk[image_id][2]
self.chunk = new_chunk
print("{} features on average.".format(average/len(self.chunk)))
##########
self.do_lower_case = args.do_lower_case
self.bert_model_name = args.bert_model_name
self.max_seq_length = args.max_seq_length
# 1. Initialize the BERT tokenizer
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name, do_lower_case=self.do_lower_case)
self.pretraining = args.pretraining
# This is for pretraining
self.masked_lm_prob = 0.15
self.max_predictions_per_seq = 20
def get_image_features_by_training_index(self, index, which_one):
item = self.items[index]
image_file_name = "{}img{}.png.npz".format(item['identifier'][:-1], which_one)
return self.chunk[image_file_name]
def __len__(self):
return len(self.items)
def __getitem__(self, index):
item = self.items[index]
sample = {}
if not self.text_only:
image_feat_variable_0, image_boxes_0, image_dim_variable_0 = self.get_image_features_by_training_index(index, 0)
image_feat_variable_1, image_boxes_1, image_dim_variable_1 = self.get_image_features_by_training_index(index, 1)
visual_embeddings_type_0 = np.zeros(image_feat_variable_0.shape[0])
visual_embeddings_type_1 = np.ones(image_feat_variable_1.shape[0])
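            # Token-type-style ids: regions from the first image get type 0 and regions from
            # the second get type 1, so the model can tell the two NLVR2 images apart.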
visual_embeddings_type = numpy.concatenate((visual_embeddings_type_0, visual_embeddings_type_1), axis = 0)
image_feat_variable = numpy.concatenate((image_feat_variable_0, image_feat_variable_1), axis = 0)
image_dim_variable = image_dim_variable_0 + image_dim_variable_1
image_feat_variable = torch.Tensor(image_feat_variable)
#image_boxes = ArrayField(image_boxes)
image_dim_variable = torch.LongTensor([image_dim_variable])
visual_embeddings_type = torch.LongTensor(visual_embeddings_type)
sample["image_feat_variable"] = image_feat_variable
#sample["image_boxes"] = image_boxes
sample["image_dim_variable"] = image_dim_variable
sample["visual_embeddings_type"] = visual_embeddings_type
caption_a = item["sentence"]
if item.get("label", None) is not None:
sample["label"] = torch.LongTensor([1 if item["label"] == "True" else 0])
else:
sample["label"] = torch.LongTensor([0]) # Pseudo label
if self.pretraining:
subword_tokens_a = self.tokenizer.tokenize(caption_a)
if self.no_next_sentence:
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = None, is_correct=None, max_seq_length = self.max_seq_length)
else:
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = None, is_correct=1 if item["label"] == "True" else 0, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = 0.15)
bert_feature.insert_tensor_into_dict(sample)
else:
subword_tokens_a = self.tokenizer.tokenize(caption_a)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = None, is_correct=1 if item.get("label", None) == "True" else 0, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features(
example = bert_example,
tokenizer=self.tokenizer)
bert_feature.insert_tensor_into_dict(sample)
return sample
@classmethod
def splits(cls, args):
data_root = args.data_root
args_copy = deepcopy(args)
args_copy.split = "train"
args_copy.annots_path = os.path.join(data_root, "{}.json".format(args_copy.split))
if args.image_screening_parameters["image_feature_cap"] is not None and args.image_screening_parameters["image_feature_cap"] > 36:
args_copy.chunk_path = os.path.join(data_root, "features_{}_150.th".format(args_copy.split))
else:
args_copy.chunk_path = os.path.join(data_root, "features_chunk_{}.th".format(args_copy.split))
if args.get("do_test", False):
trainset = None
validationset = None
else:
trainset = cls(args_copy)
trainset.is_train = True
args_copy = deepcopy(args)
args_copy.split = "dev"
args_copy.annots_path = os.path.join(data_root, "{}.json".format(args_copy.split))
if args.image_screening_parameters["image_feature_cap"] is not None and args.image_screening_parameters["image_feature_cap"] > 36:
args_copy.chunk_path = os.path.join(data_root, "features_{}_150.th".format(args_copy.split))
else:
args_copy.chunk_path = os.path.join(data_root, "features_chunk_{}.th".format(args_copy.split))
validationset = cls(args_copy)
validationset.is_train = False
args_copy = deepcopy(args)
args_copy.split = "test1"
if args.get("test_on_hidden", False):
args_copy.split = "test2"
args_copy.annots_path = os.path.join(data_root, "{}.json".format(args_copy.split))
if args.image_screening_parameters["image_feature_cap"] is not None and args.image_screening_parameters["image_feature_cap"] > 36:
args_copy.chunk_path = os.path.join(data_root, "features_{}_150.th".format(args_copy.split))
else:
args_copy.chunk_path = os.path.join(data_root, "features_chunk_{}.th".format(args_copy.split))
testset = cls(args_copy)
testset.is_train = False
if args.get("do_test", False):
trainset = testset
validationset = testset
return trainset, validationset, testset
@staticmethod
def collate_fn(data):
if isinstance(data[0], dict):
for index, i in enumerate(data):
if "image_feat_variable" in i:
i["image_feat_variable"] = ArrayTensorField(i["image_feat_variable"])
i["image_dim_variable"] = IntArrayTensorField(i["image_dim_variable"])
i["visual_embeddings_type"] = IntArrayTensorField(i["visual_embeddings_type"])
i["bert_input_ids"] = IntArrayTensorField(i["bert_input_ids"])
i["bert_input_mask"] = IntArrayTensorField(i["bert_input_mask"])
i["bert_input_type_ids"] = IntArrayTensorField(i["bert_input_type_ids"])
if "masked_lm_labels" in i:
i["masked_lm_labels"] = IntArrayTensorField(i["masked_lm_labels"], padding_value = -1)
if "is_random_next" in i:
i["is_random_next"] = IntArrayTensorField(i["is_random_next"])
i['label'] = IntArrayTensorField(i['label'])
data[index] = Instance(i)
batch = Batch(data)
td = batch.as_tensor_dict()
td["label"] = td["label"].squeeze(-1)
return td | 9,904 | 44.645161 | 196 | py |
visualbert | visualbert-master/visualbert/dataloaders/bert_field.py | from typing import Dict, List, Optional
import textwrap
from overrides import overrides
from spacy.tokens import Token as SpacyToken
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.tokenizers.token import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer, TokenType
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn import util
import numpy
TokenList = List[TokenType] # pylint: disable=invalid-name
# This will work for anything really
class BertField(SequenceField[Dict[str, torch.Tensor]]):
"""
A class representing an array, which could have arbitrary dimensions.
A batch of these arrays are padded to the max dimension length in the batch
for each dimension.
"""
def __init__(self, tokens: List[Token], embs: numpy.ndarray, padding_value: int = 0,
token_indexers=None) -> None:
self.tokens = tokens
self.embs = embs
self.padding_value = padding_value
if len(self.tokens) != self.embs.shape[0]:
raise ValueError("The tokens you passed into the BERTField, {} "
"aren't the same size as the embeddings of shape {}".format(self.tokens, self.embs.shape))
@overrides
def sequence_length(self) -> int:
return len(self.tokens)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {'num_tokens': self.sequence_length()}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> Dict[str, torch.Tensor]:
num_tokens = padding_lengths['num_tokens']
new_arr = numpy.ones((num_tokens, self.embs.shape[1]),
dtype=numpy.float32) * self.padding_value
new_arr[:self.sequence_length()] = self.embs
tensor = torch.from_numpy(new_arr)
return {'bert': tensor}
@overrides
def empty_field(self):
return BertField([], numpy.array([], dtype="float32"),padding_value=self.padding_value)
@overrides
def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
# pylint: disable=no-self-use
# This is creating a dict of {token_indexer_key: batch_tensor} for each token indexer used
# to index this field.
return util.batch_tensor_dicts(tensor_list)
def __str__(self) -> str:
return f"BertField: {self.tokens} and {self.embs.shape}."
from typing import Dict
import numpy
import torch
from overrides import overrides
from allennlp.data.fields.field import Field
class IntArrayField(Field[numpy.ndarray]):
"""
Modified by Harold.
    The default ArrayField in AllenNLP is float-typed; here we want an int array.
"""
def __init__(self, array: numpy.ndarray, padding_value: int = 0) -> None:
self.array = array
self.padding_value = padding_value
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"dimension_" + str(i): shape
for i, shape in enumerate(self.array.shape)}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
        # Convert explicitly to an ndarray just in case it's a scalar (it'd end up not being an ndarray otherwise)
return_array = numpy.asarray(numpy.ones(max_shape, "int64") * self.padding_value)
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(self.array.shape)
if len(self.array.shape) < len(max_shape):
slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = self.array
tensor = torch.from_numpy(return_array)
return tensor
@overrides
def empty_field(self): # pylint: disable=no-self-use
# Pass the padding_value, so that any outer field, e.g., `ListField[ArrayField]` uses the
# same padding_value in the padded ArrayFields
return IntArrayField(numpy.array([], dtype="int64"), padding_value=self.padding_value)
def __str__(self) -> str:
return f"ArrayField with shape: {self.array.shape}."
class IntArrayTensorField(Field[numpy.ndarray]):
"""
Modified by Harold.
    The default ArrayField in AllenNLP is float-typed; here we want an int array
    backed directly by a torch Tensor.
A class representing an array, which could have arbitrary dimensions.
A batch of these arrays are padded to the max dimension length in the batch
for each dimension.
"""
def __init__(self, array, padding_value: int = 0) -> None:
self.array = array
self.padding_value = padding_value
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"dimension_" + str(i): shape
for i, shape in enumerate(self.array.size())}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
        # Build the padded output tensor, filled with the padding value.
return_array = torch.ones(max_shape, dtype = torch.int64) * self.padding_value
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(self.array.size())
if len(self.array.size()) < len(max_shape):
slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.size()))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = self.array
return return_array
@overrides
def empty_field(self): # pylint: disable=no-self-use
# Pass the padding_value, so that any outer field, e.g., `ListField[ArrayField]` uses the
# same padding_value in the padded ArrayFields
return IntArrayTensorField(torch.LongTensor([]), padding_value=self.padding_value)
def __str__(self) -> str:
return f"ArrayField with shape: {self.array.shape}."
class ArrayTensorField(Field[numpy.ndarray]):
def __init__(self, array, padding_value: int = 0) -> None:
self.array = array
self.padding_value = padding_value
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"dimension_" + str(i): shape
for i, shape in enumerate(self.array.size())}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
        # Build the padded output tensor, filled with the padding value.
return_array = torch.ones(max_shape, dtype = torch.float) * self.padding_value
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(self.array.size())
if len(self.array.size()) < len(max_shape):
slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.size()))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = self.array
return return_array
@overrides
def empty_field(self): # pylint: disable=no-self-use
# Pass the padding_value, so that any outer field, e.g., `ListField[ArrayField]` uses the
# same padding_value in the padded ArrayFields
return ArrayTensorField(torch.Tensor([]), padding_value=self.padding_value)
def __str__(self) -> str:
return f"ArrayField with shape: {self.array.size()}." | 8,295 | 40.273632 | 119 | py |
visualbert | visualbert-master/visualbert/dataloaders/vcr_data_utils.py | # Adapted from data/get_bert_embedding/vcr_loader.py in R2C; renamed to make the adaptation clear.
import json
from collections import defaultdict
from tqdm import tqdm
from .bert_data_utils import InputExample, InputFeatures
GENDER_NEUTRAL_NAMES = ['Casey', 'Riley', 'Jessie', 'Jackie', 'Avery', 'Jaime', 'Peyton', 'Kerry', 'Jody', 'Kendall',
'Peyton', 'Skyler', 'Frankie', 'Pat', 'Quinn',
]
##################################################################################################
def _fix_tokenization(tokenized_sent, obj_to_type, det_hist=None):
if det_hist is None:
det_hist = {}
else:
det_hist = {k: v for k, v in det_hist.items()}
obj2count = defaultdict(int)
# Comment this in if you want to preserve stuff from the earlier rounds.
for v in det_hist.values():
obj2count[v.split('_')[0]] += 1
new_tokenization = []
for i, tok in enumerate(tokenized_sent):
if isinstance(tok, list):
for int_name in tok:
if int_name not in det_hist:
if obj_to_type[int_name] == 'person':
det_hist[int_name] = GENDER_NEUTRAL_NAMES[obj2count['person'] % len(GENDER_NEUTRAL_NAMES)]
else:
det_hist[int_name] = obj_to_type[int_name]
obj2count[obj_to_type[int_name]] += 1
new_tokenization.append(det_hist[int_name])
else:
new_tokenization.append(tok)
return new_tokenization, det_hist
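# A worked example (hypothetical values): with obj_to_type = ['person', 'car'],
# _fix_tokenization(['Does', [0], 'like', [1], '?'], obj_to_type) yields
# (['Does', 'Casey', 'like', 'car', '?'], {0: 'Casey', 1: 'car'}):
# person tags become gender-neutral names, other objects become their type strings.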
def fix_item(item, answer_label=None, rationales=True):
if rationales:
assert answer_label is not None
ctx = item['question'] + item['answer_choices'][answer_label]
else:
ctx = item['question']
q_tok, hist = _fix_tokenization(ctx, item['objects'])
choices = item['rationale_choices'] if rationales else item['answer_choices']
a_toks = [_fix_tokenization(choice, obj_to_type=item['objects'], det_hist=hist)[0] for choice in choices]
return q_tok, a_toks
def retokenize_with_alignment(span, tokenizer):
tokens = []
alignment = []
for i, tok in enumerate(span):
for token in tokenizer.basic_tokenizer.tokenize(tok):
for sub_token in tokenizer.wordpiece_tokenizer.tokenize(token):
tokens.append(sub_token)
alignment.append(i)
return tokens, alignment
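# A rough sketch of the output (assuming an uncased BERT tokenizer, so exact subwords may vary):
# retokenize_with_alignment(['playing', 'chess'], tokenizer) gives something like
# (['playing', 'chess'], [0, 1]), while a word split into subwords repeats its index,
# e.g. ['snowboarding'] -> (['snow', '##board', '##ing'], [0, 0, 0]).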
def process_ctx_ans_for_bert(ctx_raw, ans_raw, tokenizer, counter, endingonly, max_seq_length, is_correct):
"""
Processes a Q/A pair for BERT
:param ctx_raw:
:param ans_raw:
:param tokenizer:
:param counter:
:param endingonly:
:param max_seq_length:
:param is_correct:
:return:
"""
context = retokenize_with_alignment(ctx_raw, tokenizer)
answer = retokenize_with_alignment(ans_raw, tokenizer)
if endingonly:
take_away_from_ctx = len(answer[0]) - max_seq_length + 2
if take_away_from_ctx > 0:
answer = (answer[0][take_away_from_ctx:],
answer[1][take_away_from_ctx:])
return InputExample(unique_id=counter, text_a=answer[0], text_b=None,
is_correct=is_correct), answer[1], None
len_total = len(context[0]) + len(answer[0]) + 3
if len_total > max_seq_length:
        take_away_from_ctx = min((len_total - max_seq_length + 1) // 2, max(len(context[0]) - 32, 0))  # count context tokens, not the (tokens, alignment) tuple
take_away_from_answer = len_total - max_seq_length + take_away_from_ctx
context = (context[0][take_away_from_ctx:],
context[1][take_away_from_ctx:])
answer = (answer[0][take_away_from_answer:],
answer[1][take_away_from_answer:])
#print("FOR Q{} A {}\nLTotal was {} so take away {} from ctx and {} from answer".format(' '.join(context[0]), ' '.join(answer[0]), len_total, take_away_from_ctx,take_away_from_answer), flush=True)
#print(len(context[0]) + len(answer[0]) + 3)
assert len(context[0]) + len(answer[0]) + 3 <= max_seq_length
return InputExample(unique_id=counter,
text_a=context[0],
text_b=answer[0], is_correct=is_correct), context[1], answer[1]
def data_iter(data_fn, tokenizer, max_seq_length, endingonly):
counter = 0
with open(data_fn, 'r') as f:
for line_no, line in enumerate(tqdm(f)):
item = json.loads(line)
q_tokens, a_tokens = fix_item(item, rationales=False)
qa_tokens, r_tokens = fix_item(item, answer_label=item['answer_label'], rationales=True)
for (name, ctx, answers) in (('qa', q_tokens, a_tokens), ('qar', qa_tokens, r_tokens)):
for i in range(4):
is_correct = item['answer_label' if name == 'qa' else 'rationale_label'] == i
yield process_ctx_ans_for_bert(ctx, answers[i], tokenizer, counter=counter, endingonly=endingonly,
max_seq_length=max_seq_length, is_correct=is_correct)
counter += 1
def data_iter_item(item, tokenizer, max_seq_length, endingonly, include_qar = False, only_qar = False):
counter = 0
q_tokens, a_tokens = fix_item(item, rationales=False)
returned_list = []
if include_qar:
qa_tokens, r_tokens = fix_item(item, answer_label=item['answer_label'], rationales=True)
tuples_to_process = [('qa', q_tokens, a_tokens), ('qar', qa_tokens, r_tokens)]
elif only_qar:
qa_tokens, r_tokens = fix_item(item, answer_label=item['answer_label'], rationales=True)
        tuples_to_process = [('qar', qa_tokens, r_tokens)]
else:
tuples_to_process = [('qa', q_tokens, a_tokens)]
for (name, ctx, answers) in tuples_to_process:
for i in range(4):
            try:
                is_correct = item['answer_label' if name == 'qa' else 'rationale_label'] == i
            except KeyError:
                # Test items carry no gold labels.
                is_correct = None
returned_list.append(process_ctx_ans_for_bert(ctx, answers[i], tokenizer, counter=counter, endingonly=endingonly, max_seq_length=max_seq_length, is_correct=is_correct))
counter += 1
return returned_list
def data_iter_test(data_fn, tokenizer, max_seq_length, endingonly):
""" Essentially this needs to be a bit separate from data_iter because we don't know which answer is correct."""
counter = 0
with open(data_fn, 'r') as f:
for line_no, line in enumerate(tqdm(f)):
item = json.loads(line)
q_tokens, a_tokens = fix_item(item, rationales=False)
# First yield the answers
for i in range(4):
yield process_ctx_ans_for_bert(q_tokens, a_tokens[i], tokenizer, counter=counter, endingonly=endingonly,
max_seq_length=max_seq_length, is_correct=False)
counter += 1
for i in range(4):
qa_tokens, r_tokens = fix_item(item, answer_label=i, rationales=True)
for r_token in r_tokens:
yield process_ctx_ans_for_bert(qa_tokens, r_token, tokenizer, counter=counter,
endingonly=endingonly,
max_seq_length=max_seq_length, is_correct=False)
counter += 1
| 7,436 | 42.747059 | 204 | py |
visualbert | visualbert-master/visualbert/dataloaders/vqa_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from torch.utils.data import Dataset
import numpy as np
from copy import deepcopy
import torch
from torch.utils.data.dataloader import default_collate
from allennlp.data.instance import Instance
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from dataloaders.bert_field import IntArrayField
from .bert_data_utils import *
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
imdb_version = 1 # Not sure what this does... Just follow it
import random
import json
def compute_answer_scores(answers, num_of_answers, unk_idx):
scores = np.zeros((num_of_answers), np.float32)
for answer in set(answers):
if answer == unk_idx:
scores[answer] = 0
else:
answer_count = answers.count(answer)
scores[answer] = min(np.float32(answer_count)*0.3, 1)
return scores
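# A worked example (hypothetical values): with num_of_answers == 10, unk_idx == 0 and
# answers == [3, 3, 3, 7], answer 3 scores min(3 * 0.3, 1) == 0.9 and answer 7 scores 0.3,
# mirroring soft VQA accuracy targets; the unk answer is forced to 0.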
def read_in_image_feats(image_dirs, image_readers, image_file_name):
image_feats = []
for i, image_dir in enumerate(image_dirs):
image_feat_path = os.path.join(image_dir, image_file_name)
tmp_image_feat = image_readers[i].read(image_feat_path)
image_feats.append(tmp_image_feat)
return image_feats
class VQADataset(Dataset):
def __init__(self, args, chunk_train = None, chunk_val = None): # Using args is not exactly a very good coding habit...
super(VQADataset, self).__init__()
if isinstance(args.imdb_file, list) or isinstance(args.imdb_file, tuple): # For training dataset, the imdb_file is a list of strs, containing train and val:
imdb = np.load(args.imdb_file[0], allow_pickle = True)[1:]
for i in args.imdb_file[1:]:
imdb_i = np.load(i, allow_pickle = True)[1:]
imdb = np.concatenate((imdb, imdb_i))
else:
if args.imdb_file.endswith('.npy'):
imdb = np.load(args.imdb_file, allow_pickle = True)[1:]
else:
raise TypeError('unknown imdb format.')
self.items = imdb
self.chunk_train = chunk_train
self.chunk_val = chunk_val
self.args = args
self.data_root = args.data_root
self.use_visual_genome = args.use_visual_genome
self.pretraining = args.pretraining
self.include_res152 = args.get('include_res152', False)
self.no_next_sentence = args.get("no_next_sentence", False)
self.false_caption_ratio = args.get("false_caption_ratio", 0.5)
# the answer dict is always loaded, regardless of self.load_answer
self.answer_dict = VocabDict(args.vocab_answer_file)
self.do_lower_case = args["do_lower_case"]
self.bert_model_name = args.bert_model_name
self.max_seq_length = args.max_seq_length
self.pretraining = args.pretraining
self.masked_lm_prob = args.get("masked_lm_prob", 0.15)
self.tokenizer = BertTokenizer.from_pretrained(args["bert_model_name"], do_lower_case=args["do_lower_case"])
        self.advanced_vqa = args.model.training_head_type == "vqa_advanced"
if self.advanced_vqa:
tokenized_list = []
for i in self.answer_dict.word_list:
tokenized_list.append(self.tokenizer.tokenize(i))
max_len = max(len(i) for i in tokenized_list)
for index, i in enumerate(tokenized_list):
if len(i) < max_len:
tokenized_list[index] = i + ["[MASK]"] * (max_len - len(i))
self.tokenized_list = tokenized_list
def __len__(self):
return len(self.items)
def get_image_features_by_training_index(self, index):
if not self.use_visual_genome:
item = self.items[index]
image_file_name = self.items[index]['image_name'] + ".jpg.npz"
            try:
                return self.chunk_train[image_file_name]
            except KeyError:
                return self.chunk_val[image_file_name]
else:
iminfo = self.items[index]
image_file_name = iminfo['image_name'] + ".npy"
if "train" in image_file_name:
folder = os.path.join(self.data_root, "data/detectron_fix_100/fc6/vqa/train2014")
elif "val" in image_file_name:
folder = os.path.join(self.data_root, "data/detectron_fix_100/fc6/vqa/val2014")
elif "test" in image_file_name:
folder = os.path.join(self.data_root, "data/detectron_fix_100/fc6/vqa/test2015")
detectron_features = np.load(os.path.join(folder, image_file_name))
image_feat_variable = detectron_features
image_dim_variable = image_feat_variable.shape[0]
visual_embeddings_type = None
return image_feat_variable, None, image_dim_variable
def __getitem__(self, index):
iminfo = self.items[index]
image_feat_variable, image_boxes, image_dim_variable = self.get_image_features_by_training_index(index)
sample = {}
image_feat_variable = ArrayField(image_feat_variable)
image_dim_variable = IntArrayField(np.array(image_dim_variable))
sample["image_feat_variable"] = image_feat_variable
sample["image_dim_variable"] = image_dim_variable
answer = None
valid_answers_idx = np.zeros((10), np.int32)
valid_answers_idx.fill(-1)
answer_scores = np.zeros(self.answer_dict.num_vocab, np.float32)
if 'answer' in iminfo:
answer = iminfo['answer']
elif 'valid_answers' in iminfo:
valid_answers = iminfo['valid_answers']
answer = np.random.choice(valid_answers)
            ans_idx = [self.answer_dict.word2idx(ans) for ans in valid_answers]
            valid_answers_idx[:len(valid_answers)] = ans_idx
answer_scores = (compute_answer_scores(ans_idx,
self.answer_dict.num_vocab,
self.answer_dict.UNK_idx))
if answer is not None:
answer_idx = self.answer_dict.word2idx(answer)
if self.advanced_vqa:
new_answer = self.tokenized_list[self.answer_dict.word2idx(answer)]
subword_tokens = self.tokenizer.tokenize(" ".join(iminfo['question_tokens']))
subword_tokens = ["[CLS]"] + subword_tokens + ["?"] # We will use the last word to do predictio
masked_lm_labels = [-1] * len(subword_tokens)
for i in new_answer:
subword_tokens.append("[MASK]")
masked_lm_labels.append(self.tokenizer.vocab[i])
subword_tokens.append("[SEP]")
masked_lm_labels.append(-1)
input_ids = []
for i in subword_tokens:
input_ids.append(self.tokenizer.vocab[i])
bert_feature = InputFeatures(
unique_id = -1,
tokens = subword_tokens,
input_ids = input_ids,
input_mask = [1] * len(input_ids),
input_type_ids = [0] * len(input_ids),
is_correct = 1,
lm_label_ids = masked_lm_labels
)
bert_feature.insert_field_into_dict(sample)
else:
if self.pretraining:
item = iminfo
if self.no_next_sentence:
                    label = None
subword_tokens_a = self.tokenizer.tokenize(" ".join(item['question_tokens'])) + ["?"]
subword_tokens_b = self.tokenizer.tokenize(" ".join(answer))
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a + subword_tokens_b, text_b = None, is_correct = None, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = 0.15)
else:
assert(0) # Should not use this part
'''if random.random() > self.false_caption_ratio:
answer = answer
label = 1
else:
while(True):
wrong_answer = np.random.choice(self.answer_dict.word_list)
if wrong_answer not in valid_answers:
wrong_answer = answer
label = 0
break
subword_tokens_a = self.tokenizer.tokenize(" ".join(item['question_tokens'])) + ["?"]
subword_tokens_b = self.tokenizer.tokenize(" ".join(answer))
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = subword_tokens_b, is_correct = label, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = 0.15)'''
bert_feature.insert_field_into_dict(sample)
else:
item = iminfo
subword_tokens = self.tokenizer.tokenize(" ".join(item['question_tokens']))
if self.no_next_sentence:
subword_tokens = subword_tokens + ["?", "[MASK]"] # We will use the last word to do predictio
subwords_b = None
else:
subword_tokens = subword_tokens + ["?"]
subwords_b = ["[MASK]"]
bert_example = InputExample(unique_id = -1, text_a = subword_tokens, text_b = subwords_b,max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features(bert_example,tokenizer =self.tokenizer)
bert_feature.insert_field_into_dict(sample)
if answer is not None:
sample['label'] = ArrayField(np.array(answer_scores))
return Instance(sample)
@staticmethod
def collate_fn(data):
if isinstance(data[0], Instance):
batch = Batch(data)
td = batch.as_tensor_dict()
return td
@classmethod
def splits(cls, args):
""" Helper method to generate splits of the dataset"""
data_root = args.data_root
if not args.use_visual_genome:
assert(0) # This part should not be used
'''chunk_train = torch.load(os.path.join(data_root, "coco/features_chunk_train.th"))
chunk_val = torch.load(os.path.join(data_root, "coco/features_chunk_val.th"))
print("Processing imges...")
average = 0.0
for image_id in chunk_train.keys():
image_feat_variable, image_boxes, confidence = chunk_train[image_id]
chunk_train[image_id] = screen_feature(image_feat_variable, image_boxes,confidence, args.image_screening_parameters)
average += chunk_train[image_id][2]
print("{} features on average.".format(average/len(chunk_train)))
for image_id in chunk_val.keys():
image_feat_variable, image_boxes, confidence = chunk_val[image_id]
chunk_val[image_id] = screen_feature(image_feat_variable, image_boxes,confidence, args.image_screening_parameters)
average += chunk_val[image_id][2]'''
else:
chunk_train = None
chunk_val = None
args_copy = deepcopy(args)
args_copy.vocab_answer_file = os.path.join(data_root, "data/answers_vqa.txt")
args_copy.imdb_file = [os.path.join(data_root, "data/imdb/imdb_train2014.npy"), os.path.join(data_root, "data/imdb/imdb_val2014.npy")] #imdb_val2train2014, imdb_val2014
train = cls(args_copy, chunk_train = chunk_train, chunk_val = chunk_val)
train.is_train = True
args_copy_1 = deepcopy(args_copy)
args_copy_1.imdb_file = os.path.join(data_root, "data/imdb/imdb_minival2014.npy")
val = cls(args_copy_1, chunk_train = chunk_train, chunk_val = chunk_val)
val.is_train = False
args_copy_2 = deepcopy(args_copy)
args_copy_2.imdb_file = os.path.join(data_root, "data/imdb/imdb_test2015.npy")
test = cls(args_copy_2)
test.is_train = False
return train, val, test
def generate_test_file(self, logits, out_file):
assert(len(self.items) == logits.size(0))
out_list = []
for index, i in enumerate(self.items):
question_id = i["question_id"]
out_list.append(
{
"question_id": question_id,
"answer": self.answer_dict.idx2word(logits[index].argmax(0))
}
)
with open(out_file, "w") as f:
json.dump(out_list, f)
import re
SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)')
def tokenize(sentence):
sentence = sentence.lower()
sentence = (
sentence.replace(',', '').replace('?', '').replace('\'s', ' \'s'))
tokens = SENTENCE_SPLIT_REGEX.split(sentence)
tokens = [t.strip() for t in tokens if len(t.strip()) > 0]
return tokens
def load_str_list(fname):
with open(fname) as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
return lines
class VocabDict:
def __init__(self, vocab_file):
self.word_list = load_str_list(vocab_file)
self.word2idx_dict = {w: n_w for n_w, w in enumerate(self.word_list)}
self.num_vocab = len(self.word_list)
self.UNK_idx = (self.word2idx_dict['<unk>']
if '<unk>' in self.word2idx_dict else None)
def idx2word(self, n_w):
return self.word_list[n_w]
def word2idx(self, w):
if w in self.word2idx_dict:
return self.word2idx_dict[w]
elif self.UNK_idx is not None:
return self.UNK_idx
else:
            raise ValueError('word %s not in dictionary '
                             '(while dictionary does not contain <unk>)' % w)
def tokenize_and_index(self, sentence):
inds = [self.word2idx(w) for w in tokenize(sentence)]
return inds
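
# Illustrative sketch (added for exposition, not used by the training pipeline):
# how `tokenize` and `VocabDict` interact. The vocabulary below is hypothetical
# and written to a temporary file purely for demonstration.
def _demo_vocab_dict():
    import os
    import tempfile
    words = ["<unk>", "what", "color", "is", "the", "dog"]
    fd, path = tempfile.mkstemp(suffix=".txt")
    with os.fdopen(fd, "w") as f:
        f.write("\n".join(words))
    vocab = VocabDict(path)
    # "cat" is out of vocabulary, so it falls back to the <unk> index (0).
    assert vocab.tokenize_and_index("What color is the cat?") == [1, 2, 3, 4, 0]
    os.remove(path)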
| 14,744 | 41.615607 | 184 | py |
visualbert | visualbert-master/visualbert/dataloaders/bert_data_utils.py | # Functions to convert raw strings into BERT input feature (InputFeatures' class method)
# Some functions for reading image features
# To take care of padding, we will use AllenNLP's Field;
# Caveat: we pad sequences with zero, with one exception: the label sequence for BERT's masked-LM objective is padded with -1.
import os
import copy
import json
import random
from collections import defaultdict
from copy import deepcopy

import numpy as np
import h5py
import torch
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from tqdm import tqdm

from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask

from pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from .box_utils import load_image, resize_image, to_tensor_and_normalize
from .mask_utils import make_mask
from .bert_field import BertField, IntArrayField, IntArrayTensorField, ArrayTensorField
class InputExample(object):
def __init__(self, unique_id=None, text_a=None, text_b=None, is_correct=True, lm_labels=None, max_seq_length = None):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
self.is_correct = is_correct # This sort of serves as the correct label as well as the is_next label
        # lm_labels should always be None at construction time.
assert(lm_labels is None)
self.lm_labels = lm_labels # masked words for language model
if max_seq_length is not None:
self.perform_truncate(max_seq_length)
def perform_truncate(self, max_seq_length):
if self.text_b is None:
len_total = len(self.text_a) + 2
self.text_a = self.text_a[:max_seq_length - 2]
else:
len_total = len(self.text_a) + len(self.text_b) + 3
if len_total > max_seq_length:
take_away_from_ctx = min((len_total - max_seq_length + 1) // 2, max(len(self.text_a) - 32, 0))
                take_away_from_answer = len_total - max_seq_length - take_away_from_ctx
# Follows VCR, perform truncate from the front...
self.text_a = self.text_a[take_away_from_ctx:]
self.text_b = self.text_b[take_away_from_answer:]
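
# A minimal sketch of the truncation arithmetic above, on hypothetical lengths:
# 60 context tokens and 20 answer tokens need 60 + 20 + 3 = 83 slots, so with
# max_seq_length=48 exactly 35 tokens are dropped, split between the context
# (which is never cut below 32 tokens) and the answer, both cut from the front.
def _demo_perform_truncate():
    ex = InputExample(text_a=["a"] * 60, text_b=["b"] * 20)
    ex.perform_truncate(max_seq_length=48)
    assert len(ex.text_a) == 42 and len(ex.text_b) == 3
    assert len(ex.text_a) + len(ex.text_b) + 3 == 48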
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids, is_correct, lm_label_ids=None):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
self.is_correct = is_correct
self.lm_label_ids = lm_label_ids
# For compatiblity with Huggingface Models:
self.segment_ids = input_type_ids
self.is_next = is_correct
# Convert one sentence_a + sentence_b to pre-training example
@classmethod
def convert_one_example_to_features(cls, example, tokenizer):
        # Note: this is different because we've already tokenized.
tokens_a = example.text_a
# tokens_b = example.text_b
tokens_b = None
if example.text_b:
tokens_b = example.text_b
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
return cls(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids,
is_correct=example.is_correct)
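
    # Illustrative sketch of the [CLS]/[SEP] layout produced above. The stub
    # tokenizer is hypothetical; the real pipeline passes a BertTokenizer.
    @staticmethod
    def _demo_convert_one_example():
        class _StubTokenizer:
            def convert_tokens_to_ids(self, tokens):
                return list(range(len(tokens)))
        ex = InputExample(unique_id=0, text_a=["is", "this", "a", "dog"],
                          text_b=["no", "it", "is", "not"])
        feat = InputFeatures.convert_one_example_to_features(ex, _StubTokenizer())
        assert feat.tokens == ["[CLS]", "is", "this", "a", "dog", "[SEP]",
                               "no", "it", "is", "not", "[SEP]"]
        # type id 0 covers [CLS] + text_a + [SEP]; type id 1 covers text_b + [SEP].
        assert feat.input_type_ids == [0] * 6 + [1] * 5
        assert feat.input_mask == [1] * 11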
@classmethod
def convert_examples_to_features(cls, examples, tokenizer):
# This one does not pad
max_len = 0
features = []
for (ex_index, example) in enumerate(examples):
feature = cls.convert_one_example_to_features(example, tokenizer)
if max_len < len(feature.input_ids):
max_len = len(feature.input_ids)
features.append(feature)
for i in features:
# Zero-pad up to the sequence length.
while len(i.input_ids) < max_len:
i.input_ids.append(0)
i.input_mask.append(0)
i.input_type_ids.append(0)
assert len(i.input_ids) == max_len
assert len(i.input_mask) == max_len
assert len(i.input_type_ids) == max_len
return features
@classmethod
def convert_one_example_to_features_pretraining(cls, example, tokenizer, probability):
        ############ Modified by Harold
        # This function does not care about padding; we leave that to AllenNLP's fields.
        # But we need to be extra careful about the padding index:
        # not everything is padded with zero.
"""
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param example: InputExample, containing sentence input as strings and is_next label
:param max_seq_length: int, maximum length of sequence.
:param tokenizer: Tokenizer
:return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
"""
tokens_a = example.text_a
tokens_b = None
if example.text_b:
tokens_b = example.text_b
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
#_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
tokens_a, t1_label = random_word(tokens_a, tokenizer, probability)
if tokens_b:
tokens_b, t2_label = random_word(tokens_b, tokenizer, probability)
# concatenate lm labels and account for CLS, SEP, SEP
if tokens_b:
lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])
else:
lm_label_ids = ([-1] + t1_label + [-1])
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
assert len(tokens_b) > 0
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
return cls(unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=segment_ids,
lm_label_ids=lm_label_ids,
is_correct=example.is_correct)
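
    # Sketch of the masked-LM label layout: with probability 0, random_word
    # (as implemented in this repo's fine_tuning module) masks nothing, so
    # lm_label_ids is -1 everywhere, one slot per token including specials;
    # only masked positions would ever contribute to the LM loss.
    @staticmethod
    def _demo_pretraining_labels():
        class _StubTokenizer:
            def convert_tokens_to_ids(self, tokens):
                return list(range(len(tokens)))
        ex = InputExample(unique_id=0, text_a=["a", "b"], text_b=["c"], is_correct=1)
        feat = InputFeatures.convert_one_example_to_features_pretraining(
            example=ex, tokenizer=_StubTokenizer(), probability=0.0)
        # [CLS] a b [SEP] c [SEP] -> six positions, all unmasked.
        assert feat.lm_label_ids == [-1, -1, -1, -1, -1, -1]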
def convert_to_allennlp_feild(self):
self.input_ids_field = IntArrayField(np.array(self.input_ids, dtype="int"), padding_value = 0)
self.input_mask_field = IntArrayField(np.array(self.input_mask, dtype="int"))
self.input_type_ids_field = IntArrayField(np.array(self.segment_ids, dtype="int"))
# Padding Value = -1
if self.lm_label_ids is not None:
self.masked_lm_labels_field = IntArrayField(np.array(self.lm_label_ids, dtype="int"), padding_value = -1)
else:
self.masked_lm_labels_field = None
if self.is_next is not None:
self.is_random_next_field = IntArrayField(np.array(self.is_next, dtype="int"))
else:
self.is_random_next_field = None
def convert_to_pytorch_tensor(self):
        # For multi-process efficiency, we use pytorch tensors instead of AllenNLP fields.
self.input_ids_field = torch.tensor(self.input_ids, dtype=torch.int64)
self.input_mask_field = torch.tensor(self.input_mask, dtype=torch.int64)
self.input_type_ids_field = torch.tensor(self.segment_ids, dtype=torch.int64)
# Padding Value = -1
if self.lm_label_ids is not None:
self.masked_lm_labels_field = torch.tensor(self.lm_label_ids, dtype=torch.int64)
else:
self.masked_lm_labels_field = None
if self.is_next is not None:
self.is_random_next_field = torch.tensor([int(self.is_next)], dtype=torch.int64)
else:
self.is_random_next_field = None
def insert_field_into_dict(self, instance_dict):
self.convert_to_allennlp_feild()
instance_dict["bert_input_ids"] = self.input_ids_field
instance_dict["bert_input_mask"] = self.input_mask_field
instance_dict["bert_input_type_ids"] = self.input_type_ids_field
if self.masked_lm_labels_field is not None:
instance_dict["masked_lm_labels"] = self.masked_lm_labels_field
if self.is_random_next_field is not None:
instance_dict["is_random_next"] = self.is_random_next_field
def insert_tensor_into_dict(self, instance_dict):
self.convert_to_pytorch_tensor()
instance_dict["bert_input_ids"] = self.input_ids_field
instance_dict["bert_input_mask"] = self.input_mask_field
instance_dict["bert_input_type_ids"] = self.input_type_ids_field
if self.masked_lm_labels_field is not None:
instance_dict["masked_lm_labels"] = self.masked_lm_labels_field
if self.is_random_next_field is not None:
instance_dict["is_random_next"] = self.is_random_next_field
@staticmethod
def convert_list_features_to_allennlp_list_feild(list_features, instance_dict):
input_ids_list = []
input_mask_list = []
input_type_ids_list = []
masked_lm_labels_list = []
is_random_next_list = []
# Every element in the list_features is a feature instance
for i in list_features:
i.convert_to_allennlp_feild()
input_ids_list.append(i.input_ids_field)
input_mask_list.append(i.input_mask_field)
input_type_ids_list.append(i.input_type_ids_field)
masked_lm_labels_list.append(i.masked_lm_labels_field)
is_random_next_list.append(i.is_random_next_field)
input_ids_list = ListField(input_ids_list)
input_mask_list = ListField(input_mask_list)
input_type_ids_list = ListField(input_type_ids_list)
if masked_lm_labels_list[0]:
masked_lm_labels_list = ListField(masked_lm_labels_list)
is_random_next_list = ListField(is_random_next_list)
else:
masked_lm_labels_list = None
is_random_next_list = None
instance_dict["bert_input_ids"] = input_ids_list
instance_dict["bert_input_mask"] = input_mask_list
instance_dict["bert_input_type_ids"] = input_type_ids_list
if masked_lm_labels_list:
instance_dict["masked_lm_labels"] = masked_lm_labels_list
instance_dict["is_random_next"] = is_random_next_list
return
class faster_RCNN_feat_reader:
def read(self, image_feat_path):
return np.load(image_feat_path)
class CHW_feat_reader:
def read(self, image_feat_path):
feat = np.load(image_feat_path)
assert (feat.shape[0] == 1), "batch is not 1"
feat = feat.squeeze(0)
return feat
class dim_3_reader:
def read(self, image_feat_path):
tmp = np.load(image_feat_path)
_, _, c_dim = tmp.shape
image_feat = np.reshape(tmp, (-1, c_dim))
return image_feat
class HWC_feat_reader:
def read(self, image_feat_path):
tmp = np.load(image_feat_path)
assert (tmp.shape[0] == 1), "batch is not 1"
_, _, _, c_dim = tmp.shape
image_feat = np.reshape(tmp, (-1, c_dim))
return image_feat
class padded_faster_RCNN_feat_reader:
def __init__(self, max_loc):
self.max_loc = max_loc
def read(self, image_feat_path):
image_feat = np.load(image_feat_path)
image_loc, image_dim = image_feat.shape
tmp_image_feat = np.zeros((self.max_loc, image_dim), dtype=np.float32)
tmp_image_feat[0:image_loc, ] = image_feat
image_feat = tmp_image_feat
return (image_feat, image_loc)
class padded_faster_RCNN_with_bbox_feat_reader:
def __init__(self, max_loc):
self.max_loc = max_loc
def read(self, image_feat_path):
image_feat_bbox = np.load(image_feat_path)
image_boxes = image_feat_bbox.item().get('image_bboxes')
tmp_image_feat = image_feat_bbox.item().get('image_feat')
image_loc, image_dim = tmp_image_feat.shape
tmp_image_feat_2 = np.zeros((self.max_loc, image_dim),
dtype=np.float32)
tmp_image_feat_2[0:image_loc, ] = tmp_image_feat
tmp_image_box = np.zeros((self.max_loc, 4), dtype=np.int32)
tmp_image_box[0:image_loc] = image_boxes
return (tmp_image_feat_2, image_loc, tmp_image_box)
def parse_npz_img_feat(feat):
return feat['x']
def get_image_feat_reader(ndim, channel_first, image_feat, max_loc=None):
if ndim == 2 or ndim == 0:
if max_loc is None:
return faster_RCNN_feat_reader()
else:
if isinstance(image_feat.item(0), dict):
return padded_faster_RCNN_with_bbox_feat_reader(max_loc)
else:
return padded_faster_RCNN_feat_reader(max_loc)
elif ndim == 3 and not channel_first:
return dim_3_reader()
elif ndim == 4 and channel_first:
return CHW_feat_reader()
elif ndim == 4 and not channel_first:
return HWC_feat_reader()
else:
raise TypeError("unkown image feature format")
def compute_answer_scores(answers, num_of_answers, unk_idx):
scores = np.zeros((num_of_answers), np.float32)
for answer in set(answers):
if answer == unk_idx:
scores[answer] = 0
else:
answer_count = answers.count(answer)
scores[answer] = min(np.float32(answer_count)*0.3, 1)
return scores
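
# Worked example of the VQA-style soft scores above (hypothetical indices):
# an answer chosen by 2 of 10 annotators scores min(2 * 0.3, 1) = 0.6, one
# chosen 4 times saturates at 1.0, and the <unk> index is forced to 0.
def _demo_compute_answer_scores():
    answers = [3, 3, 5, 5, 5, 5, 0, 0, 0, 0]  # 0 plays the role of unk_idx
    scores = compute_answer_scores(answers, num_of_answers=6, unk_idx=0)
    assert abs(scores[3] - 0.6) < 1e-6
    assert scores[5] == 1.0 and scores[0] == 0.0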
def read_in_image_feats(image_dirs, image_readers, image_file_name):
image_feats = []
for i, image_dir in enumerate(image_dirs):
image_feat_path = os.path.join(image_dir, image_file_name)
tmp_image_feat = image_readers[i].read(image_feat_path)
image_feats.append(tmp_image_feat)
return image_feats
def get_one_image_feature(path, reader, image_feature_cap):
image_feat = reader.read(path)
image_loc = image_feat[1]
if len(image_feat) == 3:
image_boxes = image_feat[2]
else:
image_boxes = None
returned_feat = image_feat[0]
if image_feature_cap != -1:
if image_feature_cap < image_loc:
returned_feat = returned_feat[:image_feature_cap, :]
if image_boxes is not None:
image_boxes = image_boxes[:image_feature_cap]
image_loc = image_feature_cap
return returned_feat, image_boxes, image_loc
def get_one_image_feature_npz_screening_parameters(path, reader, image_screening_parameters, return_confidence = False):
result = reader.read(path)
image_feat = result["box_features"]
max_conf = result["max_conf"]
cls_boxes = result["cls_boxes"]
confidence_cap = image_screening_parameters.get("confidence_cap", None)
image_feature_cap = image_screening_parameters.get("image_feature_cap", None)
if confidence_cap:
keep_boxes = np.where(max_conf >= confidence_cap)[0]
if keep_boxes.shape[0] == 0:
image_feat = image_feat[:1] # Just keep one feature...
cls_boxes = cls_boxes[:1]
max_conf = max_conf[:1]
else:
image_feat = image_feat[keep_boxes]
cls_boxes = cls_boxes[keep_boxes]
max_conf = max_conf[keep_boxes]
if image_feature_cap:
image_loc = image_feat.shape[0]
if image_feature_cap < image_loc:
image_feat = image_feat[:image_feature_cap, :]
cls_boxes = cls_boxes[:image_feature_cap]
max_conf = max_conf[:image_feature_cap]
image_loc = image_feat.shape[0]
if return_confidence:
return image_feat, cls_boxes, max_conf
else:
return image_feat, cls_boxes, image_loc
def screen_feature(image_feat, cls_boxes, max_conf, image_screening_parameters, mandatory_keep = None):
confidence_cap = image_screening_parameters.get("confidence_cap", None)
image_feature_cap = image_screening_parameters.get("image_feature_cap", None)
min_cap = image_screening_parameters.get("min_cap", 1)
max_cap = image_screening_parameters.get("max_cap", 300)
keep_boxes = np.arange(image_feat.shape[0])
if confidence_cap:
keep_boxes = np.where(max_conf >= confidence_cap)[0]
if keep_boxes.shape[0] < min_cap:
keep_boxes = np.arange(min_cap)
#image_feat = image_feat[:min_cap]
#cls_boxes = cls_boxes[:min_cap]
if image_feature_cap:
if image_feature_cap < keep_boxes.shape[0]:
keep_boxes = np.arange(image_feature_cap)
if max_cap:
if max_cap < keep_boxes.shape[0]:
keep_boxes = np.arange(max_cap)
if mandatory_keep is not None:
keep_boxes = np.union1d(keep_boxes, mandatory_keep)
image_feat = image_feat[keep_boxes]
cls_boxes = cls_boxes[keep_boxes]
image_loc = image_feat.shape[0]
return image_feat, cls_boxes, image_loc
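
# Usage sketch on synthetic detections; a plain dict stands in for the
# args-style screening parameters. Boxes whose confidence clears
# confidence_cap are kept, never fewer than min_cap nor more than the caps.
def _demo_screen_feature():
    feats = np.random.rand(6, 2048).astype(np.float32)
    boxes = np.random.rand(6, 4).astype(np.float32)
    conf = np.array([0.9, 0.8, 0.6, 0.4, 0.2, 0.1])
    params = {"confidence_cap": 0.5, "image_feature_cap": 2,
              "min_cap": 1, "max_cap": 300}
    feats_out, boxes_out, n = screen_feature(feats, boxes, conf, params)
    # Three boxes clear 0.5, then image_feature_cap trims them to two.
    assert n == 2 and feats_out.shape == (2, 2048) and boxes_out.shape == (2, 4)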
| 21,319 | 39.378788 | 130 | py |
visualbert | visualbert-master/visualbert/dataloaders/__init__.py | 0 | 0 | 0 | py | |
visualbert | visualbert-master/visualbert/dataloaders/coco_dataset.py | import os
import random
import json
from collections import defaultdict
from copy import deepcopy

import numpy as np
import h5py
import torch
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from tqdm import tqdm

from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask

from pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from dataloaders.box_utils import load_image, resize_image, to_tensor_and_normalize
from dataloaders.mask_utils import make_mask
from dataloaders.bert_field import BertField, IntArrayField
from .bert_data_utils import *
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
from pycocotools.coco import COCO
class COCODataset(Dataset):
def __init__(self, args, visual_genome_chunk = False):
super(COCODataset, self).__init__()
self.args = args
self.coco = COCO(args.annots_path)
self.annots_path = args.annots_path
self.split_name = args.split_name
self.data_root = args.data_root
self.visual_genome_chunk = visual_genome_chunk
self.masks = args.masks
self.image_feature_type = args.image_feature_type
self.text_only = args.get("text_only", False)
self.add_spatial_features = args.get("add_spatial_features", False)
self.expanded = False
########## Loading Annotations
self.items = self.coco.loadAnns(self.coco.getAnnIds())
print("{} of captions in total.".format(len(self.items)))
self.image_feat_reader = faster_RCNN_feat_reader()
if args.get("chunk_path", None) is not None and self.image_feature_type == "nlvr":
print("Loading images...")
self.chunk = torch.load(args.chunk_path)
average = 0.0
counter = 0
new_chunk = {}
for image_id in self.chunk.keys():
image_feat_variable, image_boxes, confidence = self.chunk[image_id]
if ".npz" in image_id:
new_chunk[image_id] = screen_feature(image_feat_variable, image_boxes,confidence, args.image_screening_parameters)
average += new_chunk[image_id][2]
else:
new_chunk[image_id+".npz"] = screen_feature(image_feat_variable, image_boxes,confidence, args.image_screening_parameters)
average += new_chunk[image_id+".npz"][2]
print("{} features on average.".format(average/len(self.chunk)))
self.chunk = new_chunk
self.do_lower_case = args.do_lower_case
self.bert_model_name = args.bert_model_name
self.max_seq_length = args.max_seq_length
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name, do_lower_case=self.do_lower_case)
self.pretraining = args.pretraining
self.masked_lm_prob = args.get("masked_lm_prob", 0.15)
with open(os.path.join('./cocoontology.json'), 'r') as f:
coco = json.load(f)
self.coco_objects = ['__background__'] + [x['name'] for k, x in sorted(coco.items(), key=lambda x: int(x[0]))]
self.coco_obj_to_ind = {o: i for i, o in enumerate(self.coco_objects)}
if self.image_feature_type == "r2c":
items = []
counter = 0
            for index, i in enumerate(self.items):
                if self.expanded and index >= self.train_size:
image_file_name = "COCO_val2014_{:0>12d}.jpg".format(i['image_id'])
else:
image_file_name = "COCO_{}2014_{:0>12d}.jpg".format(self.split_name, i['image_id'])
if isinstance(self.masks[image_file_name], dict):
items.append(i)
else:
# For some images, the detector seems to have Null output. Thus we just skip them. This will not affect much.
counter += 1
print("Discarded {} instances in {}.".format(counter, self.split_name))
self.items = items
def get_image_features_by_training_index(self, index):
item = self.items[index]
if self.args.image_feature_type == "flickr":
v_item = self.visual_genome_chunk[item['image_id']]
image_feat_variable = v_item["features"]
image_boxes = None
image_dim_variable = image_feat_variable.shape[0]
if self.add_spatial_features:
image_w = float(v_item['image_w'])
image_h = float(v_item['image_h'])
bboxes = v_item["boxes"]
box_width = bboxes[:, 2] - bboxes[:, 0]
box_height = bboxes[:, 3] - bboxes[:, 1]
scaled_width = box_width / image_w
scaled_height = box_height / image_h
scaled_x = bboxes[:, 0] / image_w
scaled_y = bboxes[:, 1] / image_h
box_width = box_width[..., np.newaxis]
box_height = box_height[..., np.newaxis]
scaled_width = scaled_width[..., np.newaxis]
scaled_height = scaled_height[..., np.newaxis]
scaled_x = scaled_x[..., np.newaxis]
scaled_y = scaled_y[..., np.newaxis]
spatial_features = np.concatenate(
(scaled_x,
scaled_y,
scaled_x + scaled_width,
scaled_y + scaled_height,
scaled_width,
scaled_height),
axis=1)
image_feat_variable = np.concatenate((image_feat_variable, spatial_features), axis=1)
return image_feat_variable, image_boxes, image_dim_variable
if self.args.image_feature_type == "vqa_fix_100":
if self.expanded and index >= self.train_size:
image_file_name = "COCO_val2014_{:0>12d}.npy".format(item['image_id'])
else:
image_file_name = "COCO_{}2014_{:0>12d}.npy".format(self.split_name, item['image_id'])
if "train" in image_file_name:
folder = os.path.join(self.data_root, "data/detectron_fix_100/fc6/vqa/train2014")
elif "val" in image_file_name:
folder = os.path.join(self.data_root, "data/detectron_fix_100/fc6/vqa/val2014")
image_feat_variable = np.load(os.path.join(folder, image_file_name))
image_dim_variable = image_feat_variable.shape[0]
return image_feat_variable, None, image_dim_variable
if self.expanded and index >= self.train_size:
image_file_name = "COCO_val2014_{:0>12d}.jpg.npz".format(item['image_id'])
return self.chunk_val[image_file_name]
else:
image_file_name = "COCO_{}2014_{:0>12d}.jpg.npz".format(self.split_name, item['image_id'])
if self.args.get("chunk_path", None) is not None:
return self.chunk[image_file_name]
def __len__(self):
return len(self.items)
def __getitem__(self, index):
if self.image_feature_type == "r2c":
return self.__getitem_detector__(index)
item = self.items[index]
sample = {}
if not self.text_only:
image_feat_variable, image_boxes, image_dim_variable = self.get_image_features_by_training_index(index)
image_feat_variable = ArrayField(image_feat_variable)
image_dim_variable = IntArrayField(np.array(image_dim_variable))
sample["image_feat_variable"] = image_feat_variable
sample["image_dim_variable"] = image_dim_variable
sample["label"] = image_dim_variable
else:
sample["label"] = IntArrayField(np.array([0]))
caption_a = item["caption"]
imageID = item["image_id"]
if self.expanded and index >= self.train_size:
coco = self.coco_val
else:
coco = self.coco
rest_anns = coco.loadAnns([i for i in coco.getAnnIds(imgIds=imageID) if i != item['id']])
if self.args.get("two_sentence", True):
if random.random() > 0.5:
item_b = self.items[random.randint(0, len(self.items) - 1)]
while item_b["image_id"] == imageID:
item_b = self.items[random.randint(0, len(self.items) - 1)]
flag = False
else:
item_b = rest_anns[random.randint(0, len(rest_anns) - 1)]
flag = True
caption_b = item_b["caption"]
subword_tokens_a = self.tokenizer.tokenize(caption_a)
subword_tokens_b = self.tokenizer.tokenize(caption_b)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = subword_tokens_b, is_correct=flag, max_seq_length = self.max_seq_length)
elif not self.args.get("no_next_sentence", False):
if random.random() < self.args.false_caption_ratio:
item_b = self.items[random.randint(0, len(self.items) - 1)]
while item_b["image_id"] == imageID:
item_b = self.items[random.randint(0, len(self.items) - 1)]
flag = False
else:
item_b = item
flag = True
caption_b = item_b["caption"]
subword_tokens_b = self.tokenizer.tokenize(caption_b)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=flag, max_seq_length = self.max_seq_length)
else:
caption_b = item["caption"]
subword_tokens_b = self.tokenizer.tokenize(caption_b)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=None, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = self.masked_lm_prob)
bert_feature.insert_field_into_dict(sample)
return Instance(sample)
def __getitem_detector__(self, index):
item = self.items[index]
sample = {}
if self.expanded and index >= self.train_size:
image_file_name = "COCO_val2014_{:0>12d}.jpg".format(item['image_id'])
else:
image_file_name = "COCO_{}2014_{:0>12d}.jpg".format(self.split_name, item['image_id'])
image_info = self.masks[image_file_name]
if "train" in image_file_name:
image_file_path = os.path.join(self.data_root, "train2014", image_file_name)
elif "val" in image_file_name:
image_file_path = os.path.join(self.data_root, "val2014", image_file_name)
###################################################################
# Most of things adapted from VCR
# Load image now and rescale it. Might have to subtract the mean and whatnot here too.
image = load_image(image_file_path)
image, window, img_scale, padding = resize_image(image, random_pad=self.is_train)
image = to_tensor_and_normalize(image)
c, h, w = image.shape
###################################################################
metadata = self.masks[image_file_name] # Get the metadata
# Load boxes.
# We will use all detections
dets2use = np.arange(len(metadata['boxes']))
# [nobj, 14, 14]
segms = np.stack([make_mask(mask_size=14, box=metadata['boxes'][i], polygons_list=metadata['segms'][i]) for i in dets2use])
# Chop off the final dimension, that's the confidence
boxes = np.array(metadata['boxes'])[dets2use, :-1]
# Possibly rescale them if necessary
boxes *= img_scale
boxes[:, :2] += np.array(padding[:2])[None]
boxes[:, 2:] += np.array(padding[:2])[None]
try:
metadata['names'] = [i.split(" ")[1][1:-1] for i in metadata["names"]]
        except Exception:
pass
obj_labels = [self.coco_obj_to_ind[metadata['names'][i]] for i in dets2use.tolist()]
boxes = np.row_stack((window, boxes))
segms = np.concatenate((np.ones((1, 14, 14), dtype=np.float32), segms), 0)
obj_labels = [self.coco_obj_to_ind['__background__']] + obj_labels
sample['segms'] = ArrayField(segms, padding_value=0)
sample['objects'] = ListField([LabelField(x, skip_indexing=True) for x in obj_labels])
if not np.all((boxes[:, 0] >= 0.) & (boxes[:, 0] < boxes[:, 2])):
import ipdb
ipdb.set_trace()
assert np.all((boxes[:, 1] >= 0.) & (boxes[:, 1] < boxes[:, 3]))
assert np.all((boxes[:, 2] <= w))
assert np.all((boxes[:, 3] <= h))
sample['boxes'] = ArrayField(boxes, padding_value=-1)
caption_a = item["caption"]
imageID = item["image_id"]
sample["label"] = sample['objects'] # This is an useless field. Just so that they know the batch size.
if self.expanded and index >= self.train_size:
coco = self.coco_val
else:
coco = self.coco
rest_anns = coco.loadAnns([i for i in coco.getAnnIds(imgIds=imageID) if i != item['id']])
if self.args.get("two_sentence", True):
if random.random() > 0.5:
item_b = self.items[random.randint(0, len(self.items) - 1)]
while item_b["image_id"] == imageID:
item_b = self.items[random.randint(0, len(self.items) - 1)]
flag = False
else:
item_b = rest_anns[random.randint(0, len(rest_anns) - 1)]
flag = True # is next sentence
caption_b = item_b["caption"]
subword_tokens_a = self.tokenizer.tokenize(caption_a)
subword_tokens_b = self.tokenizer.tokenize(caption_b)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = subword_tokens_b, is_correct=flag, max_seq_length = self.max_seq_length)
elif not self.args.get("no_next_sentence", False):
if random.random() < self.args.false_caption_ratio:
item_b = self.items[random.randint(0, len(self.items) - 1)]
while item_b["image_id"] == imageID:
item_b = self.items[random.randint(0, len(self.items) - 1)]
flag = False
else:
item_b = item
flag = True # is next sentence
caption_b = item_b["caption"]
subword_tokens_b = self.tokenizer.tokenize(caption_b)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=flag, max_seq_length = self.max_seq_length)
else:
subword_tokens_a = self.tokenizer.tokenize(caption_a)
bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = None, is_correct=None, max_seq_length = self.max_seq_length)
bert_feature = InputFeatures.convert_one_example_to_features_pretraining(
example = bert_example,
tokenizer=self.tokenizer,
probability = self.masked_lm_prob)
bert_feature.insert_field_into_dict(sample)
return image, Instance(sample)
@classmethod
def splits(cls, args):
data_root = args.data_root
if args.image_feature_type == "r2c":
# For r2c, the masks are pre-computed from a larger detector. Thus, when pre-training on COCO, we follow the same procedure.
masks = torch.load(os.path.join(data_root, "mask_train.th"))
mask_val = torch.load(os.path.join(data_root, "mask_val.th"))
for i in mask_val:
masks[i] = mask_val[i]
else:
masks = None
if args.image_feature_type == "flickr":
import base64
import csv
import sys
import zlib
import time
import mmap
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']
infiles = [
os.path.join(data_root, "trainval/karpathy_test_resnet101_faster_rcnn_genome.tsv"),
os.path.join(data_root, "trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.0"),
os.path.join(data_root, "trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.1"),
os.path.join(data_root, "trainval/karpathy_val_resnet101_faster_rcnn_genome.tsv")
]
chunk = {}
chunk_file = os.path.join(data_root, "trainval/resnet101_genome.th")
if not os.path.exists(chunk_file):
print("Loading COCO files for Flickr30K for the first time...")
for infile in infiles:
with open(infile, "r+") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES)
for item in tqdm(reader):
item['image_id'] = int(item['image_id'])
item['image_h'] = float(item['image_h'])
item['image_w'] = float(item['image_w'])
item['num_boxes'] = int(item['num_boxes'])
for field in ['boxes', 'features']:
# Hope the python2/3 b64decode does not mess things up.
item[field] = np.frombuffer(base64.b64decode(item[field]),
dtype=np.float32).reshape((item['num_boxes'],-1))
item["features"] = torch.from_numpy(item["features"])
item["boxes"] = torch.from_numpy(item["boxes"])
chunk[item['image_id']] = item
torch.save(chunk, chunk_file)
else:
chunk = torch.load(chunk_file)
else:
chunk = None
copy_args = deepcopy(args)
copy_args.split_name = "train"
copy_args.annots_path = os.path.join(data_root, "annotations/captions_{}2014.json".format(copy_args.split_name))
if args.image_feature_type == "nlvr":
copy_args.chunk_path = os.path.join(data_root, "coco_features_{}_150.th".format(copy_args.split_name))
copy_args.data_root = data_root
copy_args.masks = masks
trainset = cls(copy_args, chunk)
trainset.is_train = True
copy_args = deepcopy(args)
copy_args.split_name = "val"
copy_args.annots_path = os.path.join(data_root, "annotations/captions_{}2014.json".format(copy_args.split_name))
if args.image_feature_type == "nlvr":
copy_args.chunk_path = os.path.join(data_root, "coco_features_{}_150.th".format(copy_args.split_name))
copy_args.data_root = data_root
copy_args.masks = masks
validationset = cls(copy_args, chunk)
validationset.is_train = False
if args.get("expand_coco", False):
# This is to expand the COCO train
trainset.expanded = True
trainset.train_size = len(trainset.items)
trainset.items.extend(validationset.items)
trainset.coco_val = validationset.coco
if args.image_feature_type != "r2c" and args.image_feature_type != "vqa_fix_100" and args.image_feature_type != "flickr": # For NLVR, we pre-load features so we need to expand the chunk as well
trainset.chunk_val = validationset.chunk
imdb = np.load(os.path.join(data_root, "data/imdb/imdb_minival2014.npy"), allow_pickle = True)[1:]
image_names_mini_val = set([i["image_name"] + ".jpg" for i in imdb])
if args.get("exclude_minival", False):
trainset.items = [i for i in trainset.items if "COCO_val2014_{:0>12d}.jpg".format(i['image_id']) not in image_names_mini_val]
validationset.items = [i for i in validationset.items if "COCO_val2014_{:0>12d}.jpg".format(i['image_id']) in image_names_mini_val]
print("After expanding, train has {} items, val has {} items".format(len(trainset.items), len(validationset.items)))
testset = validationset # Testset will not be used so this is just a placeholder
return trainset, validationset, testset
@staticmethod
def collate_fn(data):
if isinstance(data[0], Instance):
batch = Batch(data)
td = batch.as_tensor_dict()
return td
else:
images, instances = zip(*data)
images = torch.stack(images, 0)
batch = Batch(instances)
td = batch.as_tensor_dict()
td['box_mask'] = torch.all(td['boxes'] >= 0, -1).long()
td['images'] = images
return td
| 21,482 | 45.600868 | 205 | py |
visualbert | visualbert-master/visualbert/dataloaders/mask_utils.py | import numpy as np
import matplotlib
from matplotlib import path
matplotlib.use('agg')
def _spaced_points(low, high, n):
""" We want n points between low and high, but we don't want them to touch either side"""
padding = (high-low)/(n*2)
return np.linspace(low + padding, high-padding, num=n)
def make_mask(mask_size, box, polygons_list):
"""
Mask size: int about how big mask will be
box: [x1, y1, x2, y2, conf.]
polygons_list: List of polygons that go inside the box
"""
    mask = np.zeros((mask_size, mask_size), dtype=bool)
xy = np.meshgrid(_spaced_points(box[0], box[2], n=mask_size),
_spaced_points(box[1], box[3], n=mask_size))
xy_flat = np.stack(xy, 2).reshape((-1, 2))
for polygon in polygons_list:
polygon_path = path.Path(polygon)
mask |= polygon_path.contains_points(xy_flat).reshape((mask_size, mask_size))
return mask.astype(np.float32)
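
# Self-contained sketch: rasterize a triangle covering the lower-left half of
# a unit box into a 14x14 soft mask (synthetic inputs, not real VCR metadata).
def _demo_make_mask():
    box = [0.0, 0.0, 1.0, 1.0, 0.99]  # x1, y1, x2, y2, confidence
    triangle = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]
    mask = make_mask(14, box, [triangle])
    assert mask.shape == (14, 14)
    assert 0.0 < mask.mean() < 1.0  # roughly half the cells fall inside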
#
#from matplotlib import pyplot as plt
#
#
#with open('XdtbL0dP0X0@44.json', 'r') as f:
# metadata = json.load(f)
#from time import time
#s = time()
#for i in range(100):
# mask = make_mask(14, metadata['boxes'][3], metadata['segms'][3])
#print("Elapsed {:3f}s".format(time()-s))
#plt.imshow(mask) | 1,253 | 31.153846 | 93 | py |
visualbert | visualbert-master/visualbert/dataloaders/flickr_ban/utils.py | # Copied from https://github.com/jnhwkim/ban-vqa
"""
This code is extended from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
"""
from __future__ import print_function
import errno
import os
import re
import collections
import numpy as np
import operator
import functools
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch._six import string_classes
from torch.utils.data.dataloader import default_collate, numpy_type_map  # numpy_type_map: old-torch dataloader symbol, needed by trim_collate's scalar branch
EPS = 1e-7
def assert_eq(real, expected):
assert real == expected, '%s (true) vs %s (expected)' % (real, expected)
def assert_array_eq(real, expected):
assert (np.abs(real-expected) < EPS).all(), \
'%s (true) vs %s (expected)' % (real, expected)
def assert_tensor_eq(real, expected, eps=EPS):
assert (torch.abs(real-expected) < eps).all(), \
'%s (true) vs %s (expected)' % (real, expected)
def load_folder(folder, suffix):
imgs = []
for f in sorted(os.listdir(folder)):
if f.endswith(suffix):
imgs.append(os.path.join(folder, f))
return imgs
def load_imageid(folder):
images = load_folder(folder, 'jpg')
img_ids = set()
for img in images:
img_id = int(img.split('/')[-1].split('.')[0].split('_')[-1])
img_ids.add(img_id)
return img_ids
def pil_loader(path):
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
def weights_init(m):
"""custom weights initialization."""
cname = m.__class__
if cname == nn.Linear or cname == nn.Conv2d or cname == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
elif cname == nn.BatchNorm2d:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
else:
print('%s is not initialized.' % cname)
def init_net(net, net_file):
if net_file:
net.load_state_dict(torch.load(net_file))
else:
net.apply(weights_init)
def create_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def print_model(model, logger):
print(model)
nParams = 0
for w in model.parameters():
nParams += functools.reduce(operator.mul, w.size(), 1)
if logger:
logger.write('nParams=\t'+str(nParams))
def save_model(path, model, epoch, optimizer=None):
model_dict = {
'epoch': epoch,
'model_state': model.state_dict()
}
if optimizer is not None:
model_dict['optimizer_state'] = optimizer.state_dict()
torch.save(model_dict, path)
# Select the indices given by `lengths` in the second dimension
# As a result, # of dimensions is shrinked by one
# @param pad(Tensor)
# @param len(list[int])
def rho_select(pad, lengths):
# Index of the last output for each sequence.
idx_ = (lengths-1).view(-1,1).expand(pad.size(0), pad.size(2)).unsqueeze(1)
extracted = pad.gather(1, idx_).squeeze(1)
return extracted
def trim_collate(batch):
"Puts each data field into a tensor with outer dimension batch size"
_use_shared_memory = True
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
out = None
if 1 < batch[0].dim(): # image features
max_num_boxes = max([x.size(0) for x in batch])
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = len(batch) * max_num_boxes * batch[0].size(-1)
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
# warning: F.pad returns Variable!
return torch.stack([F.pad(x, (0,0,0,max_num_boxes-x.size(0))).data for x in batch], 0, out=out)
else:
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], collections.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [trim_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
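
# Sketch of the padding behavior: image-feature tensors with different numbers
# of boxes are zero-padded along dim 0 up to the largest count in the batch.
def _demo_trim_collate():
    a = torch.ones(3, 5)  # 3 boxes, 5-d features
    b = torch.ones(7, 5)  # 7 boxes, 5-d features
    batch = trim_collate([a, b])
    assert batch.shape == (2, 7, 5)
    assert batch[0, 3:].abs().sum().item() == 0  # padded rows are all zero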
class Logger(object):
def __init__(self, output_name):
dirname = os.path.dirname(output_name)
if not os.path.exists(dirname):
os.mkdir(dirname)
self.log_file = open(output_name, 'w')
self.infos = {}
def append(self, key, val):
vals = self.infos.setdefault(key, [])
vals.append(val)
def log(self, extra_msg=''):
msgs = [extra_msg]
        for key, vals in self.infos.items():
msgs.append('%s %.6f' % (key, np.mean(vals)))
msg = '\n'.join(msgs)
self.log_file.write(msg + '\n')
self.log_file.flush()
self.infos = {}
return msg
def write(self, msg):
self.log_file.write(msg + '\n')
self.log_file.flush()
print(msg)
def create_glove_embedding_init(idx2word, glove_file):
word2emb = {}
with open(glove_file, 'r', encoding='utf-8') as f:
entries = f.readlines()
emb_dim = len(entries[0].split(' ')) - 1
print('embedding dim is %d' % emb_dim)
weights = np.zeros((len(idx2word), emb_dim), dtype=np.float32)
for entry in entries:
vals = entry.split(' ')
word = vals[0]
vals = list(map(float, vals[1:]))
word2emb[word] = np.array(vals)
for idx, word in enumerate(idx2word):
if word not in word2emb:
continue
weights[idx] = word2emb[word]
return weights, word2emb
# Remove Flickr30K Entity annotations in a string
def remove_annotations(s):
return re.sub(r'\[[^ ]+ ','',s).replace(']', '')
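
# Sketch on one hypothetical Flickr30k Entities sentence: the bracketed
# entity markup is stripped, leaving the plain caption.
def _demo_remove_annotations():
    sent = "[/EN#1/people A man] rides [/EN#2/vehicles a bike]"
    assert remove_annotations(sent) == "A man rides a bike"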
def get_sent_data(file_path):
phrases = []
with open(file_path, 'r', encoding='utf-8') as f:
for sent in f:
            line = remove_annotations(sent.strip())
            phrases.append(line)
return phrases
# Find position of a given sublist
# return the index of the last token
def find_sublist(arr, sub):
sublen = len(sub)
first = sub[0]
indx = -1
while True:
try:
indx = arr.index(first, indx + 1)
except ValueError:
break
if sub == arr[indx: indx + sublen]:
return indx + sublen - 1
return -1
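
# Sketch: find_sublist returns the index of the *last* token of the first
# match, or -1 when the phrase does not occur in the sentence.
def _demo_find_sublist():
    sent = "a man in a blue shirt".split()
    assert find_sublist(sent, ["blue", "shirt"]) == 5
    assert find_sublist(sent, ["red", "shirt"]) == -1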
# Find position of a given sublist
# return the index of the last token as well as the first token
def find_sublist_full(arr, sub):
sublen = len(sub)
first = sub[0]
indx = -1
while True:
try:
indx = arr.index(first, indx + 1)
except ValueError:
break
if sub == arr[indx: indx + sublen]:
return (indx, indx + sublen - 1)
return (-1, -1)
def calculate_iou(obj1, obj2):
area1 = calculate_area(obj1)
area2 = calculate_area(obj2)
intersection = get_intersection(obj1, obj2)
area_int = calculate_area(intersection)
return area_int / (area1 + area2 - area_int)
def calculate_area(obj):
return (obj[2] - obj[0]) * (obj[3] - obj[1])
def get_intersection(obj1, obj2):
left = obj1[0] if obj1[0] > obj2[0] else obj2[0]
top = obj1[1] if obj1[1] > obj2[1] else obj2[1]
right = obj1[2] if obj1[2] < obj2[2] else obj2[2]
bottom = obj1[3] if obj1[3] < obj2[3] else obj2[3]
if left > right or top > bottom:
return [0, 0, 0, 0]
return [left, top, right, bottom]
def get_match_index(src_bboxes, dst_bboxes):
indices = set()
for src_bbox in src_bboxes:
for i, dst_bbox in enumerate(dst_bboxes):
iou = calculate_iou(src_bbox, dst_bbox)
if iou >= 0.5:
indices.add(i)
return list(indices)
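
# Worked IoU example: two unit boxes overlapping in a 0.5 x 1 strip give
# IoU = 0.5 / (1 + 1 - 0.5) = 1/3, below the 0.5 threshold above, while an
# identical pair (IoU = 1) is kept.
def _demo_get_match_index():
    gt = [[0, 0, 1, 1]]
    proposals = [[0.5, 0, 1.5, 1], [0, 0, 1, 1]]
    assert abs(calculate_iou(gt[0], proposals[0]) - 1.0 / 3.0) < 1e-9
    assert get_match_index(gt, proposals) == [1]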
# Batched index_select
def batched_index_select(t, dim, inds):
dummy = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), t.size(2))
out = t.gather(dim, dummy) # b x e x f
return out
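
# Shape sketch: gather e vectors per batch element from a (b, n, f) tensor.
def _demo_batched_index_select():
    t = torch.arange(24, dtype=torch.float32).view(2, 3, 4)  # (b=2, n=3, f=4)
    inds = torch.tensor([[2, 0], [1, 1]])                    # (b=2, e=2)
    out = batched_index_select(t, 1, inds)
    assert out.shape == (2, 2, 4)
    assert torch.equal(out[0, 0], t[0, 2])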
| 9,306 | 29.817881 | 107 | py |
visualbert | visualbert-master/visualbert/dataloaders/flickr_ban/dataset.py | # Modified from https://github.com/jnhwkim/ban-vqa
"""
This code is modified from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
"""
from __future__ import print_function
import os
import json
import _pickle as cPickle
import pickle
import numpy as np
from visualbert.dataloaders.flickr_ban import utils
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
import h5py
from xml.etree.ElementTree import parse
import torch
from torch.utils.data import Dataset
try:
from tools import compute_softscore
except ImportError:
pass
import itertools
import re
from tqdm import tqdm
COUNTING_ONLY = False
# Following Trott et al. (ICLR 2018)
# Interpretable Counting for Visual Question Answering
def is_howmany(q, a, label2ans):
if 'how many' in q.lower() or \
('number of' in q.lower() and 'number of the' not in q.lower()) or \
'amount of' in q.lower() or \
'count of' in q.lower():
if a is None or answer_filter(a, label2ans):
return True
else:
return False
else:
return False
def answer_filter(answers, label2ans, max_num=10):
for ans in answers['labels']:
if label2ans[ans].isdigit() and max_num >= int(label2ans[ans]):
return True
return False
class Dictionary(object):
def __init__(self, word2idx=None, idx2word=None):
if word2idx is None:
word2idx = {}
if idx2word is None:
idx2word = []
self.word2idx = word2idx
self.idx2word = idx2word
@property
def ntoken(self):
return len(self.word2idx)
@property
def padding_idx(self):
return len(self.word2idx)
def tokenize(self, sentence, add_word):
sentence = sentence.lower()
sentence = sentence.replace(',', '').replace('?', '').replace('\'s', ' \'s')
words = sentence.split()
tokens = []
if add_word:
for w in words:
tokens.append(self.add_word(w))
else:
for w in words:
# the least frequent word (`bebe`) as UNK for Visual Genome dataset
tokens.append(self.word2idx.get(w, self.padding_idx-1))
return tokens
def dump_to_file(self, path):
cPickle.dump([self.word2idx, self.idx2word], open(path, 'wb'))
print('dictionary dumped to %s' % path)
@classmethod
def load_from_file(cls, path):
print('loading dictionary from %s' % path)
word2idx, idx2word = cPickle.load(open(path, 'rb'))
d = cls(word2idx, idx2word)
return d
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
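
# Usage sketch: build a tiny dictionary on the fly; at lookup time, unseen
# words map to padding_idx - 1 (the least frequent training word, used as UNK).
def _demo_dictionary():
    d = Dictionary()
    d.tokenize("what color is the dog", add_word=True)
    assert d.ntoken == 5 and d.padding_idx == 5
    # "cat" is unseen, so it falls back to index 4 ("dog").
    assert d.tokenize("is the cat", add_word=False) == [2, 3, 4]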
def _create_entry(img, question, answer):
    if answer is not None:
answer.pop('image_id')
answer.pop('question_id')
entry = {
'question_id' : question['question_id'],
'image_id' : question['image_id'],
'image' : img,
'question' : question['question'],
'answer' : answer}
return entry
def _load_dataset(dataroot, name, img_id2val, label2ans):
"""Load entries
img_id2val: dict {img_id -> val} val can be used to retrieve image or features
dataroot: root path of dataset
name: 'train', 'val', 'test-dev2015', test2015'
"""
question_path = os.path.join(
dataroot, 'v2_OpenEnded_mscoco_%s_questions.json' % \
(name + '2014' if 'test'!=name[:4] else name))
questions = sorted(json.load(open(question_path))['questions'],
key=lambda x: x['question_id'])
if 'test'!=name[:4]: # train, val
answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
answers = cPickle.load(open(answer_path, 'rb'))
answers = sorted(answers, key=lambda x: x['question_id'])
utils.assert_eq(len(questions), len(answers))
entries = []
for question, answer in zip(questions, answers):
utils.assert_eq(question['question_id'], answer['question_id'])
utils.assert_eq(question['image_id'], answer['image_id'])
img_id = question['image_id']
if not COUNTING_ONLY or is_howmany(question['question'], answer, label2ans):
entries.append(_create_entry(img_id2val[img_id], question, answer))
else: # test2015
entries = []
for question in questions:
img_id = question['image_id']
if not COUNTING_ONLY or is_howmany(question['question'], None, None):
entries.append(_create_entry(img_id2val[img_id], question, None))
return entries
def _load_visualgenome(dataroot, name, img_id2val, label2ans, adaptive=True):
"""Load entries
img_id2val: dict {img_id -> val} val can be used to retrieve image or features
dataroot: root path of dataset
name: 'train', 'val'
"""
question_path = os.path.join(dataroot, 'question_answers.json')
image_data_path = os.path.join(dataroot, 'image_data.json')
ans2label_path = os.path.join(dataroot, 'cache', 'trainval_ans2label.pkl')
cache_path = os.path.join(dataroot, 'cache', 'vg_%s%s_target.pkl' % (name, '_adaptive' if adaptive else ''))
if os.path.isfile(cache_path):
entries = cPickle.load(open(cache_path, 'rb'))
else:
entries = []
ans2label = cPickle.load(open(ans2label_path, 'rb'))
vgq = json.load(open(question_path, 'r'))
_vgv = json.load(open(image_data_path, 'r')) #108,077
vgv = {}
for _v in _vgv:
            if _v['coco_id'] is not None:
vgv[_v['id']] = _v['coco_id']
counts = [0, 0, 0, 0] # used image, used question, total question, out-of-split
for vg in vgq:
coco_id = vgv.get(vg['id'], None)
            if coco_id is not None:
counts[0] += 1
img_idx = img_id2val.get(coco_id, None)
                if img_idx is None:
counts[3] += 1
for q in vg['qas']:
counts[2] += 1
                    _answer = compute_softscore.preprocess_answer(q['answer'])
                    label = ans2label.get(_answer, None)
                    if label is not None and img_idx is not None:
counts[1] += 1
answer = {
'labels': [label],
'scores': [1.]}
entry = {
'question_id' : q['id'],
'image_id' : coco_id,
'image' : img_idx,
'question' : q['question'],
'answer' : answer}
if not COUNTING_ONLY or is_howmany(q['question'], answer, label2ans):
entries.append(entry)
print('Loading VisualGenome %s' % name)
print('\tUsed COCO images: %d/%d (%.4f)' % \
(counts[0], len(_vgv), counts[0]/len(_vgv)))
print('\tOut-of-split COCO images: %d/%d (%.4f)' % \
(counts[3], counts[0], counts[3]/counts[0]))
print('\tUsed VG questions: %d/%d (%.4f)' % \
(counts[1], counts[2], counts[1]/counts[2]))
        with open(cache_path, 'wb') as f:
            cPickle.dump(entries, f)
return entries
def _find_coco_id(vgv, vgv_id):
for v in vgv:
if v['id']==vgv_id:
return v['coco_id']
return None
def _load_flickr30k(dataroot, img_id2idx, bbox, pos_boxes, limit = None, cache_name = None):
"""Load entries
img_id2idx: dict {img_id -> val} val can be used to retrieve image or features
dataroot: root path of dataset
name: 'train', 'val', 'test-dev2015', test2015'
"""
pattern_phrase = r'\[(.*?)\]'
pattern_no = r'\/EN\#(\d+)'
missing_entity_count = dict()
multibox_entity_count = 0
entries = []
counter = 0
cache_name = os.path.join(dataroot, "{}.cache".format(cache_name))
if os.path.exists(cache_name):
with open(cache_name, "rb") as f:
entries = pickle.load(f)
else:
for image_id, idx in tqdm(img_id2idx.items()):
if limit is not None and counter == limit:
break
counter += 1
phrase_file = os.path.join(dataroot, 'Flickr30kEntities/Sentences/%d.txt' % image_id)
anno_file = os.path.join(dataroot, 'Flickr30kEntities/Annotations/%d.xml' % image_id)
with open(phrase_file, 'r', encoding='utf-8') as f:
sents = [x.strip() for x in f]
# Parse Annotation
root = parse(anno_file).getroot()
obj_elems = root.findall('./object')
pos_box = pos_boxes[idx]
bboxes = bbox[pos_box[0]:pos_box[1]]
target_bboxes = {}
for elem in obj_elems:
                if elem.find('bndbox') is None or len(elem.find('bndbox')) == 0:
continue
left = int(elem.findtext('./bndbox/xmin'))
top = int(elem.findtext('./bndbox/ymin'))
right = int(elem.findtext('./bndbox/xmax'))
bottom = int(elem.findtext('./bndbox/ymax'))
assert 0 < left and 0 < top
for name in elem.findall('name'):
entity_id = int(name.text)
assert 0 < entity_id
if not entity_id in target_bboxes:
target_bboxes[entity_id] = []
else:
multibox_entity_count += 1
target_bboxes[entity_id].append([left, top, right, bottom])
# Parse Sentence
for sent_id, sent in enumerate(sents):
sentence = utils.remove_annotations(sent)
entities = re.findall(pattern_phrase, sent)
entity_indices = []
target_indices = []
entity_ids = []
entity_types = []
#print(sentence)
for entity_i, entity in enumerate(entities):
info, phrase = entity.split(' ', 1)
entity_id = int(re.findall(pattern_no, info)[0])
entity_type = info.split('/')[2:]
entity_idx = utils.find_sublist(sentence.split(' '), phrase.split(' '))
#assert 0 <= entity_idx
if not entity_id in target_bboxes:
if entity_id >= 0:
missing_entity_count[entity_type[0]] = missing_entity_count.get(entity_type[0], 0) + 1
continue
assert 0 < entity_id
entity_ids.append(entity_id)
entity_types.append(entity_type)
target_idx = utils.get_match_index(target_bboxes[entity_id], bboxes)
entity_indices.append(entity_idx)
target_indices.append(target_idx)
if 0 == len(entity_ids):
continue
entries.append(
_create_flickr_entry(idx, sentence, entity_indices, target_indices, entity_ids, entity_types))
if 0 < len(missing_entity_count.keys()):
print('missing_entity_count=')
print(missing_entity_count)
print('multibox_entity_count=%d' % multibox_entity_count)
with open(cache_name, "wb") as f:
pickle.dump(entries,f)
return entries
def _load_flickr30k_full_entity(dataroot, img_id2idx, bbox, pos_boxes, limit = None, cache_name = None):
"""Load entries
img_id2idx: dict {img_id -> val} val can be used to retrieve image or features
dataroot: root path of dataset
name: 'train', 'val', 'test-dev2015', test2015'
"""
pattern_phrase = r'\[(.*?)\]'
pattern_no = r'\/EN\#(\d+)'
missing_entity_count = dict()
multibox_entity_count = 0
entries = []
counter = 0
cache_name = os.path.join(dataroot, "{}.cache".format(cache_name))
if os.path.exists(cache_name):
with open(cache_name, "rb") as f:
entries = pickle.load(f)
else:
for image_id, idx in tqdm(img_id2idx.items()):
if limit is not None and counter == limit:
break
counter += 1
phrase_file = os.path.join(dataroot, 'Flickr30kEntities/Sentences/%d.txt' % image_id)
anno_file = os.path.join(dataroot, 'Flickr30kEntities/Annotations/%d.xml' % image_id)
with open(phrase_file, 'r', encoding='utf-8') as f:
sents = [x.strip() for x in f]
# Parse Annotation
root = parse(anno_file).getroot()
obj_elems = root.findall('./object')
pos_box = pos_boxes[idx]
bboxes = bbox[pos_box[0]:pos_box[1]]
target_bboxes = {}
for elem in obj_elems:
                if elem.find('bndbox') is None or len(elem.find('bndbox')) == 0:
continue
left = int(elem.findtext('./bndbox/xmin'))
top = int(elem.findtext('./bndbox/ymin'))
right = int(elem.findtext('./bndbox/xmax'))
bottom = int(elem.findtext('./bndbox/ymax'))
assert 0 < left and 0 < top
for name in elem.findall('name'):
entity_id = int(name.text)
assert 0 < entity_id
if not entity_id in target_bboxes:
target_bboxes[entity_id] = []
else:
multibox_entity_count += 1
target_bboxes[entity_id].append([left, top, right, bottom])
# Parse Sentence
for sent_id, sent in enumerate(sents):
sentence = utils.remove_annotations(sent)
entities = re.findall(pattern_phrase, sent)
entity_indices = []
target_indices = []
entity_ids = []
entity_types = []
original_target = []
#print(sentence)
for entity_i, entity in enumerate(entities):
info, phrase = entity.split(' ', 1)
entity_id = int(re.findall(pattern_no, info)[0])
entity_type = info.split('/')[2:]
entity_idx = utils.find_sublist_full(sentence.split(' '), phrase.split(' '))
#assert 0 <= entity_idx
if not entity_id in target_bboxes:
if entity_id >= 0:
missing_entity_count[entity_type[0]] = missing_entity_count.get(entity_type[0], 0) + 1
continue
assert 0 < entity_id
entity_ids.append(entity_id)
entity_types.append(entity_type)
target_idx = utils.get_match_index(target_bboxes[entity_id], bboxes)
entity_indices.append(entity_idx)
target_indices.append(target_idx)
original_target.append(target_bboxes[entity_id])
if 0 == len(entity_ids):
continue
entries.append(
_create_flickr_entry(idx, sentence, entity_indices, target_indices, entity_ids, entity_types, original_target = original_target))
if 0 < len(missing_entity_count.keys()):
print('missing_entity_count=')
print(missing_entity_count)
print('multibox_entity_count=%d' % multibox_entity_count)
with open(cache_name, "wb") as f:
pickle.dump(entries,f)
return entries
def _load_flickr30k_our(dataroot, features, limit = None):
"""Load entries
    dataroot: root path of dataset
    features: dict {image filename -> detector output tuple}; item[1] holds
        the detected boxes for the image
"""
pattern_phrase = r'\[(.*?)\]'
pattern_no = r'\/EN\#(\d+)'
missing_entity_count = dict()
multibox_entity_count = 0
entries = []
counter = 0
for image_id, item in tqdm(features.items()):
if limit is not None and counter == limit:
break
counter += 1
bboxes = item[1]
image_id_number = int(image_id[:-4])
phrase_file = os.path.join(dataroot, 'Flickr30kEntities/Sentences/%d.txt' % image_id_number)
anno_file = os.path.join(dataroot, 'Flickr30kEntities/Annotations/%d.xml' % image_id_number)
with open(phrase_file, 'r', encoding='utf-8') as f:
sents = [x.strip() for x in f]
# Parse Annotation
root = parse(anno_file).getroot()
obj_elems = root.findall('./object')
target_bboxes = {}
for elem in obj_elems:
if elem.find('bndbox') == None or len(elem.find('bndbox')) == 0:
continue
left = int(elem.findtext('./bndbox/xmin'))
top = int(elem.findtext('./bndbox/ymin'))
right = int(elem.findtext('./bndbox/xmax'))
bottom = int(elem.findtext('./bndbox/ymax'))
assert 0 < left and 0 < top
for name in elem.findall('name'):
entity_id = int(name.text)
assert 0 < entity_id
if not entity_id in target_bboxes:
target_bboxes[entity_id] = []
else:
multibox_entity_count += 1
target_bboxes[entity_id].append([left, top, right, bottom])
# Parse Sentence
for sent_id, sent in enumerate(sents):
sentence = utils.remove_annotations(sent)
entities = re.findall(pattern_phrase, sent)
entity_indices = []
target_indices = []
entity_ids = []
entity_types = []
#print(sentence)
for entity_i, entity in enumerate(entities):
info, phrase = entity.split(' ', 1)
entity_id = int(re.findall(pattern_no, info)[0])
entity_type = info.split('/')[2:]
entity_idx = utils.find_sublist(sentence.split(' '), phrase.split(' '))
assert 0 <= entity_idx
if not entity_id in target_bboxes:
if entity_id >= 0:
missing_entity_count[entity_type[0]] = missing_entity_count.get(entity_type[0], 0) + 1
continue
assert 0 < entity_id
entity_ids.append(entity_id)
entity_types.append(entity_type)
target_idx = utils.get_match_index(target_bboxes[entity_id], bboxes)
entity_indices.append(entity_idx)
target_indices.append(target_idx)
if 0 == len(entity_ids):
continue
entries.append(
_create_flickr_entry(image_id, sentence, entity_indices, target_indices, entity_ids, entity_types))
if 0 < len(missing_entity_count.keys()):
print('missing_entity_count=')
print(missing_entity_count)
print('multibox_entity_count=%d' % multibox_entity_count)
return entries
# idx, sentence, entity_indices, target_indices, entity_ids, entity_types
def _create_flickr_entry(img, sentence, entity_indices, target_indices, entity_ids, entity_types, original_target = None):
type_map = {'people':0,'clothing':1,'bodyparts':2,'animals':3,'vehicles':4,'instruments':5,'scene':6,'other':7}
MAX_TYPE_NUM = 3
for i, entity_type in enumerate(entity_types):
assert MAX_TYPE_NUM >= len(entity_type)
entity_types[i] = list(type_map[x] for x in entity_type)
entity_types[i] += [-1] * (MAX_TYPE_NUM-len(entity_type))
entry = {
'image' : img,
'sentence' : sentence,
'entity_indices' : entity_indices,
'target_indices' : target_indices,
'entity_ids' : entity_ids,
'entity_types' : entity_types,
'entity_num' : len(entity_ids),
"original_target": original_target}
return entry
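# Illustrative sketch of a resulting entry (the ids and indices below are
# hypothetical, chosen only to show the shapes; entity_types is padded to
# MAX_TYPE_NUM with -1):
#
#     {'image': 42, 'sentence': 'a man pets a dog',
#      'entity_indices': [1, 4], 'target_indices': [[0], [2, 5]],
#      'entity_ids': [103, 209],
#      'entity_types': [[0, -1, -1], [3, -1, -1]],
#      'entity_num': 2, 'original_target': None}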
def tfidf_from_questions(names, dictionary, dataroot='data', target=['vqa', 'vg', 'cap', 'flickr']):
inds = [[], []] # rows, cols for uncoalesce sparse matrix
df = dict()
N = len(dictionary)
def populate(inds, df, text):
tokens = dictionary.tokenize(text, True)
for t in tokens:
df[t] = df.get(t, 0) + 1
combin = list(itertools.combinations(tokens, 2))
for c in combin:
if c[0] < N:
inds[0].append(c[0]); inds[1].append(c[1])
if c[1] < N:
inds[0].append(c[1]); inds[1].append(c[0])
if 'vqa' in target: # VQA 2.0
for name in names:
assert name in ['train', 'val', 'test-dev2015', 'test2015']
question_path = os.path.join(
dataroot, 'v2_OpenEnded_mscoco_%s_questions.json' % \
(name + '2014' if 'test'!=name[:4] else name))
questions = json.load(open(question_path))['questions']
for question in questions:
populate(inds, df, question['question'])
if 'vg' in target: # Visual Genome
question_path = os.path.join(dataroot, 'question_answers.json')
vgq = json.load(open(question_path, 'r'))
for vg in vgq:
for q in vg['qas']:
populate(inds, df, q['question'])
if 'cap' in target: # MSCOCO Caption
for split in ['train2017', 'val2017']:
captions = json.load(open('data/annotations/captions_%s.json' % split, 'r'))
for caps in captions['annotations']:
populate(inds, df, caps['caption'])
# TF-IDF
vals = [1] * len(inds[1])
for idx, col in enumerate(inds[1]):
assert df[col] >= 1, 'document frequency should be greater than zero!'
        vals[idx] /= df[col]  # scale each co-occurrence by inverse document frequency
# Make stochastic matrix
def normalize(inds, vals):
z = dict()
for row, val in zip(inds[0], vals):
z[row] = z.get(row, 0) + val
for idx, row in enumerate(inds[0]):
vals[idx] /= z[row]
return vals
vals = normalize(inds, vals)
tfidf = torch.sparse.FloatTensor(torch.LongTensor(inds), torch.FloatTensor(vals))
tfidf = tfidf.coalesce()
# Latent word embeddings
emb_dim = 300
glove_file = 'data/glove/glove.6B.%dd.txt' % emb_dim
weights, word2emb = utils.create_glove_embedding_init(dictionary.idx2word[N:], glove_file)
print('tf-idf stochastic matrix (%d x %d) is generated.' % (tfidf.size(0), tfidf.size(1)))
return tfidf, weights
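# Usage sketch (hypothetical; assumes a project Dictionary instance and the
# GloVe files referenced above are present):
#
#     tfidf, weights = tfidf_from_questions(['train', 'val'], dictionary)
#     # Each row of the sparse `tfidf` matrix is a distribution over known
#     # words, so tfidf.matmul(known_word_embeddings) yields smoothed vectors
#     # for the extended vocabulary entries dictionary.idx2word[N:].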
| 23,011 | 35.8192 | 149 | py |
visualbert | visualbert-master/visualbert/utils/pytorch_misc.py | """
Miscellaneous PyTorch helpers: batch timing, sequence padding, gradient
clipping, checkpointing and parameter inspection.
"""
import os
import re
import shutil
import time
import numpy as np
import pandas as pd
import torch
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.nn.util import device_mapping
from allennlp.training.trainer import move_optimizer_to_cuda
from torch.nn import DataParallel
import torch.nn.functional as F
def time_batch(gen, reset_every=100):
"""
Gets timing info for a batch
:param gen:
:param reset_every: How often we'll reset
:return:
"""
start = time.time()
start_t = 0
for i, item in enumerate(gen):
time_per_batch = (time.time() - start) / (i + 1 - start_t)
yield time_per_batch, item
if i % reset_every == 0:
start = time.time()
start_t = i
class Flattener(torch.nn.Module):
def __init__(self):
"""
Flattens last 3 dimensions to make it only batch size, -1
"""
super(Flattener, self).__init__()
def forward(self, x):
return x.view(x.size(0), -1)
def pad_sequence(sequence, lengths):
"""
:param sequence: [\sum b, .....] sequence
:param lengths: [b1, b2, b3...] that sum to \sum b
:return: [len(lengths), maxlen(b), .....] tensor
"""
output = sequence.new_zeros(len(lengths), max(lengths), *sequence.shape[1:])
start = 0
for i, diff in enumerate(lengths):
if diff > 0:
output[i, :diff] = sequence[start:(start + diff)]
start += diff
return output
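def _pad_sequence_example():
    # Illustrative sketch (not called anywhere): three sequences of lengths
    # 2, 1 and 3 stored flat as a [6, 2] tensor come back as a zero-padded
    # [3, 3, 2] batch.
    flat = torch.arange(12, dtype=torch.float32).view(6, 2)
    padded = pad_sequence(flat, [2, 1, 3])
    assert padded.shape == (3, 3, 2)
    return padded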
def extra_leading_dim_in_sequence(f, x, mask):
return f(x.view(-1, *x.shape[2:]), mask.view(-1, mask.shape[2])).view(*x.shape[:3], -1)
def clip_grad_norm(named_parameters, max_norm, clip=True, verbose=False):
"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Arguments:
parameters (Iterable[Variable]): an iterable of Variables that will have
gradients normalized
max_norm (float or int): max norm of the gradients
Returns:
Total norm of the parameters (viewed as a single vector).
"""
max_norm = float(max_norm)
parameters = [(n, p) for n, p in named_parameters if p.grad is not None]
total_norm = 0
param_to_norm = {}
param_to_shape = {}
for n, p in parameters:
param_norm = p.grad.data.norm(2)
total_norm += param_norm ** 2
param_to_norm[n] = param_norm
param_to_shape[n] = tuple(p.size())
if np.isnan(param_norm.item()):
raise ValueError("the param {} was null.".format(n))
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef.item() < 1 and clip:
for n, p in parameters:
p.grad.data.mul_(clip_coef)
if verbose:
print('---Total norm {:.3f} clip coef {:.3f}-----------------'.format(total_norm, clip_coef))
for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]):
print("{:<60s}: {:.3f}, ({}: {})".format(name, norm, np.prod(param_to_shape[name]), param_to_shape[name]))
print('-------------------------------', flush=True)
return pd.Series({name: norm.item() for name, norm in param_to_norm.items()})
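# Usage sketch (hypothetical call site; `model`, `loss` and `optimizer` are
# stand-ins): clip right after the backward pass,
#
#     loss.backward()
#     grad_norms = clip_grad_norm(model.named_parameters(), max_norm=1.0)
#     optimizer.step()
#
# The returned pandas Series maps parameter names to gradient norms, which
# makes it easy to spot layers with exploding gradients.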
def find_latest_checkpoint(serialization_dir, epoch_to_load = None):
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = (serialization_dir is not None and
any("model_state_epoch_" in x for x in os.listdir(serialization_dir)))
if not have_checkpoint:
return None
serialization_files = os.listdir(serialization_dir)
model_checkpoints = [x for x in serialization_files if "model_state_epoch" in x]
# Get the last checkpoint file. Epochs are specified as either an
# int (for end of epoch files) or with epoch and timestamp for
# within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
found_epochs = [
        re.search(r"model_state_epoch_([0-9\.\-]+)\.th", x).group(1)
for x in model_checkpoints
]
int_epochs = []
for epoch in found_epochs:
pieces = epoch.split('.')
if len(pieces) == 1:
# Just a single epoch without timestamp
int_epochs.append([int(pieces[0]), 0])
else:
# has a timestamp
int_epochs.append([int(pieces[0]), pieces[1]])
last_epoch = sorted(int_epochs, reverse=True)[0]
if epoch_to_load is None:
if last_epoch[1] == 0:
epoch_to_load = str(last_epoch[0])
else:
epoch_to_load = '{0}.{1}'.format(last_epoch[0], last_epoch[1])
model_path = os.path.join(serialization_dir,
"model_state_epoch_{}.th".format(epoch_to_load))
training_state_path = os.path.join(serialization_dir,
"training_state_epoch_{}.th".format(epoch_to_load))
return model_path, training_state_path
def find_latest_checkpoint_step(serialization_dir, epoch_to_load = None):
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = (serialization_dir is not None and
any("model_step_" in x for x in os.listdir(serialization_dir)))
if not have_checkpoint:
return None
serialization_files = os.listdir(serialization_dir)
model_checkpoints = [x for x in serialization_files if "model_step_" in x]
# Get the last checkpoint file. Epochs are specified as either an
# int (for end of epoch files) or with epoch and timestamp for
# within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
info = [(x, int(x.split('_')[2]), int(x.split('_')[4].split('.')[0])) for x in model_checkpoints]
max_epoch = -1
max_step = -1
max_index = -1
for index, i in enumerate(info):
if i[2] > max_epoch:
max_epoch = i[2]
max_step = i[1]
max_index = index
elif i[2] == max_epoch:
if i[1] > max_step:
max_step = i[1]
max_index = index
model_path = os.path.join(serialization_dir,
"model_step_{}_epoch_{}.th".format(max_step, max_epoch))
training_state_path = os.path.join(serialization_dir,
"training_step_{}_epoch_{}.th".format(max_step, max_epoch))
return model_path, training_state_path
def save_checkpoint(model, optimizer, serialization_dir, epoch, val_metric_per_epoch, is_best=None,
learning_rate_scheduler=None) -> None:
"""
Saves a checkpoint of the model to self._serialization_dir.
Is a no-op if self._serialization_dir is None.
Parameters
----------
epoch : Union[int, str], required.
The epoch of training. If the checkpoint is saved in the middle
of an epoch, the parameter is a string with the epoch and timestamp.
is_best: bool, optional (default = None)
A flag which causes the model weights at the given epoch to
be copied to a "best.th" file. The value of this flag should
be based on some validation metric computed by your model.
"""
if serialization_dir is not None:
model_path = os.path.join(serialization_dir, "model_state_epoch_{}.th".format(epoch))
model_state = model.module.state_dict() if isinstance(model, DataParallel) else model.state_dict()
torch.save(model_state, model_path)
training_state = {'epoch': epoch,
'val_metric_per_epoch': val_metric_per_epoch,
'optimizer': optimizer.state_dict()
}
if learning_rate_scheduler is not None:
training_state["learning_rate_scheduler"] = \
learning_rate_scheduler.lr_scheduler.state_dict()
training_path = os.path.join(serialization_dir,
"training_state_epoch_{}.th".format(epoch))
torch.save(training_state, training_path)
if is_best:
print("Best validation performance so far. Copying weights to '{}/best.th'.".format(serialization_dir))
shutil.copyfile(model_path, os.path.join(serialization_dir, "best.th"))
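# Usage sketch (hypothetical training loop; `evaluate` and `num_epochs` are
# stand-ins):
#
#     val_metric_per_epoch = []
#     for epoch in range(num_epochs):
#         ...  # train for one epoch
#         val_metric_per_epoch.append(evaluate(model))
#         save_checkpoint(model, optimizer, serialization_dir, epoch,
#                         val_metric_per_epoch,
#                         is_best=val_metric_per_epoch[-1] == max(val_metric_per_epoch))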
def restore_best_checkpoint(model, serialization_dir):
fn = os.path.join(serialization_dir, 'best.th')
model_state = torch.load(fn, map_location=device_mapping(-1))
assert os.path.exists(fn)
if isinstance(model, DataParallel):
model.module.load_state_dict(model_state)
else:
model.load_state_dict(model_state)
def restore_checkpoint_flexible(model, fn):
model_state = torch.load(fn, map_location=device_mapping(-1))
assert os.path.exists(fn)
if isinstance(model, DataParallel):
load_state_dict_flexible(model.module, model_state)
else:
load_state_dict_flexible(model, model_state)
def load_state_dict_flexible(model, state_dict):
try:
model.load_state_dict(state_dict)
except:
print("Full loading failed!! Try partial loading!!")
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Skipped: " + name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
print("Successfully loaded: "+name)
except:
print("Part load failed: " + name)
def restore_checkpoint(model, optimizer, serialization_dir, epoch_to_load = None, learning_rate_scheduler=None):
"""
Restores a model from a serialization_dir to the last saved checkpoint.
This includes an epoch count and optimizer state, which is serialized separately
from model parameters. This function should only be used to continue training -
if you wish to load a model for inference/load parts of a model into a new
computation graph, you should use the native Pytorch functions:
`` model.load_state_dict(torch.load("/path/to/model/weights.th"))``
If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
this function will do nothing and return 0.
Returns
-------
epoch: int
The epoch at which to resume training, which should be one after the epoch
in the saved training state.
"""
latest_checkpoint = find_latest_checkpoint(serialization_dir, epoch_to_load)
latest_checkpoint_step = find_latest_checkpoint_step(serialization_dir, epoch_to_load)
if latest_checkpoint is None and latest_checkpoint_step is None:
# No checkpoint to restore, start at 0
return 0, []
if latest_checkpoint is None:
latest_checkpoint = latest_checkpoint_step
model_path, training_state_path = latest_checkpoint
# Load the parameters onto CPU, then transfer to GPU.
# This avoids potential OOM on GPU for large models that
# load parameters onto GPU then make a new GPU copy into the parameter
# buffer. The GPU transfer happens implicitly in load_state_dict.
model_state = torch.load(model_path, map_location=device_mapping(-1))
training_state = torch.load(training_state_path, map_location=device_mapping(-1))
if isinstance(model, DataParallel):
model.module.load_state_dict(model_state)
else:
model.load_state_dict(model_state)
# idk this is always bad luck for me
optimizer.load_state_dict(training_state["optimizer"])
if learning_rate_scheduler is not None and "learning_rate_scheduler" in training_state:
learning_rate_scheduler.lr_scheduler.load_state_dict(
training_state["learning_rate_scheduler"])
move_optimizer_to_cuda(optimizer)
# We didn't used to save `validation_metric_per_epoch`, so we can't assume
# that it's part of the trainer state. If it's not there, an empty list is all
# we can do.
if "val_metric_per_epoch" not in training_state:
print("trainer state `val_metric_per_epoch` not found, using empty list")
        val_metric_per_epoch = []
else:
val_metric_per_epoch = training_state["val_metric_per_epoch"]
if isinstance(training_state["epoch"], int):
epoch_to_return = training_state["epoch"] + 1
else:
epoch_to_return = int(training_state["epoch"].split('.')[0]) + 1
print("########### Restroing states... from {}, at epoch {}".format(model_path, epoch_to_return))
if "step" in training_state:
print("########### Restroing states... from {}, at step {}".format(model_path, training_state["step"]))
return epoch_to_return, val_metric_per_epoch
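# Usage sketch (hypothetical): resume from the newest checkpoint files in the
# directory; `start_epoch` comes back as 0 when nothing has been saved yet.
#
#     start_epoch, val_metrics = restore_checkpoint(model, optimizer,
#                                                   serialization_dir)
#     for epoch in range(start_epoch, num_epochs):
#         ...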
def detokenize(array, vocab):
"""
Given an array of ints, we'll turn this into a string or a list of strings.
:param array: possibly multidimensional numpy array
:return:
"""
if array.ndim > 1:
return [detokenize(x, vocab) for x in array]
tokenized = [vocab.get_token_from_index(v) for v in array]
return ' '.join([x for x in tokenized if x not in (vocab._padding_token, START_SYMBOL, END_SYMBOL)])
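# Usage sketch (hypothetical; assumes an AllenNLP Vocabulary): a 2-D array is
# detokenized row by row, with padding and sentence-boundary tokens dropped.
#
#     ids = np.array([[4, 9, 0], [7, 2, 0]])
#     detokenize(ids, vocab)   # -> a list of two strings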
def print_para(model):
"""
Prints parameters of a model
    :param model: model whose parameter shapes and counts are printed
:return:
"""
st = {}
total_params = 0
total_params_training = 0
for p_name, p in model.named_parameters():
# if not ('bias' in p_name.split('.')[-1] or 'bn' in p_name.split('.')[-1]):
st[p_name] = ([str(x) for x in p.size()], np.prod(p.size()), p.requires_grad)
total_params += np.prod(p.size())
if p.requires_grad:
total_params_training += np.prod(p.size())
pd.set_option('display.max_columns', None)
shapes_df = pd.DataFrame([(p_name, '[{}]'.format(','.join(size)), prod, p_req_grad)
for p_name, (size, prod, p_req_grad) in sorted(st.items(), key=lambda x: -x[1][1])],
columns=['name', 'shape', 'size', 'requires_grad']).set_index('name')
print('\n {:.1f}M total parameters. {:.1f}M training \n ----- \n {} \n ----'.format(total_params / 1000000.0,
total_params_training / 1000000.0,
shapes_df.to_string()),
flush=True)
return shapes_df
def batch_index_iterator(len_l, batch_size, skip_end=True):
"""
Provides indices that iterate over a list
:param len_l: int representing size of thing that we will
iterate over
:param batch_size: size of each batch
:param skip_end: if true, don't iterate over the last batch
:return: A generator that returns (start, end) tuples
as it goes through all batches
"""
iterate_until = len_l
if skip_end:
iterate_until = (len_l // batch_size) * batch_size
for b_start in range(0, iterate_until, batch_size):
yield (b_start, min(b_start + batch_size, len_l))
def batch_iterator(seq, batch_size, skip_end=True):
for b_start, b_end in batch_index_iterator(len(seq), batch_size, skip_end=skip_end):
yield seq[b_start:b_end]
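def _batch_iterator_example():
    # Illustrative sketch (not called anywhere): with skip_end=True the
    # trailing short batch is dropped, so 10 items at batch_size 4 yield
    # exactly two full batches.
    chunks = list(batch_iterator(list(range(10)), 4, skip_end=True))
    assert chunks == [[0, 1, 2, 3], [4, 5, 6, 7]]
    return chunks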
def masked_unk_softmax(x, dim, mask_idx):
x1 = F.softmax(x, dim=dim)
x1[:, mask_idx] = 0
x1_sum = torch.sum(x1, dim=1, keepdim=True)
y = x1 / x1_sum
return y
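def _masked_unk_softmax_example():
    # Illustrative sketch (not called anywhere): probability mass at the UNK
    # index (0 here) is zeroed out and the rest renormalized, so each row
    # still sums to one.
    probs = masked_unk_softmax(torch.randn(2, 5), dim=1, mask_idx=0)
    assert torch.allclose(probs.sum(dim=1), torch.ones(2))
    return probs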
def compute_score_with_logits(logits, labels):
logits = masked_unk_softmax(logits, 1, 0)
logits = torch.max(logits, 1)[1].data # argmax
one_hots = torch.zeros_like(labels)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores | 15,975 | 38.156863 | 122 | py |
visualbert | visualbert-master/visualbert/utils/detector.py | """
ok so I lied. it's not a detector, it's the resnet backbone
"""
import torch
import torch.nn as nn
import torch.nn.parallel
from torchvision.models import resnet
from utils.pytorch_misc import Flattener
import torch.utils.model_zoo as model_zoo
#from config_vcr import USE_IMAGENET_PRETRAINED
from utils.pytorch_misc import pad_sequence
from torch.nn import functional as F
USE_IMAGENET_PRETRAINED = True
def _load_resnet(pretrained=True):
# huge thx to https://github.com/ruotianluo/pytorch-faster-rcnn/blob/master/lib/nets/resnet_v1.py
backbone = resnet.resnet50(pretrained=False)
if pretrained:
backbone.load_state_dict(model_zoo.load_url(
'https://s3.us-west-2.amazonaws.com/ai2-rowanz/resnet50-e13db6895d81.th'))
for i in range(2, 4):
getattr(backbone, 'layer%d' % i)[0].conv1.stride = (2, 2)
getattr(backbone, 'layer%d' % i)[0].conv2.stride = (1, 1)
return backbone
def _load_resnet_imagenet(pretrained=True):
# huge thx to https://github.com/ruotianluo/pytorch-faster-rcnn/blob/master/lib/nets/resnet_v1.py
backbone = resnet.resnet50(pretrained=pretrained)
for i in range(2, 4):
getattr(backbone, 'layer%d' % i)[0].conv1.stride = (2, 2)
getattr(backbone, 'layer%d' % i)[0].conv2.stride = (1, 1)
# use stride 1 for the last conv4 layer (same as tf-faster-rcnn)
backbone.layer4[0].conv2.stride = (1, 1)
backbone.layer4[0].downsample[0].stride = (1, 1)
# # Make batchnorm more sensible
# for submodule in backbone.modules():
# if isinstance(submodule, torch.nn.BatchNorm2d):
# submodule.momentum = 0.01
return backbone
class SimpleDetector(nn.Module):
def __init__(self, pretrained=True, average_pool=True, semantic=True, final_dim=1024):
"""
:param average_pool: whether or not to average pool the representations
:param pretrained: Whether we need to load from scratch
:param semantic: Whether or not we want to introduce the mask and the class label early on (default Yes)
"""
super(SimpleDetector, self).__init__()
# huge thx to https://github.com/ruotianluo/pytorch-faster-rcnn/blob/master/lib/nets/resnet_v1.py
backbone = _load_resnet_imagenet(pretrained=pretrained) if USE_IMAGENET_PRETRAINED else _load_resnet(
pretrained=pretrained)
self.backbone = nn.Sequential(
backbone.conv1,
backbone.bn1,
backbone.relu,
backbone.maxpool,
backbone.layer1,
backbone.layer2,
backbone.layer3,
            # backbone.layer4
)
from torchvision.layers import ROIAlign
self.roi_align = ROIAlign((7, 7) if USE_IMAGENET_PRETRAINED else (14, 14),
spatial_scale=1 / 16, sampling_ratio=0)
if semantic:
self.mask_dims = 32
self.object_embed = torch.nn.Embedding(num_embeddings=81, embedding_dim=128)
self.mask_upsample = torch.nn.Conv2d(1, self.mask_dims, kernel_size=3,
stride=2 if USE_IMAGENET_PRETRAINED else 1,
padding=1, bias=True)
else:
self.object_embed = None
self.mask_upsample = None
after_roi_align = [backbone.layer4]
self.final_dim = final_dim
if average_pool:
after_roi_align += [nn.AvgPool2d(7, stride=1), Flattener()]
self.after_roi_align = torch.nn.Sequential(*after_roi_align)
self.obj_downsample = torch.nn.Sequential(
torch.nn.Dropout(p=0.1),
torch.nn.Linear(2048 + (128 if semantic else 0), final_dim),
torch.nn.ReLU(inplace=True),
)
self.regularizing_predictor = torch.nn.Linear(2048, 81)
def forward(self,
images: torch.Tensor,
boxes: torch.Tensor,
box_mask: torch.LongTensor,
classes: torch.Tensor = None,
segms: torch.Tensor = None,
):
"""
:param images: [batch_size, 3, im_height, im_width]
:param boxes: [batch_size, max_num_objects, 4] Padded boxes
:param box_mask: [batch_size, max_num_objects] Mask for whether or not each box is OK
:return: object reps [batch_size, max_num_objects, dim]
"""
# [batch_size, 2048, im_height // 32, im_width // 32
img_feats = self.backbone(images)
box_inds = box_mask.nonzero()
assert box_inds.shape[0] > 0
rois = torch.cat((
box_inds[:, 0, None].type(boxes.dtype),
boxes[box_inds[:, 0], box_inds[:, 1]],
), 1)
# Object class and segmentation representations
roi_align_res = self.roi_align(img_feats, rois)
if self.mask_upsample is not None:
assert segms is not None
segms_indexed = segms[box_inds[:, 0], None, box_inds[:, 1]] - 0.5
roi_align_res[:, :self.mask_dims] += self.mask_upsample(segms_indexed)
post_roialign = self.after_roi_align(roi_align_res)
# Add some regularization, encouraging the model to keep giving decent enough predictions
obj_logits = self.regularizing_predictor(post_roialign)
obj_labels = classes[box_inds[:, 0], box_inds[:, 1]]
cnn_regularization = F.cross_entropy(obj_logits, obj_labels, size_average=True)[None]
feats_to_downsample = post_roialign if self.object_embed is None else torch.cat((post_roialign, self.object_embed(obj_labels)), -1)
roi_aligned_feats = self.obj_downsample(feats_to_downsample)
# Reshape into a padded sequence - this is expensive and annoying but easier to implement and debug...
obj_reps = pad_sequence(roi_aligned_feats, box_mask.sum(1).tolist())
return {
'obj_reps_raw': post_roialign,
'obj_reps': obj_reps,
'obj_logits': obj_logits,
'obj_labels': obj_labels,
'cnn_regularization_loss': cnn_regularization
}
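# Usage sketch (hypothetical tensors and shapes):
#
#     detector = SimpleDetector(pretrained=True, final_dim=512)
#     out = detector(images,            # [B, 3, H, W]
#                    boxes,             # [B, max_objs, 4]
#                    box_mask,          # [B, max_objs]
#                    classes=classes,   # [B, max_objs]; needed for the
#                    segms=segms)       # regularizer / semantic branch
#     obj_reps = out['obj_reps']        # [B, max_objs, final_dim]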
| 6,108 | 41.131034 | 139 | py |
visualbert | visualbert-master/visualbert/utils/__init__.py | 0 | 0 | 0 | py | |
visualbert | visualbert-master/visualbert/utils/get_image_features/get_mask.py | #!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Perform inference on a single image or all images with a certain extension
(e.g., .jpg) in a folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import glob
import logging
import os
import sys
import numpy as np
import base64
import csv
import timeit
import json
import torch
from detectron.utils.io import cache_url
import detectron.utils.c2 as c2_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
from caffe2.python import workspace
import caffe2
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.timer import Timer
import detectron.core.test_engine as model_engine
import detectron.core.test as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
import detectron.utils.logging
import detectron.utils.vis as vis_utils
from detectron.utils.boxes import nms
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
csv.field_size_limit(sys.maxsize)
BOTTOM_UP_FIELDNAMES = ['image_id', 'image_w', 'image_h',
'num_boxes', 'boxes', 'features']
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes',
'boxes', 'features', 'object']
from get_mask_utils import detect_from_img, get_model
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--cfg',
dest='cfg',
help='cfg model file (/path/to/model_config.yaml)',
default=None,
type=str
)
parser.add_argument(
'--wts',
dest='weights',
help='weights model file (/path/to/model_weights.pkl)',
default=None,
type=str
)
parser.add_argument(
'--output_dir',
dest='output_dir',
help='output dir name',
required=True,
type=str
)
parser.add_argument(
'--image-ext',
dest='image_ext',
help='image file name extension (default: jpg)',
default='jpg',
type=str
)
parser.add_argument(
'--bbox_file',
help="csv file from bottom-up attention model",
default=None
)
parser.add_argument(
'--total_group',
help="the number of group for exracting",
type=int,
default=1
)
parser.add_argument(
'--group_id',
help=" group id for current analysis, used to shard",
type=int,
default=0
)
parser.add_argument(
'--min_bboxes',
help=" min number of bboxes",
type=int,
default=10
)
parser.add_argument(
'--max_bboxes',
help=" min number of bboxes",
type=int,
default=100
)
parser.add_argument(
'--conf_thresh',
help=" confidentce",
type=float,
default=0.2
)
parser.add_argument(
'--total_split',
help=" confidentce",
type=int,
default=1
)
parser.add_argument(
'--one_giant_file',
help=" confidentce",
type=str,
default=None
)
parser.add_argument(
'--current_split',
help=" confidentce",
type=int,
default=0
)
parser.add_argument(
'--feat_name',
help=" the name of the feature to extract, default: gpu_0/fc7",
type=str,
default="gpu_0/fc7"
)
parser.add_argument(
'im_or_folder', help='image or folder of images', default=None
)
parser.add_argument(
'--no_id',
action='store_true'
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def get_detections_from_im(cfg, model, im, image_id, feat_blob_name,
MIN_BOXES, MAX_BOXES, conf_thresh=0.2, bboxes=None):
with c2_utils.NamedCudaScope(0):
scores, cls_boxes, im_scale = infer_engine.im_detect_bbox(model,
im,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
boxes=bboxes)
box_features = workspace.FetchBlob(feat_blob_name)
#print("ss")
#print(workspace.FetchBlob("gpu_0/fc7"))
cls_prob = workspace.FetchBlob("gpu_0/cls_prob")
rois = workspace.FetchBlob("gpu_0/rois")
max_conf = np.zeros((rois.shape[0]))
# unscale back to raw image space
cls_boxes = rois[:, 1:5] / im_scale
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes], axis=1)
#print(cls_boxes[keep_boxes])
#print("keep_boxes", keep_boxes)
#print("max_conf", max_conf)
#print("cls_boxes", cls_boxes[0])
#print("im_h", im.shape[0])
return box_features[keep_boxes], max_conf[keep_boxes], cls_boxes[keep_boxes]
#return {
# "image_id": image_id,
# "image_h": np.size(im, 0),
# "image_w": np.size(im, 1),
# 'num_boxes': len(keep_boxes),
# 'boxes': base64.b64encode(cls_boxes[keep_boxes]),
# 'features': base64.b64encode(box_features[keep_boxes]),
# 'object': base64.b64encode(objects)
#}
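# Usage sketch (hypothetical; the feature dimension depends on the backbone):
#
#     im = cv2.imread(im_name)
#     feats, confs, boxes = get_detections_from_im(
#         cfg, model, im, image_id, 'gpu_0/fc7', MIN_BOXES=10, MAX_BOXES=100)
#     # feats: [K, feat_dim] pooled features, confs: [K] max class scores,
#     # boxes: [K, 4] in original image coordinates.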
def extract_bboxes(bottom_up_csv_file):
image_bboxes = {}
with open(bottom_up_csv_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t',
fieldnames=BOTTOM_UP_FIELDNAMES)
for item in reader:
item['num_boxes'] = int(item['num_boxes'])
image_id = int(item['image_id'])
image_w = float(item['image_w'])
image_h = float(item['image_h'])
bbox = np.frombuffer(
base64.b64decode(item['boxes']),
dtype=np.float32).reshape((item['num_boxes'], -1))
image_bboxes[image_id] = bbox
return image_bboxes
import os
def recurse_find_image(folder, image_list, image_ext):
files = os.listdir(folder)
files.sort()
for i in files:
path = os.path.join(folder, i)
if os.path.isdir(path):
recurse_find_image(path, image_list, image_ext)
else:
if path.endswith(image_ext):
image_list.append(path)
def main(args):
logger = logging.getLogger(__name__)
model = get_model()
start = timeit.default_timer()
im_list = []
recurse_find_image(args.im_or_folder, im_list, args.image_ext)
print(im_list[:10])
print("There are {} images to cache in total.".format(len(im_list)))
if args.total_split != 1:
im_lists = np.array_split(im_list, args.total_split)
        im_list = im_lists[args.current_split]
print("Split {}: There are currently {} images to cache.".format(args.current_split ,len(im_list)))
'''if os.path.isdir(args.im_or_folder):
im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
else:
im_list = [args.im_or_folder]'''
#print("{} images in total.".format(len(im_list)))
# extract bboxes from bottom-up attention model
image_bboxes={}
if args.bbox_file is not None:
image_bboxes = extract_bboxes(args.bbox_file)
count = 0
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
one_giant_file = args.one_giant_file
if one_giant_file is not None:
giant_file = {}
for i, im_name in enumerate(im_list):
im_base_name = os.path.basename(im_name)
if not args.no_id:
image_id = int(im_base_name.split(".")[0].split("_")[-1]) # for COCO
else:
image_id = None
if not args.no_id:
'''if image_id % args.total_group == args.group_id:
if not args.no_id:
bbox = image_bboxes[image_id] if image_id in image_bboxes else None
else:
bbox = None
im = cv2.imread(im_name)
if im is not None:
outfile = os.path.join(args.output_dir,
im_base_name.replace('jpg', 'npy'))
lock_folder = outfile.replace('npy', 'lock')
if not os.path.exists(lock_folder) and os.path.exists(outfile):
continue
if not os.path.exists(lock_folder):
os.makedirs(lock_folder)
result = get_detections_from_im(cfg, model, im,
image_id,args.feat_name,
args.min_bboxes,
args.max_bboxes,
bboxes=bbox)
np.save(outfile, result)
os.rmdir(lock_folder)
second_result = np.load(outfile)
print(result[1])
print(second_result[1])
count += 1
if count % 100 == 0:
end = timeit.default_timer()
epoch_time = end - start
print('process {:d} images after {:.1f} s'.format(count, epoch_time))'''
            assert 0, "the COCO image-id path is disabled in this script"
else:
bbox = None
im = cv2.imread(im_name)
if im is not None:
outfile = os.path.join(args.output_dir, im_base_name) + ".npz"
lock_folder = outfile + '.lock'
if not os.path.exists(lock_folder) and os.path.exists(outfile):
continue
if not os.path.exists(lock_folder):
os.makedirs(lock_folder)
detection = detect_from_img(model, im)
#for i in detection:
# detection[i] = numpy.array(detection[i])
if one_giant_file is not None:
#box_features = torch.Tensor(box_features)
#cls_boxes = torch.Tensor(cls_boxes)
#max_conf = torch.Tensor(max_conf)
giant_file[im_base_name] = detection
#np.savez(outfile, box_features=box_features, max_conf=max_conf, cls_boxes=cls_boxes)
os.rmdir(lock_folder)
count += 1
if count % 100 == 0:
end = timeit.default_timer()
epoch_time = end - start
print('process {:d} images after {:.1f} s'.format(count, epoch_time))
if one_giant_file is not None:
torch.save(giant_file, one_giant_file)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
detectron.utils.logging.setup_logging(__name__)
args = parse_args()
if args.group_id >= args.total_group:
exit("sharding group %d is greater than the total group %d" %(args.group_id, args.total_group ))
main(args)
| 12,823 | 31.383838 | 107 | py |
visualbert | visualbert-master/visualbert/utils/get_image_features/get_mask_utils.py | # Modified by Harold. Courtesy of the author of VCR
"""
Detect objects in images, saving boxes and masks to a json.
"""
from collections import defaultdict
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import logging
import os
import time
from caffe2.python import workspace
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.io import cache_url
from detectron.utils.logging import setup_logging
from detectron.utils.timer import Timer
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
from detectron.utils.vis import convert_from_cls_format, kp_connections, get_class_string
import detectron.utils.keypoints as keypoint_utils
from tqdm import tqdm
from detectron.utils.colormap import colormap
import pycocotools.mask as mask_util
import numpy as np
import json
import pickle as pkl
# Matplotlib requires certain adjustments in some environments
# Must happen before importing matplotlib
import detectron.utils.env as envu
envu.set_up_matplotlib()
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
THRESHOLD = 0.7
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
logger = logging.getLogger('__main__')
dummy_coco_dataset = dummy_datasets.get_coco_dataset()
def get_model(use_keypoints=False):
"""
Obtain model
:param use_keypoints: whether to use keypoints or mask rcnn
:return:
"""
if use_keypoints:
MODEL_CONFIG = '/home/rowan/tools/Detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml'
MODEL_WEIGHTS = 'https://s3-us-west-2.amazonaws.com/detectron/37732318/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml.16_55_09.Lx8H5JVu/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl'
else:
MODEL_CONFIG = '/local/harold/vqa/trained_detectron/e2e_mask_rcnn_X-101-64x4d-FPN_1x.yaml'
MODEL_WEIGHTS = '/local/harold/vqa/trained_detectron/e2e_mask_rcnn_X-101-64x4d-FPN_1x.pkl'
merge_cfg_from_file(MODEL_CONFIG)
cfg.NUM_GPUS = 1
cfg.MODEL.KEYPOINTS_ON = use_keypoints
cfg.MODEL.MASK_ON = not use_keypoints
weights_arg = cache_url(MODEL_WEIGHTS, cfg.DOWNLOAD_CACHE)
assert_and_infer_cfg(cache_urls=False)
assert not cfg.MODEL.RPN_ONLY, 'RPN models are not supported'
assert not cfg.TEST.PRECOMPUTED_PROPOSALS, 'Models that require precomputed proposals are not supported'
model = infer_engine.initialize_model_from_cfg(weights_arg)
return model
def detect_from_img(model, im, dets_pkl_fn=None, dets_json_fn=None, debug_img_fn=None):
"""
Detect the boxes and segmentations in an image. Currently doesn't do segmentation.
:param im: Image
:param dets_pkl_fn: We'll back up the detections to here
:param dets_json_fn: We'll save detections here (above THRESHOLD) for turking
:param debug_img_fn: We'll backup the detections in a nice image, to this file
:return: boxes, obj names, classes if successful, otherwise NONE NONE NONE.
"""
# logger.info('Processing {}'.format(img_fn))
# im = cv2.imread(img_fn)
timers = defaultdict(Timer)
t = time.time()
with c2_utils.NamedCudaScope(0):
cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
model, im, None, timers=timers
)
#logger.info('Inference time: {:.3f}s'.format(time.time() - t))
#for k, v in timers.items():
# logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
if not isinstance(cls_boxes, list) or not any([x.size > 0 for x in cls_boxes if hasattr(x, 'size')]):
print("Skip because of other things")
return None, None, None
# Get the mask for visualization. #TODO do keypoints
boxes, segms, keypoints, classes = convert_from_cls_format(
cls_boxes, cls_segms, cls_keyps)
inds = np.where(boxes[:, -1] > THRESHOLD)[0]
if inds.size == 0:
print("Skip because of harsh threshhold")
return None, None, None
if dets_pkl_fn is not None:
with open(dets_pkl_fn, 'wb') as f:
pkl.dump({'boxes': cls_boxes, 'segms': cls_segms, 'keyps': cls_keyps, 'im_shape': im.shape}, f)
boxes = boxes[inds]
segms = [segms[i] for i in inds.tolist()] if segms is not None else None
classes = np.array([classes[i] for i in inds.tolist()])
keypoints = [keypoints[i].tolist() for i in inds.tolist()] if keypoints is not None else None
contours = []
if segms is not None:
masks = mask_util.decode(segms).transpose((2, 0, 1))
for mask_slice in masks:
contour, hier = cv2.findContours(
mask_slice.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
contours.append([c.squeeze(1).tolist() for c in contour])
# get the names
obj_names = []
for object_counter, obj_id in enumerate(classes):
obj_names.append('{} ({})'.format(object_counter+1, dummy_coco_dataset.classes[obj_id].replace(' ', '')))
if dets_json_fn is not None:
with open(dets_json_fn, 'w') as f:
json.dump({
'boxes': boxes.tolist(), # [num_boxes, dims]
'segms': contours, # [num_boxes, num_segms, num_points, 2]
'names': obj_names,
'width': int(im.shape[1]),
'height': int(im.shape[0]),
'keyps': keypoints,
}, f)
if debug_img_fn is not None:
vis_one_image(im[:, :, ::-1], debug_img_fn, boxes, contours, obj_names, keypoints,
dpi=200, box_alpha=0.3)
return {'boxes': boxes.tolist(), # [num_boxes, dims]
'segms': contours, # [num_boxes, num_segms, num_points, 2]
'names': obj_names,
'width': int(im.shape[1]),
'height': int(im.shape[0]),
'keyps': keypoints}
#return boxes, obj_names, classes
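# Note on the return value: on success detect_from_img returns a dict with
# keys 'boxes' ([num_boxes, 5], last column is the detection score), 'segms'
# (contour point lists per box), 'names', 'width', 'height' and 'keyps'; when
# nothing scores above THRESHOLD it returns the tuple (None, None, None), so
# callers should check for that case.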
def vis_one_image(
im, im_name, boxes, segm_contours, obj_names, keypoints=None,
kp_thresh=2, dpi=200, box_alpha=0.0, show_class=True):
"""Visual debugging of detections. We assume that there are detections"""
dataset_keypoints, _ = keypoint_utils.get_keypoints()
color_list = colormap(rgb=True) / 255
kp_lines = kp_connections(dataset_keypoints)
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
fig = plt.figure(frameon=False)
fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.axis('off')
fig.add_axes(ax)
ax.imshow(im)
assert boxes is not None
# Display in largest to smallest order to reduce occlusion
areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
sorted_inds = np.argsort(-areas)
for mask_color_id, i in enumerate(sorted_inds):
bbox = boxes[i, :4]
score = boxes[i, -1]
# show box
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1],
fill=False, edgecolor=color_list[mask_color_id % len(color_list)],
linewidth=3, alpha=box_alpha))
if show_class:
# TODO: Make some boxes BIGGER if they are far from other things
y_coord = bbox[1] - 2
fontsize = max(min(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 40, 5)
if fontsize * 2 > y_coord:
y_coord += fontsize * 2 + 2
ax.text(
bbox[0], y_coord,
obj_names[i] + ' {:0.2f}'.format(score).lstrip('0'),
fontsize=fontsize,
family='serif',
bbox=dict(
facecolor=color_list[mask_color_id % len(color_list)],
alpha=0.4, pad=0, edgecolor='none'),
color='white')
# show mask
if len(segm_contours) > 0:
img = np.ones(im.shape)
color_mask = color_list[mask_color_id % len(color_list), 0:3]
w_ratio = .4
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
for c in range(3):
img[:, :, c] = color_mask[c]
for segm_part in segm_contours[i]:
polygon = Polygon(
np.array(segm_part),
fill=True, facecolor=color_mask,
edgecolor='w', linewidth=1.2,
alpha=0.5)
ax.add_patch(polygon)
# show keypoints
if keypoints is not None and len(keypoints) > i:
kps = np.array(keypoints[i])
plt.autoscale(False)
for l in range(len(kp_lines)):
i1 = kp_lines[l][0]
i2 = kp_lines[l][1]
if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
x = [kps[0, i1], kps[0, i2]]
y = [kps[1, i1], kps[1, i2]]
line = plt.plot(x, y)
plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
if kps[2, i1] > kp_thresh:
plt.plot(
kps[0, i1], kps[1, i1], '.', color=colors[l],
markersize=3.0, alpha=0.7)
if kps[2, i2] > kp_thresh:
plt.plot(
kps[0, i2], kps[1, i2], '.', color=colors[l],
markersize=3.0, alpha=0.7)
# add mid shoulder / mid hip for better visualization
mid_shoulder = (
kps[:2, dataset_keypoints.index('right_shoulder')] +
kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
sc_mid_shoulder = np.minimum(
kps[2, dataset_keypoints.index('right_shoulder')],
kps[2, dataset_keypoints.index('left_shoulder')])
mid_hip = (
kps[:2, dataset_keypoints.index('right_hip')] +
kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
sc_mid_hip = np.minimum(
kps[2, dataset_keypoints.index('right_hip')],
kps[2, dataset_keypoints.index('left_hip')])
if (sc_mid_shoulder > kp_thresh and
kps[2, dataset_keypoints.index('nose')] > kp_thresh):
x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
line = plt.plot(x, y)
plt.setp(
line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)
if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
x = [mid_shoulder[0], mid_hip[0]]
y = [mid_shoulder[1], mid_hip[1]]
line = plt.plot(x, y)
plt.setp(
line, color=colors[len(kp_lines) + 1], linewidth=1.0,
alpha=0.7)
ext = im_name.split('.')[-1]
rest_of_the_fn = im_name[:-(len(ext) + 1)]
ext2use = 'png' if ext == 'jpg' else ext
output_name = rest_of_the_fn + '.' + ext2use
fig.savefig(output_name, dpi=dpi)
plt.close('all')
# Convert to JPG manually... ugh
if ext == 'jpg':
assert os.path.exists(output_name)
png_img = cv2.imread(output_name)
cv2.imwrite(rest_of_the_fn + '.' + ext, png_img, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
os.remove(output_name)
def convert_detections(im_file, dets_pkl_fn, dets_json_fn=None, debug_img_fn=None):
"""
Update format for detections....
:param im: Image
:param dets_pkl_fn: We'll back up the detections to here
:param dets_json_fn: We'll save detections here (above THRESHOLD) for turking
:param debug_img_fn: We'll backup the detections in a nice image, to this file
:return: boxes, obj names, classes if successful, otherwise NONE NONE NONE.
"""
with open(dets_pkl_fn, 'rb') as f:
pkl_dict = pkl.load(f)
cls_boxes = pkl_dict['boxes']
cls_segms = pkl_dict['segms']
cls_keyps = pkl_dict['keyps']
im_shape = pkl_dict['im_shape']
if not isinstance(cls_boxes, list) or not any([x.size > 0 for x in cls_boxes if hasattr(x, 'size')]):
return None, None, None
# Get the mask for visualization. #TODO do keypoints
boxes, segms, keypoints, classes = convert_from_cls_format(
cls_boxes, cls_segms, cls_keyps)
inds = np.where(boxes[:, -1] > THRESHOLD)[0]
if inds.size == 0:
return None, None, None
boxes = boxes[inds]
segms = [segms[i] for i in inds.tolist()] if segms is not None else None
classes = np.array([classes[i] for i in inds.tolist()])
keypoints = [keypoints[i].tolist() for i in inds.tolist()] if keypoints is not None else None
contours = []
if segms is not None:
masks = mask_util.decode(segms).transpose((2, 0, 1))
for mask_slice in masks:
contour, hier = cv2.findContours(
mask_slice.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
contours.append([c.squeeze(1).tolist() for c in contour])
# get the names
obj_names = []
for object_counter, obj_id in enumerate(classes):
obj_names.append('{} ({})'.format(object_counter+1, dummy_coco_dataset.classes[obj_id].replace(' ', '')))
# object_counter = defaultdict(int)
# obj_names = []
# for obj_id in classes:
# object_counter[obj_id] += 1
# obj_names.append('[{}{}]'.format(dummy_coco_dataset.classes[obj_id].replace(' ', ''),
# object_counter[obj_id]))
if dets_json_fn is not None:
with open(dets_json_fn, 'w') as f:
json.dump({
'boxes': boxes.tolist(), # [num_boxes, dims]
'segms': contours, # [num_boxes, num_segms, num_points, 2]
'names': obj_names,
'width': int(im_shape[1]),
'height': int(im_shape[0]),
'keyps': keypoints,
}, f)
if debug_img_fn is not None:
im = cv2.imread(im_file)
vis_one_image(im[:, :, ::-1], debug_img_fn, boxes, contours, obj_names, keypoints,
dpi=200, box_alpha=0.3)
return boxes, obj_names, classes
if __name__ == "__main__":
model = get_model()
return_dict = detect_from_img(model, im) | 14,915 | 38.989276 | 268 | py |
visualbert | visualbert-master/visualbert/utils/get_image_features/extract_image_features_nlvr.py | # Modified by Harold
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Perform inference on a single image or all images with a certain extension
(e.g., .jpg) in a folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import glob
import logging
import os
import sys
import numpy as np
import base64
import csv
import timeit
import json
import torch
from tqdm import tqdm
from detectron.utils.io import cache_url
import detectron.utils.c2 as c2_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
from caffe2.python import workspace
import caffe2
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.timer import Timer
import detectron.core.test_engine as model_engine
import detectron.core.test as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
import detectron.utils.logging
import detectron.utils.vis as vis_utils
from detectron.utils.boxes import nms
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
csv.field_size_limit(sys.maxsize)
BOTTOM_UP_FIELDNAMES = ['image_id', 'image_w', 'image_h',
'num_boxes', 'boxes', 'features']
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes',
'boxes', 'features', 'object']
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--cfg',
dest='cfg',
help='cfg model file (/path/to/model_config.yaml)',
default=None,
type=str
)
parser.add_argument(
'--wts',
dest='weights',
help='weights model file (/path/to/model_weights.pkl)',
default=None,
type=str
)
parser.add_argument(
'--output_dir',
dest='output_dir',
help='output dir name',
required=True,
type=str
)
parser.add_argument(
'--image-ext',
dest='image_ext',
help='image file name extension (default: jpg)',
default='jpg',
type=str
)
parser.add_argument(
'--bbox_file',
help="csv file from bottom-up attention model",
default=None
)
parser.add_argument(
'--total_group',
help="the number of group for exracting",
type=int,
default=1
)
parser.add_argument(
'--group_id',
help=" group id for current analysis, used to shard",
type=int,
default=0
)
parser.add_argument(
'--min_bboxes',
help=" min number of bboxes",
type=int,
default=10
)
parser.add_argument(
'--max_bboxes',
help=" min number of bboxes",
type=int,
default=100
)
parser.add_argument(
'--conf_thresh',
help=" confidentce",
type=float,
default=0.2
)
parser.add_argument(
'--total_split',
help=" confidentce",
type=int,
default=1
)
parser.add_argument(
'--one_giant_file',
help=" confidentce",
type=str,
default=None
)
parser.add_argument(
'--current_split',
help=" confidentce",
type=int,
default=0
)
parser.add_argument(
'--feat_name',
help=" the name of the feature to extract, default: gpu_0/fc7",
type=str,
default="gpu_0/fc7"
)
parser.add_argument(
'im_or_folder', help='image or folder of images', default=None
)
parser.add_argument(
'--no_id',
action='store_true'
)
parser.add_argument(
'--existing',
type=str,
default=None
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def get_detections_from_im(cfg, model, im, image_id, feat_blob_name,
MIN_BOXES, MAX_BOXES, conf_thresh=0.2, bboxes=None):
with c2_utils.NamedCudaScope(0):
scores, cls_boxes, im_scale = infer_engine.im_detect_bbox(model,
im,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
boxes=bboxes)
box_features = workspace.FetchBlob(feat_blob_name)
cls_prob = workspace.FetchBlob("gpu_0/cls_prob")
rois = workspace.FetchBlob("gpu_0/rois")
max_conf = np.zeros((rois.shape[0]))
# unscale back to raw image space
cls_boxes = rois[:, 1:5] / im_scale
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes], axis=1)
return box_features[keep_boxes], max_conf[keep_boxes], cls_boxes[keep_boxes]
#return {
# "image_id": image_id,
# "image_h": np.size(im, 0),
# "image_w": np.size(im, 1),
# 'num_boxes': len(keep_boxes),
# 'boxes': base64.b64encode(cls_boxes[keep_boxes]),
# 'features': base64.b64encode(box_features[keep_boxes]),
# 'object': base64.b64encode(objects)
#}
def extract_bboxes(bottom_up_csv_file):
image_bboxes = {}
with open(bottom_up_csv_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t',
fieldnames=BOTTOM_UP_FIELDNAMES)
for item in reader:
item['num_boxes'] = int(item['num_boxes'])
image_id = int(item['image_id'])
image_w = float(item['image_w'])
image_h = float(item['image_h'])
bbox = np.frombuffer(
base64.b64decode(item['boxes']),
dtype=np.float32).reshape((item['num_boxes'], -1))
image_bboxes[image_id] = bbox
return image_bboxes
import os
def recurse_find_image(folder, image_list, image_ext):
files = os.listdir(folder)
files.sort()
for i in files:
path = os.path.join(folder, i)
if os.path.isdir(path):
recurse_find_image(path, image_list, image_ext)
else:
if path.endswith(image_ext):
image_list.append(path)
def main(args):
logger = logging.getLogger(__name__)
merge_cfg_from_file(args.cfg)
cfg.NUM_GPUS = 1
args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(args.weights)
start = timeit.default_timer()
im_list = []
recurse_find_image(args.im_or_folder, im_list, args.image_ext)
print("There are {} images to cache in total.".format(len(im_list)))
if args.total_split != 1:
im_lists = np.array_split(im_list, args.total_split)
        im_list = im_lists[args.current_split]
print("Split {}: There are currently {} images to cache.".format(args.current_split ,len(im_list)))
# extract bboxes from bottom-up attention model
image_bboxes={}
if args.bbox_file is not None:
image_bboxes = extract_bboxes(args.bbox_file)
count = 0
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
one_giant_file = args.one_giant_file
if one_giant_file is not None:
giant_file = {}
if args.existing is not None:
giant_file = torch.load(args.existing)
print("Loaded {}".format(args.existing))
for i, im_name in enumerate(tqdm(im_list)):
im_base_name = os.path.basename(im_name)
if not args.no_id:
image_id = int(im_base_name.split(".")[0].split("_")[-1]) # for COCO
else:
image_id = None
bbox = None
if args.existing:
if im_base_name in giant_file:
continue
else:
print("Missing {}...".format(im_base_name))
im = cv2.imread(im_name)
if im is not None:
outfile = os.path.join(args.output_dir, im_base_name) + ".npz"
#lock_folder = outfile + '.lock'
#if not os.path.exists(lock_folder) and os.path.exists(outfile):
# print("Reading {} falied!".format(im_base_name))
# continue
#if not os.path.exists(lock_folder):
# os.makedirs(lock_folder)
box_features, max_conf, cls_boxes = get_detections_from_im(cfg, model, im,
image_id,args.feat_name,
args.min_bboxes,
args.max_bboxes,
bboxes=bbox)
if one_giant_file is not None:
box_features = torch.Tensor(box_features)
cls_boxes = torch.Tensor(cls_boxes)
max_conf = torch.Tensor(max_conf)
giant_file[im_base_name] = (box_features, cls_boxes, max_conf)
else:
np.savez(outfile, box_features=box_features, max_conf=max_conf, cls_boxes=cls_boxes)
#os.rmdir(lock_folder)
else:
print("Reading {} falied!".format(im_base_name))
if one_giant_file is not None:
print(len(giant_file))
torch.save(giant_file, one_giant_file)
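# The saved file can later be reloaded with torch.load(one_giant_file),
# giving a dict {image filename -> (box_features, cls_boxes, max_conf)}
# exactly as stored above.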
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
detectron.utils.logging.setup_logging(__name__)
args = parse_args()
if args.group_id >= args.total_group:
exit("sharding group %d is greater than the total group %d" %(args.group_id, args.total_group ))
main(args)
| 11,369 | 31.485714 | 107 | py |
skccm | skccm-master/setup.py | import os
from distutils.core import setup
# Get version and release info, which is all stored in skccm/version.py
ver_file = os.path.join('skccm', 'version.py')
with open(ver_file) as f:
exec(f.read())
opts = dict(name=NAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
platforms=PLATFORMS,
version=VERSION,
packages=PACKAGES,
#package_data=PACKAGE_DATA,
requires=REQUIRES)
if __name__ == '__main__':
setup(**opts)
| 804 | 26.758621 | 74 | py |
skccm | skccm-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# skccm documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 24 16:48:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['numpy', 'numba','sklearn', 'scipy','pandas']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'skccm'
copyright = '2017, Nick Cortale'
author = 'Nick Cortale'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'skccmdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'skccm.tex', 'skccm Documentation',
'Nick Cortale', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'skccm', 'skccm Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'skccm', 'skccm Documentation',
author, 'skccm', 'One line description of project.',
'Miscellaneous'),
]
| 5,151 | 28.953488 | 79 | py |
skccm | skccm-master/skccm/data.py | #
# Data for analyzing causality.
# By Nick Cortale
#
# Paper:
# Detecting Causality in Complex Ecosystems
# George Sugihara et al. 2012
#
# Thanks to Kenneth Ells and Dylan McNamara
#
import numpy as np
from numpy import genfromtxt
from scipy import integrate
def coupled_logistic(rx1, rx2, b12, b21, ts_length,random_start=False):
"""Coupled logistic map.
Parameters
----------
rx1 : float
Parameter that determines chaotic behavior of the x1 series.
rx2 : float
		Parameter that determines chaotic behavior of the x2 series.
b12 : float
Influence of x1 on x2.
b21 : float
Influence of x2 on x1.
ts_length : int
Length of the calculated time series.
random_start : bool
Random initialization of starting conditions.
Returns
-------
x1 : 1d array
Array of length (ts_length,) that stores the values of the x series.
x2 : 1d array
Array of length (ts_length,) that stores the values of the y series.
"""
# Initial conditions after McCracken (2014)
x1 = np.zeros(ts_length)
x2 = np.zeros(ts_length)
if random_start:
x1[0] = .15 + .1*np.random.rand()
x2[0] = .35 + .1 *np.random.rand()
else:
x1[0] = 0.2
x2[0] = 0.4
for i in range(ts_length-1):
x1[i+1] = x1[i] * (rx1 - rx1 * x1[i] - b21 * x2[i])
x2[i+1] = x2[i] * (rx2 - rx2 * x2[i] - b12 * x1[i])
return x1,x2
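#Illustrative usage sketch (not called anywhere in this module): rx values
#near 3.7 put both maps in the chaotic regime used in the CCM literature;
#the coupling strengths below are arbitrary choices.
def _demo_coupled_logistic():
	x1, x2 = coupled_logistic(3.7, 3.7, b12=0.1, b21=0.02, ts_length=1000)
	return x1, x2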
def driven_rand_logistic(rx2, b12, ts_length,random_start=False):
"""Logistic map with random forcing. x1 is the random array and x2 is the
logistic map.
Parameters
----------
rx2 : float
		Parameter that determines chaotic behavior of the x2 series.
b12 : float
Influence of x1 on x2.
ts_length : int
Length of the calculated time series.
random_start : Boolean
Random initialization of starting conditions.
Returns
-------
x1 : array
Array of length (ts_length,)
x2 : array
Array of length (ts_length,)
"""
x1 = np.random.rand(ts_length)*.4
x2 = np.zeros(ts_length)
if random_start:
x2[0] = .35 + .1 *np.random.rand()
else:
x2[0] = 0.4
for i in range(ts_length-1):
x2[i+1] = x2[i] * (rx2 - rx2 * x2[i] - b12 * x1[i])
return x1,x2
def driving_sin(rx2, b12, ts_length, random_start=False):
"""Sine wave driving a logistic map.
Parameters
----------
rx2 : float
		Parameter that determines chaotic behavior of the x2 series.
b12 : float
Influence of x1 on x2.
ts_length : int
Length of the calculated time series.
random_start : Boolean
Random initialization of starting conditions.
Returns
-------
x1 : array
Array of length (ts_length,) that stores the values of the x series.
x2 : array
Array of length (ts_length,) that stores the values of the y series.
"""
x1 = np.sin(np.linspace(0,100*np.pi,ts_length))*.4
x2 = np.zeros(ts_length)
if random_start:
x2[0] = .35 + .1 *np.random.rand()
else:
x2[0] = 0.4
for i in range(ts_length-1):
x2[i+1] = x2[i] * (rx2 - rx2 * x2[i] - b12 * x1[i])
return x1,x2
def lagged_coupled_logistic(rx1, rx2, b12, b21, ts_length, random_start=False):
"""Coupled logistic map. x1 is driven by random lags of x2.
Parameters
----------
rx1 : float
Parameter that determines chaotic behavior of the x1 series.
rx2 : float
Parameter that determines chatotic behavior of the x2 series.
b12 : float
Influence of x1 on x2.
b21 : float
Influence of x2 on x1.
ts_length : int
Length of the calculated time series.
random_start : Boolean
Random initialization of starting conditions.
Returns
-------
x1 : array
Array of length (ts_length,) that stores the values of the x series.
x2 : array
Array of length (ts_length,) that stores the values of the y series.
"""
# Initial conditions after McCracken (2014)
x1 = np.zeros(ts_length)
x2 = np.zeros(ts_length)
if random_start:
x1[0] = .15 + .1*np.random.rand()
x2[0] = .35 + .1 *np.random.rand()
else:
x1[0] = 0.2
x2[0] = 0.4
	for i in range(ts_length-1):
		randi = np.random.randint(1,10)
		if i - randi >= 0:
			x1[i+1] = x1[i] * (rx1 - rx1 * x1[i] - b21 * x2[i-randi])
		else:
			#the random lag would reach before the start of the series, so
			#fall back to the unlagged value (a negative index would
			#otherwise silently wrap around to the end of the array)
			x1[i+1] = x1[i] * (rx1 - rx1 * x1[i] - b21 * x2[i])
		x2[i+1] = x2[i] * (rx2 - rx2 * x2[i] - b12 * x1[i])
return x1,x2
def lorenz(sz=10000, noise=0, max_t=100.):
"""Integrates the lorenz equation.
Parameters
----------
sz : int
Length of the time series to be integrated.
noise : float
Amplitude of noise to be added to the lorenz equation.
max_t : float
Length of time to solve the lorenz equation over.
Returns
-------
X : 2D array
Solutions to the Lorenz equations. Columns are X,Y,Z.
"""
def lorenz_deriv(xyz, t0, sigma=10., beta=8./3, rho=28.0):
x,y,z = xyz
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
x0 = [1, 1, 1] # starting vector
	t = np.linspace(0, max_t, sz) # sz time steps spanning [0, max_t]
X = integrate.odeint(lorenz_deriv, x0, t) + noise*np.random.rand(sz,3)
return X
| 5,461 | 24.170507 | 79 | py |
skccm | skccm-master/skccm/skccm.py | #
# Data for analyzing causality.
# By Nick Cortale
#
# Classes:
# ccm
# embed
#
# Paper:
# Detecting Causality in Complex Ecosystems
# George Sugihara et al. 2012
#
# Thanks to Kenneth Ells and Dylan McNamara
#
# Notes:
# Originally I thought this could be made much faster by only calculating the
# distances once and then chopping it to a specific library length. It turns out
# that calculating the distances is cheaper than filtering the indices.
#
import numpy as np
from sklearn import neighbors
from sklearn import metrics
from . import utilities
import pandas as pd
import time
class CCM:
"""Convergent cross mapping for two embedded time series.
Parameters
----------
weights : str
Weighting scheme for predictions. Options:
- 'exp' : exponential weighting
verbose : bool
Prints out calculation status.
"""
def __init__(self, weights='exp', verbose=False):
self.weights = weights
self.verbose = verbose
def fit(self, X1_train, X2_train):
"""Fit the training data for ccm. Can be thought of as reconstructing the
shadow manifolds of each time series.
		The number of nearest neighbors is set to the embedding dimension plus one.
		Creates separate nearest neighbor regressors for X1 and X2 independently.
Parameters
----------
		X1_train : 2d array
			Embedded time series of shape (num_samps,embed_dim).
		X2_train : 2d array
			Embedded time series of shape (num_samps,embed_dim).
"""
# Save X1_train and X2_train for prediction later. Confusing,
# but we need to make predictions about our testing set using these.
self.X1_train = X1_train
self.X2_train = X2_train
		#to surround a point, there must be ndim + 1 points
near_neighs = X1_train.shape[1] + 1
self.knn1 = neighbors.KNeighborsRegressor(near_neighs)
self.knn2 = neighbors.KNeighborsRegressor(near_neighs)
def predict(self, X1_test, X2_test, lib_lengths):
"""Make a prediction.
Parameters
----------
		X1_test : 2d array
			Embedded time series of shape (num_samps,embed_dim).
		X2_test : 2d array
			Embedded time series of shape (num_samps,embed_dim).
lib_lengths : 1d array of ints
Library lengths to test.
Returns
-------
X1_pred : list of 2d arrays
Predictions for each library length.
X2_pred : list of 2d arrays
Predictions for each library length.
"""
#store X1_test and X2_test for use later
self.X1_test = X1_test
self.X2_test = X2_test
X1_pred = []
X2_pred = []
for liblen in lib_lengths:
x1_p = np.empty(X1_test.shape)
x2_p = np.empty(X2_test.shape)
#keep only the indices that are less than library length
self.knn1.fit(self.X1_train[:liblen], self.X1_train[:liblen])
self.knn2.fit(self.X2_train[:liblen], self.X2_train[:liblen])
dist1,ind1 = self.knn1.kneighbors(X1_test)
dist2,ind2 = self.knn2.kneighbors(X2_test)
for j in range(self.X1_train.shape[1]):
W1 = utilities.exp_weight(dist1)
W2 = utilities.exp_weight(dist2)
#flip the weights and indices
x1_p[:, j] = np.sum(self.X1_train[ind2, j] * W2, axis=1)
x2_p[:, j] = np.sum(self.X2_train[ind1, j] * W1, axis=1)
X1_pred.append(x1_p)
X2_pred.append(x2_p)
self.X1_pred = X1_pred
self.X2_pred = X2_pred
return X1_pred, X2_pred
def score(self, score_metric='corrcoef'):
"""Evalulate the predictions.
Parameters
----------
		score_metric : string
How to score the predictions. Options:
- 'score'
- 'corrcoef'
Returns
-------
score_1 : 2d array
Scores for the first time series using the weights from the second
time series.
score_2 : 2d array
Scores for the second time series using the weights from the first
time series.
"""
num_preds = self.X1_train.shape[1]
score_1 = []
score_2 = []
for x1_p, x2_p in zip(self.X1_pred, self.X2_pred):
sc1 = np.empty(num_preds)
sc2 = np.empty(num_preds)
for ii in range(num_preds):
p1 = x1_p[:,ii]
p2 = x2_p[:,ii]
if score_metric == 'score':
sc1[ii] = utilities.score(p1,self.X1_test[:,ii])
sc2[ii] = utilities.score(p2,self.X2_test[:,ii])
if score_metric == 'corrcoef':
sc1[ii] = utilities.corrcoef(p1,self.X1_test[:,ii])
sc2[ii] = utilities.corrcoef(p2,self.X2_test[:,ii])
score_1.append( np.mean(sc1) )
score_2.append( np.mean(sc2) )
return score_1, score_2
class Embed:
"""Embed a time series.
Parameters
----------
X : 1D array
		Time series to be embedded.
"""
def __init__(self,X):
		if isinstance(X, pd.DataFrame):
self.df = X
else:
self.X = X
def df_mutual_information(self, max_lag):
"""Calculates the mutual information along each column of a dataframe.
Ensure that the time series is continuous in time and sampled regularly.
You can resample it hourly, daily, minutely etc. if needed.
Parameters
----------
max_lag : int
maximum amount to shift the time series
Returns
-------
mi : dataframe
columns are the columns of the original dataframe with rows being
the mutual information. shape(max_lag,num_cols)
"""
cols = self.df.columns
mi = np.empty((max_lag, len(cols)))
for i,col in enumerate(cols):
self.X = self.df[col].values
mi[:,i] = self.mutual_information(max_lag)
mi = pd.DataFrame(mi,columns=cols)
return mi
def mutual_information(self, max_lag):
"""Calculates the mutual information between the an unshifted time
series and a shifted time series.
Utilizes scikit-learn's implementation of the mutual information found
in sklearn.metrics.
Parameters
----------
max_lag : integer
Maximum amount to shift the time series.
Returns
-------
m_score : 1-D array
			Mutual information between the unshifted time series and the
			shifted time series.
"""
#number of bins - say ~ 20 pts / bin for joint distribution
#and that at least 4 bins are required
N = max(self.X.shape)
num_bins = max(4.,np.floor(np.sqrt(N/20)))
num_bins = int(num_bins)
m_score = np.zeros((max_lag))
for jj in range(max_lag):
lag = jj+1
ts = self.X[0:-lag]
ts_shift = self.X[lag::]
min_ts = np.min(self.X)
max_ts = np.max(self.X)+.0001 #needed to bin them up
bins = np.linspace(min_ts,max_ts,num_bins+1)
bin_tracker = np.zeros_like(ts)
bin_tracker_shift = np.zeros_like(ts_shift)
for ii in range(num_bins):
locs = np.logical_and( ts>=bins[ii], ts<bins[ii+1] )
bin_tracker[locs] = ii
locs_shift = np.logical_and( ts_shift>=bins[ii], ts_shift<bins[ii+1] )
bin_tracker_shift[locs_shift]=ii
m_score[jj] = metrics.mutual_info_score(bin_tracker,bin_tracker_shift)
return m_score
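	# Note on usage: in practice the embedding lag is chosen at the first
	# local minimum of m_score, the shift at which consecutive samples stop
	# carrying largely redundant information.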
def embed_vectors_1d(self, lag, embed):
"""Embeds vectors from a one dimensional time series in m-dimensional
space.
Parameters
----------
lag : int
Lag value as calculated from the first minimum of the mutual info.
embed : int
Embedding dimension. How many lag values to take.
Returns
-------
features : 2d array
Contains all of the embedded vectors. Shape (num_vectors,embed).
Example
-------
>>> X = [0,1,2,3,4,5,6,7,8,9,10]
em = 3
lag = 2
predict=3
>>> embed_vectors_1d
features = [[0,2,4], [1,3,5], [2,4,6], [3,5,7]]
"""
tsize = self.X.shape[0]
t_iter = tsize-(lag*(embed-1))
features = np.zeros((t_iter,embed))
for ii in range(t_iter):
end_val = ii+lag*(embed-1)+1
part = self.X[ii : end_val]
features[ii,:] = part[::lag]
return features
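#Illustrative end-to-end sketch (not part of the original API; the helper
#name and all parameter values below, including lag, embedding dimension and
#library lengths, are arbitrary choices): embed two coupled series, then
#cross map them with CCM.
def _demo_ccm_workflow():
	from .data import coupled_logistic
	x1, x2 = coupled_logistic(3.7, 3.7, b12=0.1, b21=0.02, ts_length=1000)
	lag, embed_dim = 1, 2
	e1 = Embed(x1).embed_vectors_1d(lag, embed_dim)
	e2 = Embed(x2).embed_vectors_1d(lag, embed_dim)
	x1tr, x1te, x2tr, x2te = utilities.train_test_split(e1, e2, percent=.75)
	ccm = CCM()
	ccm.fit(x1tr, x2tr)
	ccm.predict(x1te, x2te, lib_lengths=[100, 200, 400])
	return ccm.score()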
| 9,086 | 25.80531 | 86 | py |
skccm | skccm-master/skccm/utilities.py | #
# Metrics for scoring predictions from CCM
#
import numpy as np
from scipy import stats as stats
def corrcoef(preds, actual):
"""Correlation Coefficient between predicted and actual values.
Parameters
----------
preds : 1d array
Predicted values.
actual : 1d array
Actual values from the testing set.
Returns
-------
cc : float
Returns the correlation coefficient between preds and actual.
"""
cc = np.corrcoef(preds,actual)[0,1]
return cc
def variance_explained(preds, actual):
"""Explained variance between predicted values and actual values.
Parameters
----------
preds : 1d array
Predicted values.
actual : 1d array
Actual values from the testing set.
Returns
-------
	cc : float
		Returns the fraction of unexplained variance between preds and actual.
"""
cc = np.var(preds - actual) / np.var(actual)
return cc
def score(preds, actual):
"""The coefficient R^2 is defined as (1 - u/v), where u is the regression
sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
sum of squares ((y_true - y_true.mean()) ** 2).sum(). Best possible
score is 1.0, lower values are worse.
Parameters
----------
preds : 1d array
Predicted values.
actual : 1d array
Actual values from the testing set.
Returns
-------
cc : float
		Returns the coefficient of determination between preds and actual.
"""
u = np.square(actual - preds ).sum()
v = np.square(actual - actual.mean()).sum()
r2 = 1 - u/v
return r2
def feature_scale(X):
"""Scales features between 0 and 1.
Parameters
----------
X : 1d array
Time series values to be scaled.
Returns
-------
scaled : 1d array
Scaled array.
"""
top = X - np.min(X)
bot = np.max(X) - np.min(X)
scaled = top/bot
return scaled
def train_test_split(x1, x2, percent=.75):
"""Splits the embedded time series into a training set and testing set.
Parameters
----------
	x1 : 2D array
		Embedded time series.
	x2 : 2D array
		Embedded time series.
percent : float
Percent to use for training set.
Returns
-------
x1tr : 2D array
x1te : 2D array
x2tr : 2D array
x2te : 2D array
"""
	if len(x1) != len(x2):
		raise ValueError("x1 and x2 are different lengths!")
split = int(len(x1)*percent)
x1tr = x1[:split]
x2tr = x2[:split]
x1te = x1[split:]
x2te = x2[split:]
return x1tr, x1te, x2tr, x2te
def exp_weight(X):
"""Calculates the weights based on the distances.
e^(-distances/min(distances,axis=1))
Parameters
----------
X : 2D array
Distances from the training set to the testing set.
Returns
-------
W : 2D array
Exponentially weighted and normalized weights.
"""
#add a small number so it stays defined
norm = X[:,[0]] +.00001
numer = np.exp(-X/norm)
denom = np.sum(numer,axis=1,keepdims=True)
W = numer/denom
return W
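#Illustrative worked example (not called anywhere in this module): for one
#query row with sorted distances [1, 2, 4], exp_weight returns roughly
#[[0.7054, 0.2595, 0.0351]], and each row sums to one.
def _demo_exp_weight():
	return exp_weight(np.array([[1., 2., 4.]]))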
def in_library_len(ind, dist, lib_len):
"""Returns the filtered indices and distances that are in that specific
library length. This allows the distances to only be calculated once.
This was created in an attempt to speed up the algorithm. It turns out the
naive implementation was faster.
Parameters
----------
ind : 2d array
Indices to be filtered.
dist : 2d array
Distances to be filtered.
lib_len : int
What indices to keep.
Returns
-------
filt_ind : 2d array
Filtered indices.
filt_dist : 2d array
Filtered distances.
"""
mask = ind < lib_len
filt_ind = ind[mask].reshape(-1,lib_len)
filt_dist = dist[mask].reshape(-1,lib_len)
# this was slower :(
# r,c = np.where(ind<lib_len)
#
# r = r.reshape(-1,lib_len)[:,:keep].ravel()
# c = c.reshape(-1,lib_len)[:,:keep].ravel()
#
# filt_ind = ind[r,c].reshape(-1,keep)
# filt_dist = dist[r,c].reshape(-1,keep)
return filt_ind, filt_dist
def in_library_len_keep(ind, dist, lib_len, keep):
"""Returns the filtered indices and distances that are in that specific
library length. Only returns the top n depending on the value of keep.
This allows the distances to only be calculated once. This algorithm is
slow for large matrices. The naive implementation of this algorithm is
actually faster.
Parameters
----------
ind : 2d array
Indices to be filtered.
dist : 2d array
Distances to be filtered.
lib_len : int
What indices to keep.
keep : int
How much of the matrix to keep.
Returns
-------
filt_ind : 2d array
Filtered indices.
filt_dist : 2d array
Filtered distances.
"""
ind_store = []
dist_store = []
for i in range(len(ind)):
mask = ind[i] < lib_len
ind_store.append( ind[i][mask] )
dist_store.append( dist[i][mask] )
ind_store = [x[:keep] for x in ind_store]
dist_store = [x[:keep] for x in dist_store]
return np.vstack(ind_store), np.vstack(dist_store)
def throw_out_nn_indices(ind, dist, Xind):
"""Throw out near neighbor indices that are used to embed the time series.
This is an attempt to get around the problem of autocorrelation.
Parameters
----------
ind : 2d array
Indices to be filtered.
dist : 2d array
Distances to be filtered.
Xind : int
Indices to filter.
Returns
-------
filt_ind : 2d array
Filtered indices.
filt_dist : 2d array
Filtered distances.
"""
ind_store = []
dist_store = []
#iterate through each row
for i in range(len(Xind)):
xrow = Xind[i]
indrow = ind[i]
distrow = dist[i]
mask = np.ones(len(indrow),dtype=bool)
for val in xrow:
mask[indrow == val] = False
ind_store.append( indrow[mask] )
dist_store.append(distrow[mask])
#keep up to the shortest mask. This is so that we can vstack them
ind_len = min( [len(m) for m in ind_store] )
#make all lists the same size for concatenation
ind_store = [m[:ind_len] for m in ind_store]
dist_store = [m[:ind_len] for m in dist_store]
ind_store = np.vstack(ind_store)
dist_store = np.vstack(dist_store)
return dist_store, ind_store
def conflicting_indices(X):
"""Finds where the indices are in the rest of feature matrix. This assures
that the correct indices are dropped.
Parameters
----------
X : 2D array
The embed indices. This is the same shape as the actual embedded time
series.
Returns
-------
conf_ind : 1d array
Conflicting indices to be dropped.
"""
conf_ind = []
for i in range(len(X)):
inds = [] #where to store
#iterate through all other rows
for j in range(len(X)):
#check where they intersect
if len(set( X[i] ).intersection( X[j] ))>0:
inds.append(j)
conf_ind.append(inds)
return conf_ind
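#Illustrative worked example (not called anywhere in this module): rows that
#share any raw time index are flagged, so every row conflicts at least with
#itself. Here the return value is [[0, 1], [0, 1], [2]].
def _demo_conflicting_indices():
	X = np.array([[0, 1], [1, 2], [3, 4]])
	return conflicting_indices(X)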
| 7,238 | 21.481366 | 78 | py |
skccm | skccm-master/skccm/version.py | from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 2
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
#_version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "skccm: Convergent Cross Mapping with a simple api"
# Long description will go up on the pypi page
long_description = """
skccm
========
This package is an implementation of convergent cross mapping (CCM)
using scikit-learn's api. More specifically it is an implementation from the
paper, **Detecting Causality in Complex Ecosystems**. It reconstructs
phase spaces to analyze the effects of one system on the other.
"""
NAME = "skccm"
MAINTAINER = "Nick Cortale"
MAINTAINER_EMAIL = "nickcortale@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "https://github.com/NickC1/skccm"
DOWNLOAD_URL = "https://github.com/NickC1/skccm/tarball/0.1"
LICENSE = "MIT"
AUTHOR = "Nick Cortale"
AUTHOR_EMAIL = "nickcortale@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGES = ['skccm']
PACKAGE_DATA = ""
REQUIRES = ["numpy"]
| 1,871 | 30.728814 | 76 | py |
skccm | skccm-master/skccm/__init__.py | from .skccm import CCM, Embed
| 30 | 14.5 | 29 | py |
skccm | skccm-master/skccm/paper.py | #
# Data for analyzing causality.
# By Nick Cortale
#
# Classes:
# ccm
# embed
#
# Paper:
# Detecting Causality in Complex Ecosystems
# George Sugihara et al. 2012
#
# Thanks to Kenneth Ells and Dylan McNamara
#
# Notes:
# Originally I thought this could be made much faster by only calculating the
# distances once and then chopping it to a specific library length. It turns out
# that calculating the distances is cheaper than filtering the indices.
#
import numpy as np
from sklearn import neighbors
from sklearn import metrics
import skccm.utilities as ut
import pandas as pd
import time
class CCM:
"""
Convergent cross mapping for two embedded time series
"""
def __init__(self, weights='exp', score_metric='corrcoef', verbose=False):
"""
Parameters
----------
weights : weighting scheme for predictions
- exp : exponential weighting
score : how to score the predictions
-'score'
-'corrcoef'
verbose : prints out calculation status
"""
self.weights = weights
self.score_metric = score_metric
self.verbose = verbose
def fit(self,X1,X2):
"""
		Fit the training data for ccm. Creates separate nearest neighbor regressors
for X1 and X2 independently.
X1 : embedded time series of shape (num_samps,embed_dim)
X2 : embedded time series of shape (num_samps,embed_dim)
near_neighs : string
			- 'surround' : this is what the paper uses
- 'all' : calculate the distance to all near neighbors
"""
# Save X1_train and X2_train for prediction later. Confusing,
# but we need to make predictions about our testing set using these.
self.X1 = X1
self.X2 = X2
		#to surround a point, there must be ndim + 1 points;
		#we use ndim + 2 here because the closest neighbor is the point
		#itself and will be dropped
near_neighs = X1.shape[1] + 2
self.knn1 = neighbors.KNeighborsRegressor(near_neighs)
self.knn2 = neighbors.KNeighborsRegressor(near_neighs)
def predict_no_drop(self,lib_lengths):
"""
Make a prediction
Parameters
----------
X1_test : test set
X2_test : test set
lib_lengths : list of library lengths to test
"""
X1_pred = []
X2_pred = []
for liblen in lib_lengths:
x1_p = np.empty(self.X1.shape)
x2_p = np.empty(self.X2.shape)
#keep only the indices that are less than library length
self.knn1.fit(self.X1[:liblen], self.X1[:liblen])
self.knn2.fit(self.X2[:liblen], self.X2[:liblen])
dist1,ind1 = self.knn1.kneighbors(self.X1)
dist2,ind2 = self.knn2.kneighbors(self.X2)
#drop indices and distances to themselves
dist1 = dist1[:,1:]
dist2 = dist2[:,1:]
ind1 = ind1[:,1:]
ind2 = ind2[:,1:]
for j in range(self.X1.shape[1]):
W1 = ut.exp_weight(dist1)
W2 = ut.exp_weight(dist2)
#flip the weights and indices
x1_p[:, j] = np.sum(self.X1[ind2, j] * W2, axis=1)
x2_p[:, j] = np.sum(self.X2[ind1, j] * W1, axis=1)
X1_pred.append(x1_p)
X2_pred.append(x2_p)
self.X1_pred = X1_pred
self.X2_pred = X2_pred
return X1_pred, X2_pred
def predict_drop_in_list(self,lib_lengths,emb_ind1,emb_ind2):
"""
Make a prediction, but the same indices cant be matched with each other.
Parameters
----------
		lib_lengths : library lengths to test
		emb_ind1 : indices of the first embedded time series.
		emb_ind2 : indices of the second embedded time series.
"""
X1_pred = []
X2_pred = []
		#need to reset the class to use all neighbors so that the appropriate
# neighbors can be dropped for each class
self.knn1 = neighbors.KNeighborsRegressor(len(self.X1))
self.knn2 = neighbors.KNeighborsRegressor(len(self.X2))
self.knn1.fit(self.X1, self.X1)
self.knn2.fit(self.X2, self.X2)
dist1,ind1 = self.knn1.kneighbors(self.X1)
dist2,ind2 = self.knn2.kneighbors(self.X2)
#find the conflicting indices
conf1 = ut.conflicting_indices(emb_ind1)
conf2 = ut.conflicting_indices(emb_ind2)
#throw out the indices that are in the embedding
dist1, ind1 = ut.throw_out_nn_indices(dist1,ind1,conf1)
dist2, ind2 = ut.throw_out_nn_indices(dist2,ind2,conf2)
n_sorround = self.X1.shape[1] + 1
		#flipping allows for a faster implementation as we can feed
		#ut.in_library_len_keep smaller and smaller arrays
for liblen in lib_lengths:
#keep only the indices that are less than library length
#t0 = time.time()
i_1, d_1 = ut.in_library_len_keep(ind1, dist1, liblen,n_sorround)
i_2, d_2 = ut.in_library_len_keep(ind2, dist2, liblen,n_sorround)
#t1 = time.time()
#t0 = time.time()
W1 = ut.exp_weight(d_1)
W2 = ut.exp_weight(d_2)
x1_p = np.empty(self.X1.shape)
x2_p = np.empty(self.X2.shape)
for j in range(self.X1.shape[1]):
#flip the weights and indices
x1_p[:, j] = np.sum(self.X1[i_2, j] * W2, axis=1)
x2_p[:, j] = np.sum(self.X2[i_1, j] * W1, axis=1)
#t1 = time.time()
#print('second_loop:',np.around(t1-t0,4))
X1_pred.append(x1_p)
X2_pred.append(x2_p)
self.X1_pred = X1_pred
self.X2_pred = X2_pred
if self.verbose: print("predictions made")
return X1_pred, X2_pred
def score(self,how='corrcoef'):
"""
		Evaluate the predictions. Calculates the skill down each column
and averages them together to get the total skill.
how : how to score the predictions
-'score'
-'corrcoef'
"""
num_preds = self.X1.shape[1]
score_1 = []
score_2 = []
for x1_p, x2_p in zip(self.X1_pred, self.X2_pred):
sc1 = np.empty(num_preds)
sc2 = np.empty(num_preds)
for ii in range(num_preds):
p1 = x1_p[:,ii]
p2 = x2_p[:,ii]
if self.score_metric == 'score':
sc1[ii] = ut.score(p1,self.X1[:,ii])
sc2[ii] = ut.score(p2,self.X2[:,ii])
if self.score_metric == 'corrcoef':
sc1[ii] = ut.corrcoef(p1,self.X1[:,ii])
sc2[ii] = ut.corrcoef(p2,self.X2[:,ii])
score_1.append( np.mean(sc1) )
score_2.append( np.mean(sc2) )
return score_1, score_2
class Embed:
def __init__(self,X):
"""
Parameters
----------
X : series or dataframe,
"""
		if isinstance(X, pd.DataFrame):
self.df = X
else:
self.X = X
def df_mutual_information(self,max_lag):
"""
		Calculates the mutual information along each column of a dataframe.
Ensure that the time series is continuous in time and sampled regularly.
You can resample it hourly, daily, minutely etc. if needed.
Parameters
----------
max_lag : int
maximum amount to shift the time series
Returns
-------
mi : dataframe, shape(max_lag,num_cols)
columns are the columns of the original dataframe with rows being
the mutual information
"""
cols = self.df.columns
mi = np.empty((max_lag, len(cols)))
for i,col in enumerate(cols):
self.X = self.df[col].values
mi[:,i] = self.mutual_information(max_lag)
mi = pd.DataFrame(mi,columns=cols)
return mi
def mutual_information(self,max_lag):
"""
		Calculates the mutual information between an unshifted time series
and a shifted time series. Utilizes scikit-learn's implementation of
the mutual information found in sklearn.metrics.
Parameters
----------
max_lag : integer
maximum amount to shift the time series
Returns
-------
m_score : 1-D array
mutual information at between the unshifted time series and the
shifted time series
"""
#number of bins - say ~ 20 pts / bin for joint distribution
#and that at least 4 bins are required
N = max(self.X.shape)
num_bins = max(4.,np.floor(np.sqrt(N/20)))
num_bins = int(num_bins)
m_score = np.zeros((max_lag))
for jj in range(max_lag):
lag = jj+1
ts = self.X[0:-lag]
ts_shift = self.X[lag::]
min_ts = np.min(self.X)
max_ts = np.max(self.X)+.0001 #needed to bin them up
bins = np.linspace(min_ts,max_ts,num_bins+1)
bin_tracker = np.zeros_like(ts)
bin_tracker_shift = np.zeros_like(ts_shift)
for ii in range(num_bins):
locs = np.logical_and( ts>=bins[ii], ts<bins[ii+1] )
bin_tracker[locs] = ii
locs_shift = np.logical_and( ts_shift>=bins[ii], ts_shift<bins[ii+1] )
bin_tracker_shift[locs_shift]=ii
m_score[jj] = metrics.mutual_info_score(bin_tracker,bin_tracker_shift)
return m_score
def embed_indices(self,lag,embed):
"""
Gets the indices of the embedded time series. This assumes that the
time series is sequential. Non-sequential time series are currently
not supported.
Parameters
----------
lag : int
lag values as calculated from the first minimum of the mutual info.
embed : int
embedding dimension, how many lag values to take
"""
tsize = self.X.shape[0]
X = np.arange(0,tsize)
t_iter = tsize-(lag*(embed-1))
features = np.zeros((t_iter,embed))
for ii in range(t_iter):
end_val = ii+lag*(embed-1)+1
part = X[ii : end_val]
features[ii,:] = part[::lag]
return features
def embed_vectors_1d(self,lag,embed):
"""
Embeds vectors from a one dimensional time series in
m-dimensional space.
Parameters
----------
lag : int
lag values as calculated from the first minimum of the mutual info.
embed : int
embedding dimension, how many lag values to take
Returns
-------
features : array of shape [num_vectors,embed]
A 2-D array containing all of the embedded vectors
Example
-------
		X = [0,1,2,3,4,5,6,7,8,9,10]
		lag = 2
		embed = 3
		returns:
		features = [[0,2,4], [1,3,5], [2,4,6], [3,5,7],
		            [4,6,8], [5,7,9], [6,8,10]]
"""
tsize = self.X.shape[0]
t_iter = tsize-(lag*(embed-1))
features = np.zeros((t_iter,embed))
for ii in range(t_iter):
end_val = ii+lag*(embed-1)+1
part = self.X[ii : end_val]
features[ii,:] = part[::lag]
return features
| 9,583 | 22.093976 | 80 | py |
skccm | skccm-master/skccm/tests/test_utilities.py | import os.path as op
import numpy as np
import numpy.testing as npt
import skccm.utilities as ut
def test_exp_weight():
#ensure it sums to one
X = np.array([ [0.1,0.2,.3,.4],
[.3,.3,.7,.7]])
	W = ut.exp_weight(X)
np.testing.assert_array_almost_equal(np.array([1.,1.]),W.sum(axis=1))
| 329 | 18.411765 | 73 | py |
skccm | skccm-master/skccm/tests/__init__.py | 0 | 0 | 0 | py | |
skccm | skccm-master/skccm/tests/test_skccm.py | import os.path as op
import numpy as np
import numpy.testing as npt
#these regression/classification tests target the companion skedm package
#(assumed dependency providing edm.Regression and edm.Classification)
import skedm as edm
def test_regression_dist_calc():
X = np.array([
[ 0.3, 0.6],
[ 0.2, 1.4],
[ 1.2, 0.2]])
y = X.sum(axis=1,keepdims=True)
R = edm.Regression()
R.fit(X,y)
R.dist_calc(X)
d = np.array([[ 0., 0.80622577, 0.98488578],
[ 0., 0.80622577, 1.56204994],
[ 0., 0.98488578, 1.56204994]])
i = np.array([[0, 1, 2],
[1, 0, 2],
[2, 0, 1]])
npt.assert_almost_equal(R.dist, d)
npt.assert_equal(R.ind, i)
def test_uniform_regression():
"""
Tests a full regression using a uniform weighting
"""
X = np.array([
[ 0.3, 0.6],
[ 0.2, 1.4],
[ 1.2, 0.2]])
y = X.sum(axis=1,keepdims=True)
R = edm.Regression()
R.fit(X,y)
p = R.predict(X,[1,2,3])
#p[0] should just return y. The closest neighbor is itself
npt.assert_array_almost_equal(p[0],y)
#p[1] will be an average between itself and its closest neighbor
p1_test = np.empty((3,1))
p1_test[0] = (0.9 + 1.6)/2
p1_test[1] = (1.6 + 0.9)/2
p1_test[2] = (1.4 + 0.9)/2
npt.assert_array_almost_equal(p1_test,p[1])
#p[2] should be an average of them all
p2_test = np.mean(y)
p2_test = np.array([p2_test]*3).reshape(3,1) #convert it to the same shape
npt.assert_array_almost_equal(p2_test,p[2])
def test_weighted_regression():
"""
Tests a regression by weighting the neighbors
"""
Xtr = np.array([
[ 0.3, 0.6],
[ 0.2, 1.4],
[ 1.2, 0.2]])
Xte = np.array([
[ 0.7, 1.6],
[ 1.3, 0.4],
])
ytr = np.array([[ 0.9],
[ 1.6],
[ 1.4]])
yte = np.array([[ 2.3],
[ 1.7]])
R = edm.Regression(weights='distance')
R.fit(Xtr,ytr)
p = R.predict(Xte,[1,2,3])
#calculated distances:
dist = np.array([[ 0.53851648, 1.07703296, 1.48660687],
[ 0.2236068 , 1.0198039 , 1.48660687]])
#near neighbor indices:
ind = np.array([[1, 0, 2],
[2, 0, 1]])
#p[0] should return the nearest neighbor
npt.assert_array_almost_equal(p[0], np.array([[1.6],
[1.4]]), decimal=4 )
#p[1] should return a weighted average of the two nearest
W = 1/dist[:,0:2]
W/= np.sum(W,axis=1,keepdims=True)
w_avg = np.sum(W*ytr[R.ind[:,0:2],0],axis=1).reshape(-1,1)
npt.assert_array_almost_equal(p[1], w_avg,decimal=4)
#p[2] should return a weighted average of them all
W = 1/dist
W/= np.sum(W,axis=1,keepdims=True) #normalize
w_avg = np.sum(W*ytr[R.ind,0],axis=1).reshape(-1,1)
npt.assert_array_almost_equal(p[2], w_avg,decimal=4)
def test_uniform_classification_():
Xtr = np.array([
[ 3, 6],
[ 2, 1],
[ 1, 3]])
Xte = np.array([
[ 3, 5],
[ 2, 2]])
ytr = np.array([[9],
[3],
[9]])
R = edm.Classification()
R.fit(Xtr,ytr)
p = R.predict(Xte,[1,2,3])
ind = np.array([[0, 1, 2],
[1, 0, 2]])
#p[0] should just be the nearest neighbor
yp = np.array([[9.],
[3.]])
npt.assert_equal(p[0], yp)
	# p[1] should be the mode of the first two, but the way it is set up it
	# will always take the first one if there is no clear mode
yp = np.array([[9.],
[3.]])
npt.assert_equal(p[1], yp)
# p[2] should be the mode of all three,
yp = np.array([[9.],
[9.]])
npt.assert_equal(p[2], yp)
def test_weighted_classification():
Xtr = np.array([
[ 3, 5],
[ 2, 1],
[ 1, 3]])
Xte = np.array([
[ 3, 5],
[ 2, 2],
])
ytr = np.array([[9],
[3],
[9]])
R = edm.Classification(weights='distance')
R.fit(Xtr,ytr)
p = R.predict(Xte,[1,2,3])
dist = np.array([[ 0. , 1. , 1. ],
[ 0.5, 1. , 1. ]])
W = 1/(dist+.00001) #make sure to not divide by zero
#p[0] should just be the nearest neighbor
yp = np.array([[9.],
[3.]])
npt.assert_equal(p[0], yp)
# p[1] should be the mode of the first two, since the first one is
# closer, it will be chosen.
yp = np.array([[9.],
[3.]])
npt.assert_equal(p[1], yp)
# p[2] should be the weighted mode of all three. Due to rounding, W[1,1]
	# and W[1,2] together are actually slightly larger than W[1,0]
yp = np.array([[9.],
[9.]])
npt.assert_equal(p[2], yp)
| 4,742 | 24.918033 | 78 | py |
EcoSVM | EcoSVM-master/SIcode/python_scripts/SVDD/linear_EcoSVDD_numerics.py | #Owen Howell, July 15, 2019
#olh20@bu.edu, https://owenhowell20.github.io
#Optimized linear EcoSVDD code
#In case of norm 1 kernel K( x, x) = 1 reduces to FISVDD algorithm: https://arxiv.org/abs/1709.00139
#In paper we focus on kernel functions K(x,x) = 1, however this code works for any kernel function K(x,y)
#This code runs EcoSVDD algoritm and compares with batch SVDD
#Compares accuracy vs time
#Import standard python packages
import numpy as np
import matplotlib.pyplot as plt
import sys
#QP is done with CVXOPT packages
from cvxopt import matrix, solvers
import numpy as np
solvers.options['show_progress'] = False
#A global error threshold, any small number can be used
thresh = 1e-3
#Generate data from a gaussian distribution
#Returns dataset
def generate_data(N, dimension):
	#The mean of the distribution; uniform(0.5, 0.5, ...) fixes every component at 0.5
	mean = np.random.uniform( 0.5, 0.5, dimension)
	#The covariance matrix of the distribution
cov = np.zeros([dimension,dimension])
for i in range(dimension):
cov[i,i] = .01
xvals = np.random.multivariate_normal(mean,cov,N)
#xvals = np.random.uniform(0.2,0.6,[N,dimension])
return xvals
#The kernel function
def kernel(x,y):
return np.dot(x,np.transpose(y)) / np.sqrt( np.dot(x,np.transpose(x)) * np.dot(y,np.transpose(y)) )
#Initialize the EcoSVDD, compute support vectors for the first N_start points
#Inputs are the datapoints
#Returns the set of active datapoints, support vector values
def EcoSVDD_initialize(xvals):
N_start = len(xvals[:,0])
#Function to generate the intial kernel matrix
def intial_kernel_matrix():
#Compute the intial kernel matrix
Qmat = np.zeros([N_start,N_start])
for i in range(N_start):
for j in range(N_start):
s = kernel(xvals[i,:],xvals[j,:])
Qmat[i,j] = 2.0*s
return Qmat
Qmat = intial_kernel_matrix()
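	# Note: CVXOPT's qp solves min (1/2) a^T Q a + p^T a subject to
	# G a <= h and A a = b. With Q_ij = 2 K(x_i, x_j), p_i = -K(x_i, x_i),
	# G = -I, h = 0, A = 1^T and b = 1 this is exactly the SVDD dual:
	# maximize sum_i a_i K(x_i, x_i) - sum_{i,j} a_i a_j K(x_i, x_j)
	# over the simplex a_i >= 0, sum_i a_i = 1.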
#Convert to CVXOPT format
Q = matrix(Qmat)
p = np.zeros(N_start)
for i in range(N_start):
p[i] = - kernel(xvals[i,:],xvals[i,:])
p = matrix(p)
G = np.zeros([N_start,N_start])
for i in range(N_start):
G[i,i] = -1
G = matrix(G)
h = np.zeros([N_start])
h = matrix(h)
A = np.zeros(N_start)
for i in range(N_start):
A[i] = 1
A = matrix(A,(1,N_start),'d')
b = matrix(1.0)
sol = solvers.qp(Q, p, G, h, A, b)
	#the initial values of the solution
#KKT values a_{i}
KKT = np.array( sol['x'] )
#only care about non-zero values
for i in range(N_start):
if (KKT[i] < thresh) :
KKT[i] = 0
	#Only need to keep non-zero KKT values, also known as support vectors
	#Find initial support vector values and support vector indices
support_vects_inds = np.array( np.ndarray.nonzero(KKT)[0] )
support_vects = KKT[support_vects_inds]
#the set of active datapoints
	active_data_x = xvals[support_vects_inds,:]
#Check that there is at least one active support vector
if ( len(support_vects_inds) == 0 ):
print("Not enough intial points, no active support vector found. Make sure adaquate kernel function is used")
quit()
return active_data_x, support_vects
#Run the EcoSVDD algorithm on a single new point
#Inputs are datapoint X, active datapoints, set of support vectors and the dataset dimension
#Returns the new set of data points and the new set of support vectors
def point_Run_EcoSVDD( X, active_data_x , support_vects , dimension ):
numsupportvects = len(active_data_x[:,0])
	#sum variable
s = 0
for i in range(numsupportvects):
Qval = kernel( X , active_data_x[i,:] ) - kernel( active_data_x[0,:] , active_data_x[i,:] )
s = s + Qval*support_vects[i]
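	# Invasion criterion: treating the candidate point as an invading
	# species, it joins the active set when
	# K(X, X) - K(x_0, x_0) - sum_i [K(X, x_i) - K(x_0, x_i)] a_i >= 0,
	# where x_0 is the first current support vector and a_i are the current
	# support vector values.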
#Compute the invasion condition
inv = kernel( X, X ) - kernel( active_data_x[0,:] , active_data_x[0,:] ) - s
if (inv>=0):
#The new species can invade. Recompute the steady state using QP
Qp = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects):
for j in range(numsupportvects):
s = kernel(active_data_x[i,:],active_data_x[j,:])
Qp[i,j] = 2*s
for i in range(numsupportvects):
s = kernel(active_data_x[i,:], X)
Qp[i,numsupportvects] = 2*s
Qp[numsupportvects,i] = 2*s
s = kernel(X,X)
Qp[numsupportvects,numsupportvects] = 2*s
Qp = matrix(Qp)
p = np.zeros(numsupportvects+1)
for i in range(numsupportvects):
p[i] = - kernel(active_data_x[i,:],active_data_x[i,:])
p[numsupportvects] = - kernel( X, X )
p = matrix(p)
G = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects+1):
G[i,i] = -1
G = matrix(G)
h = np.zeros([numsupportvects+1])
h = matrix(h)
A = np.zeros(numsupportvects+1)
for i in range(numsupportvects):
A[i] = 1
A[numsupportvects] = 1
A = matrix(A,(1,numsupportvects+1),'d')
b = matrix(1.0)
#Call QP function
sol = solvers.qp(Qp, p, G, h, A, b)
#QP solution as array, all KKT values
KKT = np.array( sol['x'] )
#Get the new support vector indices and values
#only care about non-zero support vectors
countnew = 0
for i in range(len(KKT)):
if (KKT[i] < thresh):
KKT[i] = 0
countnew = countnew + 1
countnew = len(KKT) - countnew
#the set of new support vectors and support vector indices
new_active_data_x = np.zeros([countnew, dimension])
newsuppvects = np.zeros([countnew])
auxcount = 0
auxcount2 = 0
for i in range(len(KKT)-1):
if (KKT[i] > thresh):
new_active_data_x[auxcount,:] = active_data_x[auxcount2,:]
newsuppvects[auxcount] = KKT[i]
auxcount = auxcount + 1
auxcount2 = auxcount2 + 1
if (KKT[i]<thresh):
auxcount2 = auxcount2 + 1
if (KKT[len(KKT)-1]>thresh):
new_active_data_x[auxcount,:] = X
newsuppvects[auxcount] = KKT[len(KKT)-1]
auxcount = auxcount + 1
#New support vector values and indices
support_vects = newsuppvects
#zero array because it can change shape
active_data_x = np.zeros( [len(support_vects) , dimension ] )
active_data_x = new_active_data_x
return active_data_x, support_vects
#Run the EcoSVDD algorithm
#Inputs are datapoints and initial support vector values
#Returns the set of active datapoints, the support vector values
def Run_EcoSVDD( xvals, active_data_x, support_vects ):
radii = np.zeros([ N - N_start ])
overlap = np.zeros([ N - N_start ])
#the dataset dimension
dimension = len(xvals[0,:])
	#Run the EcoSVDD algorithm over all points
for point in range(N_start,N):
radii[ point - N_start ] = get_radius(active_data_x, support_vects)
overlap[ point - N_start] = sim_metric( active_data_x, support_vects, batch_data_x, batch_support_vects )
X = xvals[point,:]
		#Run the EcoSVDD algorithm on a single point
active_data_x, support_vects = point_Run_EcoSVDD( X , active_data_x , support_vects, dimension )
return active_data_x, support_vects , radii, overlap
#Run a batch SVDD on all data
#Input is all training data
#Output is the set of active datapoints and support vector values
def batchSVDD( xvals ):
#the number of datapoints
N = len(xvals[:,0])
#the full kernel matrix for batch SVM
Qfull = np.zeros([N,N])
for i in range(N):
for j in range(N):
			#using the cosine-normalized kernel defined above
s = kernel(xvals[i],xvals[j])
Qfull[i,j] = 2*s
#The full batch SVM solution with QP
#Convert into CVXOPT format
Qf = matrix(Qfull)
pf = np.zeros(N)
for i in range(N):
pf[i] = -kernel(xvals[i,:],xvals[i,:])
pf = matrix(pf)
Gf = np.zeros([N,N])
for i in range(N):
Gf[i,i] = -1
Gf = matrix(Gf)
hf = np.zeros([N])
hf = matrix(hf)
Af = np.zeros(N)
for i in range(N):
Af[i] = 1
Af = matrix(Af,(1,N),'d')
bf = matrix(1.0)
sol = solvers.qp(Qf, pf, Gf, hf, Af, bf)
evars = np.array( sol['x'] )
#only care about non-zero support vectors
for i in range(N):
if (evars[i] < thresh):
evars[i] = 0
#Find support vectors and support vector indices for Batch SVM
supvectsindsfull = np.array( np.ndarray.nonzero(evars)[0] )
supvectsfull = evars[supvectsindsfull]
active_data_x = xvals[ supvectsindsfull ,:]
return active_data_x, supvectsfull
#Function to get the radius of the trained SVDD
#Inputs are active data and set of support vectors
#Returns the SVDD radius
def get_radius(active_data_x,support_vects):
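	# For each support vector x_i this evaluates its squared feature-space
	# distance to the center c = sum_j a_j phi(x_j):
	# K(x_i, x_i) - 2 sum_j a_j K(x_i, x_j) + sum_{j,k} a_j a_k K(x_j, x_k);
	# the radius is taken as the smallest such distance.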
dists = np.zeros([len(support_vects)])
for i in range(len(support_vects)):
d1 = 0
d2 = 0
for j in range(len(support_vects)):
d1 = d1 + kernel( active_data_x[i,:] , active_data_x[j,:] ) * support_vects[j]
for k in range(len(support_vects)):
d2 = d2 + kernel( active_data_x[j,:] , active_data_x[k,:] ) * support_vects[j] * support_vects[k]
dists[i] = kernel( active_data_x[i,:] , active_data_x[i,:] ) - 2*d1 + d2
R = np.sqrt( min(dists) )
return R
#A similarity metric between two SVDDs
#Returns the normalized dot product of the two SVDD sphere centers
def sim_metric( active_data_x, support_vects, batch_data_x, batch_support_vects ):
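	# Each SVDD center is c = sum_i a_i phi(x_i) in feature space; the score
	# below is cos(theta) = <c_1, c_2> / (|c_1| |c_2|), computed entirely
	# from kernel evaluations, so a value of 1 means the two sphere centers
	# point in the same direction.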
lenth_1 = len(support_vects)
lenth_2 = len(batch_support_vects)
val = 0
for i in range(lenth_1):
for j in range(lenth_2):
val = val + support_vects[i]*batch_support_vects[j] * kernel( active_data_x[i,:] , batch_data_x[j,:] )
norm1 = 0
for i in range(lenth_1):
for j in range(lenth_1):
norm1 = norm1 + support_vects[i]*support_vects[j] * kernel( active_data_x[i,:], active_data_x[j,:] )
norm2 = 0
for i in range(lenth_2):
for j in range(lenth_2):
norm2 = norm2 + batch_support_vects[i]*batch_support_vects[j] * kernel( batch_data_x[i,:], batch_data_x[j,:] )
metric = val / np.sqrt( norm1 * norm2 )
	metric = float(metric) #np.float was removed in newer NumPy versions
return metric
#These parameters are chosen to create a dataset
#Dimension of the dataset
dimension = 100
#Total number of training points
N = 600
#Intial number of points used to compute steady state, can be user entered
N_start = 20
#Number of realizations
N_reals = 20
#The realization accuracies
radii_data = np.zeros([N_reals,N - N_start])
overlap_data = np.zeros([N_reals,N - N_start])
avg_batch_radius = 0
#loop over realizations
for r in range(N_reals):
#get the training set
xvals = generate_data(N,dimension)
#Get the full batch solution to compare
batch_data_x, batch_support_vects = batchSVDD( xvals )
#The batch SVDD radius
batch_radius = get_radius( batch_data_x, batch_support_vects )
avg_batch_radius = avg_batch_radius + batch_radius
	#the initial datapoints
intial_xvals = xvals[0:N_start,:]
	#Get the initial set of active datapoints and support vector values
intial_active_data_x, intial_support_vects = EcoSVDD_initialize(intial_xvals)
	#Run the EcoSVDD algorithm on the dataset
active_data_x, support_vects, radii, overlap = Run_EcoSVDD( xvals, intial_active_data_x, intial_support_vects )
radii_data[r,:] = radii
overlap_data[r,:] = overlap
#average batch radius
avg_batch_radius = avg_batch_radius/N_reals
#make accuracy plots vs time
import os
os.environ["PATH"] += ':/usr/local/texlive/2015/bin/x86_64-darwin'
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(labelsize=16)
fontsize = 22
avg_radius = np.zeros([N - N_start])
avg_overlap = np.zeros([N - N_start])
for i in range(N - N_start):
avg_radius[i] = np.average( radii_data[:,i] )
avg_overlap[i] = np.average( overlap_data[:,i] )
for r in range(N_reals):
	#Plot the EcoSVDD radius trajectory for this realization
plt.plot( radii_data[r,:] , linewidth=2, color = 'k')
plt.plot( avg_radius , linewidth=6, color = 'b')
plt.axhline(y = avg_batch_radius,linestyle='--',linewidth=6)
plt.ylabel("$ R(T) $",fontsize = fontsize + 2)
plt.xlabel("$ T $" ,fontsize=fontsize + 2)
plt.grid()
plt.tick_params(labelsize=fontsize+2)
plt.tight_layout()
plt.show()
#plt.savefig("./graphs/SVDD_radii")
plt.clf()
for r in range(N_reals):
	#Plot the similarity to the batch SVDD solution for this realization
plt.plot(overlap_data[r,:], linewidth=2 , color = 'k')
plt.plot(avg_overlap, linewidth=6, color = 'b')
#plt.ylim(0.5,1.03)
plt.ylabel("$ S(T) $",fontsize = fontsize + 2)
plt.xlabel("$ T $" ,fontsize=fontsize + 2)
plt.tick_params(labelsize=fontsize + 2)
plt.grid()
plt.tight_layout()
plt.show()
#plt.savefig("./graphs/SVDD_sim")
plt.clf()
| 12,103 | 21.332103 | 114 | py |
EcoSVM | EcoSVM-master/SIcode/python_scripts/SVDD/linear_EcoSVDD.py | #Owen Howell, July 15, 2019
#olh20@bu.edu, https://owenhowell20.github.io
#Optimized linear EcoSVDD code
#In case of norm 1 kernel K( x, x) = 1 reduces to FISVDD algorithm: https://arxiv.org/abs/1709.00139
#In the paper we focus on kernel functions with K(x,x) = 1, however this code works for any kernel function K(x,y)
#This code runs the EcoSVDD algorithm and compares it with batch SVDD
#Import standard python packages
import numpy as np
import matplotlib.pyplot as plt
import sys
#QP is done with CVXOPT packages
from cvxopt import matrix, solvers
import numpy as np
solvers.options['show_progress'] = False
#A global error threshold, any small number can be used
thresh = 1e-3
#Generate data from a gaussian distribution
#Returns dataset
def generate_data(N, dimension):
	#The mean of the distribution; uniform(0.5, 0.5, ...) fixes every component at 0.5
	mean = np.random.uniform( 0.5, 0.5, dimension)
	#The covariance matrix of the distribution
cov = np.zeros([dimension,dimension])
for i in range(dimension):
cov[i,i] = .01
xvals = np.random.multivariate_normal(mean,cov,N)
#xvals = np.random.uniform(0.2,0.6,[N,dimension])
return xvals
#The kernel function
def kernel(x,y):
#using a gaussian kernel
sigma = 1.0
return np.exp( -1* np.dot( ( x - y ) , ( x - y ) ) /(2*sigma ) )
#Initialize the EcoSVDD, compute support vectors for the first N_start points
#Inputs are the datapoints
#Returns the set of active datapoints, support vector values
def EcoSVDD_initialize(xvals):
N_start = len(xvals[:,0])
	#Function to generate the initial kernel matrix
	def intial_kernel_matrix():
		#Compute the initial kernel matrix
Qmat = np.zeros([N_start,N_start])
for i in range(N_start):
for j in range(N_start):
s = kernel(xvals[i,:],xvals[j,:])
Qmat[i,j] = 2.0*s
return Qmat
Qmat = intial_kernel_matrix()
#Convert to CVXOPT format
Q = matrix(Qmat)
p = np.zeros(N_start)
for i in range(N_start):
p[i] = - kernel(xvals[i,:],xvals[i,:])
p = matrix(p)
G = np.zeros([N_start,N_start])
for i in range(N_start):
G[i,i] = -1
G = matrix(G)
h = np.zeros([N_start])
h = matrix(h)
A = np.zeros(N_start)
for i in range(N_start):
A[i] = 1
A = matrix(A,(1,N_start),'d')
b = matrix(1.0)
sol = solvers.qp(Q, p, G, h, A, b)
	#the initial values of the solution
#KKT values a_{i}
KKT = np.array( sol['x'] )
#only care about non-zero values
for i in range(N_start):
if (KKT[i] < thresh) :
KKT[i] = 0
	#Only need to keep non-zero KKT values, also known as support vectors
	#Find initial support vector values and support vector indices
support_vects_inds = np.array( np.ndarray.nonzero(KKT)[0] )
support_vects = KKT[support_vects_inds]
#the set of active datapoints
	active_data_x = xvals[support_vects_inds,:]
#Check that there is at least one active support vector
if ( len(support_vects_inds) == 0 ):
print("Not enough intial points, no active support vector found. Make sure adaquate kernel function is used")
quit()
return active_data_x, support_vects
#Run the EcoSVDD algorithm on a single new point
#Inputs are datapoint X, active datapoints, set of support vectors and the dataset dimension
#Returns the new set of data points and the new set of support vectors
def point_Run_EcoSVDD( X, active_data_x , support_vects , dimension ):
numsupportvects = len(active_data_x[:,0])
	#sum variable
s = 0
for i in range(numsupportvects):
Qval = kernel( X , active_data_x[i,:] ) - kernel( active_data_x[0,:] , active_data_x[i,:] )
s = s + Qval*support_vects[i]
#Compute the invasion condition
inv = kernel( X, X ) - kernel( active_data_x[0,:] , active_data_x[0,:] ) - s
if (inv>=0):
#The new species can invade. Recompute the steady state using QP
Qp = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects):
for j in range(numsupportvects):
s = kernel(active_data_x[i,:],active_data_x[j,:])
Qp[i,j] = 2*s
for i in range(numsupportvects):
s = kernel(active_data_x[i,:], X)
Qp[i,numsupportvects] = 2*s
Qp[numsupportvects,i] = 2*s
s = kernel(X,X)
Qp[numsupportvects,numsupportvects] = 2*s
Qp = matrix(Qp)
p = np.zeros(numsupportvects+1)
for i in range(numsupportvects):
p[i] = - kernel(active_data_x[i,:],active_data_x[i,:])
p[numsupportvects] = - kernel( X, X )
p = matrix(p)
G = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects+1):
G[i,i] = -1
G = matrix(G)
h = np.zeros([numsupportvects+1])
h = matrix(h)
A = np.zeros(numsupportvects+1)
for i in range(numsupportvects):
A[i] = 1
A[numsupportvects] = 1
A = matrix(A,(1,numsupportvects+1),'d')
b = matrix(1.0)
#Call QP function
sol = solvers.qp(Qp, p, G, h, A, b)
#QP solution as array, all KKT values
KKT = np.array( sol['x'] )
#Get the new support vector indices and values
#only care about non-zero support vectors
countnew = 0
for i in range(len(KKT)):
if (KKT[i] < thresh):
KKT[i] = 0
countnew = countnew + 1
countnew = len(KKT) - countnew
#the set of new support vectors and support vector indices
new_active_data_x = np.zeros([countnew, dimension])
newsuppvects = np.zeros([countnew])
auxcount = 0
auxcount2 = 0
for i in range(len(KKT)-1):
if (KKT[i] > thresh):
new_active_data_x[auxcount,:] = active_data_x[auxcount2,:]
newsuppvects[auxcount] = KKT[i]
auxcount = auxcount + 1
auxcount2 = auxcount2 + 1
if (KKT[i]<thresh):
auxcount2 = auxcount2 + 1
if (KKT[len(KKT)-1]>thresh):
new_active_data_x[auxcount,:] = X
newsuppvects[auxcount] = KKT[len(KKT)-1]
auxcount = auxcount + 1
#New support vector values and indices
support_vects = newsuppvects
#zero array because it can change shape
active_data_x = np.zeros( [len(support_vects) , dimension ] )
active_data_x = new_active_data_x
return active_data_x, support_vects
#Run the EcoSVDD algorithm
#Inputs are datapoints and initial support vector values
#Returns the set of active datapoints, the support vector values
def Run_EcoSVDD( xvals, active_data_x, support_vects ):
#the dataset dimension
dimension = len(xvals[0,:])
	#Run the EcoSVDD algorithm over all points
for point in range(N_start,N):
X = xvals[point,:]
		#Run the EcoSVDD algorithm on a single point
active_data_x, support_vects = point_Run_EcoSVDD( X , active_data_x , support_vects, dimension )
return active_data_x, support_vects
#Run a batch SVDD on all data
#Input is all training data
#Output is the set of active datapoints and support vector values
def batchSVDD( xvals ):
#the number of datapoints
N = len(xvals[:,0])
#the full kernel matrix for batch SVM
Qfull = np.zeros([N,N])
for i in range(N):
for j in range(N):
			#using the Gaussian kernel function defined above
s = kernel(xvals[i],xvals[j])
Qfull[i,j] = 2*s
#The full batch SVM solution with QP
#Convert into CVXOPT format
Qf = matrix(Qfull)
pf = np.zeros(N)
for i in range(N):
pf[i] = -kernel(xvals[i,:],xvals[i,:])
pf = matrix(pf)
Gf = np.zeros([N,N])
for i in range(N):
Gf[i,i] = -1
Gf = matrix(Gf)
hf = np.zeros([N])
hf = matrix(hf)
Af = np.zeros(N)
for i in range(N):
Af[i] = 1
Af = matrix(Af,(1,N),'d')
bf = matrix(1.0)
sol = solvers.qp(Qf, pf, Gf, hf, Af, bf)
evars = np.array( sol['x'] )
#only care about non-zero support vectors
for i in range(N):
if (evars[i] < thresh):
evars[i] = 0
#Find support vectors and support vector indices for Batch SVM
supvectsindsfull = np.array( np.ndarray.nonzero(evars)[0] )
supvectsfull = evars[supvectsindsfull]
active_data_x = xvals[ supvectsindsfull ,:]
return active_data_x, supvectsfull
#Function to get the radius of the trained SVDD
#Inputs are active data and set of support vectors
#Returns the SVDD radius
def get_radius(active_data_x,support_vects):
dists = np.zeros([len(support_vects)])
for i in range(len(support_vects)):
d1 = 0
d2 = 0
for j in range(len(support_vects)):
d1 = d1 + kernel( active_data_x[i,:] , active_data_x[j,:] ) * support_vects[j]
for k in range(len(support_vects)):
d2 = d2 + kernel( active_data_x[j,:] , active_data_x[k,:] ) * support_vects[j] * support_vects[k]
dists[i] = kernel( active_data_x[i,:] , active_data_x[i,:] ) - 2*d1 + d2
R = np.sqrt( min(dists) )
return R
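#Editor's sketch (not in the original script): classify a fresh point as inside or
#outside the learned sphere by comparing its distance to the center against the
#radius R returned by get_radius (is_inlier is a hypothetical helper name):
def is_inlier(x, active_data_x, support_vects, R):
    n = len(support_vects)
    d1 = sum(support_vects[j] * kernel(x, active_data_x[j, :]) for j in range(n))
    d2 = sum(support_vects[j] * support_vects[k] * kernel(active_data_x[j, :], active_data_x[k, :]) for j in range(n) for k in range(n))
    #squared distance to the center, ||phi(x) - c||^2 with c = sum_j a_j phi(x_j)
    return float(kernel(x, x) - 2 * d1 + d2) <= R ** 2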
#A similarity metric between two SVDDs
#Returns the normalized dot product of the two SVDD sphere centers in feature
#space, i.e. <c1,c2>/(|c1||c2|) with center c = sum_i a_i phi(x_i)
def sim_metric( active_data_x, support_vects, batch_data_x, batch_support_vects ):
lenth_1 = len(support_vects)
lenth_2 = len(batch_support_vects)
val = 0
for i in range(lenth_1):
for j in range(lenth_2):
val = val + support_vects[i]*batch_support_vects[j] * kernel( active_data_x[i,:] , batch_data_x[j,:] )
norm1 = 0
for i in range(lenth_1):
for j in range(lenth_1):
norm1 = norm1 + support_vects[i]*support_vects[j] * kernel( active_data_x[i,:], active_data_x[j,:] )
norm2 = 0
for i in range(lenth_2):
for j in range(lenth_2):
norm2 = norm2 + batch_support_vects[i]*batch_support_vects[j] * kernel( batch_data_x[i,:], batch_data_x[j,:] )
metric = val / np.sqrt( norm1 * norm2 )
metric = float(metric) #np.float was removed from modern numpy
return metric
#These parameters are chosen to create a dataset
#Dimension of the dataset
dimension = 2
#Total number of training points
N = 500
#Get train dataset, this can also be user entered
xvals = generate_data(N,dimension)
#the number of initial points
N_start = 20
#the initial datapoints and labels
intial_xvals = xvals[0:N_start,:]
#Get the initial set of active datapoints and support vector values
intial_active_data_x, intial_support_vects = EcoSVDD_initialize(intial_xvals)
#Run the EcoSVM algorithm on the dataset
active_data_x, support_vects = Run_EcoSVDD( xvals, intial_active_data_x, intial_support_vects )
#Get the full batch solution to compare
batch_data_x, batch_support_vects = batchSVDD( xvals )
radius = get_radius( active_data_x, support_vects )
batch_radius = get_radius( batch_data_x, batch_support_vects )
#the final radius
print('EcoSVDD radius:', radius)
print('Batch SVDD radius:', batch_radius)
metric = sim_metric( active_data_x, support_vects, batch_data_x, batch_support_vects )
#the similarity score
print('EcoSVDD and Batch SVDD similarity score:', metric)
#Function to make prediction plots
#See main text for detail
#only make plots in two dimensions
def make_plot():
#Only make plots for two dimensional data
if (dimension!=2):
quit()
#plot the first 100 training datapoints
for i in range(100):
plt.plot(xvals[i,0],xvals[i,1],'.',c='k',marker='X',markersize=8,markeredgecolor='black')
for i in range(len(batch_support_vects)):
plt.plot(batch_data_x[i,0],batch_data_x[i,1],'.',c='r',marker='P',markersize=22,markeredgecolor='black')
#also plot active support vectors in larger markers
for i in range(len(support_vects)):
plt.plot(active_data_x[i,0],active_data_x[i,1],'.',c='g',marker='*',markersize=22,markeredgecolor='black')
import matplotlib.lines as mlines
blue_star = mlines.Line2D([], [], color='green', marker='*', linestyle='None', markersize=10, label='EcoSVDD Support Vectors')
red_square = mlines.Line2D([], [], color='red', marker='P', linestyle='None', markersize=10, label='Batch Support Vectors')
purple_triangle = mlines.Line2D([], [], color='black', marker='X', linestyle='None', markersize=10, label='Datapoints')
plt.legend(handles=[blue_star, red_square, purple_triangle])
fontsize = 22
plt.ylim(0,1)
plt.xlim(0,1)
plt.grid()
plt.tick_params(labelsize=fontsize-5)
plt.xlabel("$X_{1}$",size=fontsize)
plt.ylabel("$X_{2}$",size = fontsize)
from pylab import rcParams
rcParams['figure.figsize'] = 500, 500
plt.tight_layout()
plt.show()
make_plot()
| 11,814 | 21.6341 | 127 | py |
EcoSVM | EcoSVM-master/SIcode/python_scripts/MNIST/EcoSVM_MNIST.py | #Owen Howell, July 20, 2019
#olh20@bu.edu, https://owenhowell20.github.io
#This code runs Eco_SVM on MNIST dataset
#Note: This code takes significant computational time (approx. 1+ days); for the plots made in the paper each realization was run in parallel
#Note: The memory requirements are also large for the full dataset. For running on a personal computer please subsample the data
#Many thanks to https://www.bu.edu/tech/support/research/ for their advice on optimization
#Import standard python packages
import numpy as np
import matplotlib.pyplot as plt
import sys
#QP is done with CVXOPT packages
from cvxopt import matrix, solvers
solvers.options['show_progress'] = False
#A global error threshold, any small number
thresh = 1e-3
#Note: For each realization the C slack hyperparameter and gamma RBF hyperparameter should be tuned to minimize out of sample error
#it is approximately five for most realizations
C = 5.321
#using the 'auto' scikit learn SVM parameters
gamma = 1/(28*28)
#defining an RBF kernel function with tunable parameter gamma
def kernel(x,y):
return np.exp( - gamma * np.dot( ( x - y ) , np.transpose(x - y) ) )
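#Editorial speedup sketch (assumption, not in the original code): the O(N^2)
#python loops that build kernel matrices below can be replaced by a vectorized
#RBF Gram matrix; X and Y are 2-D numpy arrays of shape (n, d) and (m, d):
def kernel_matrix(X, Y):
    sq_dists = np.sum(X**2, axis=1)[:, None] + np.sum(Y**2, axis=1)[None, :] - 2.0 * (X @ Y.T)
    return np.exp(-gamma * np.maximum(sq_dists, 0.0)) #clip tiny negatives from rounding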
#Initialize the EcoSVM, compute support vectors for the first N_start points
#Inputs are the datapoints, data label and Slack value
#Returns the set of active datapoints, active datapoint labels, support vector values and an active index value
def EcoSVM_initialize(xvals,yvals ):
N_start = len(yvals)
#Function to generate the initial kernel matrix
def intial_kernel_matrix():
#Compute the initial kernel matrix
Qmat = np.zeros([N_start,N_start])
for i in range(N_start):
for j in range(N_start):
#using the RBF kernel defined above
s = kernel(xvals[i,:],xvals[j,:])
Qmat[i,j] = s*yvals[i]*yvals[j]
return Qmat
Qmat = intial_kernel_matrix()
#Convert to CVXOPT format
Q = matrix(Qmat)
p = - np.ones(N_start)
p = matrix(p)
G = np.zeros([2*N_start,N_start])
for i in range(N_start):
G[i,i] = -1
for i in range(N_start):
G[i+N_start,i] = +1
G = matrix(G)
h = np.zeros([2*N_start])
for i in range(N_start,2*N_start):
h[i] = C
h = matrix(h)
A = np.zeros(N_start)
for i in range(N_start):
A[i] = yvals[i]
A = matrix(A,(1,N_start),'d')
b = matrix(0.0)
sol = solvers.qp(Q, p, G, h, A, b)
#the initial values of the solution
#KKT values a_{i}
KKT = np.array( sol['x'] )
#only care about non-zero values
for i in range(N_start):
if (KKT[i] < thresh) :
KKT[i] = 0.0
#Only need to keep non-zero KKT values, also known as support vectors
#Find initial support vector values and support vector indices
support_vects_inds = np.array( np.ndarray.nonzero(KKT)[0] )
support_vects = KKT[support_vects_inds]
#the set of active datapoints
#index into the function arguments, not the intial_* globals
active_data_x = xvals[support_vects_inds,:]
active_data_y = yvals[support_vects_inds]
#Check that there is at least one active support vector
num_active = 0
for i in range(len(support_vects_inds)):
if ( (support_vects[i] - C)**2 > thresh ):
num_active = num_active + 1
if ( num_active == 0 ):
print("No active support vector found. Make sure that there are both +1 and -1 examples. Increase the number of intial points. Increase the slack.")
quit()
#Find the active index
test_vals = (support_vects - C/2.0 )**2
index_val = np.argmin(test_vals)
return active_data_x, active_data_y, support_vects, index_val
#Run the EcoSVM algorithm on a single new point
#Inputs are datapoint X, datalabel Y, active datapoints, active data labels, set of support vectors Lagrange Multiplier, dataset dimension and Slack value
def point_Run_EcoSVM( X, Y , active_data_x , active_data_y , support_vects , index_val , dimension ):
numsupportvects = len(active_data_y)
#Find the active index
test_vals = (support_vects - C/2.0 )**2
index_val = np.argmin(test_vals)
s = 0
for i in range(numsupportvects):
#each resident support vector is weighted by its abundance a_i
s = s + support_vects[i]*active_data_y[i]*Y*( kernel( active_data_x[i,:], active_data_x[index_val,:] ) - kernel( active_data_x[i,:], X ) )
#Compute the invasion condition
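#Editor's note (hedged): this is the ecological invasion criterion. With r the
#reference support vector satisfying 0 < a_r < C, the growth rate of the new
#point is 1 - y*y_r + sum_i a_i y_i y [K(x_i,x_r) - K(x_i,x)], and the point
#enters the ecosystem only when this is non-negative.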
inv = 1 - Y*active_data_y[index_val] + s
if (inv>=0):
#The new species can invade. Recompute the steady state using QP
Qp = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects):
for j in range(numsupportvects):
s = kernel(active_data_x[i,:],active_data_x[j,:])
Qp[i,j] = s*active_data_y[i]*active_data_y[j]
for i in range(numsupportvects):
s = kernel(active_data_x[i,:], X)
Qp[i,numsupportvects] = s*active_data_y[i]*Y
Qp[numsupportvects,i] = s*active_data_y[i]*Y
s = kernel(X,X)
Qp[numsupportvects,numsupportvects] = s*Y * Y
Qp = matrix(Qp)
p = - np.ones(numsupportvects+1)
p = matrix(p)
G = np.zeros([2*numsupportvects+2,numsupportvects+1])
for i in range(numsupportvects+1):
G[i,i] = -1
for i in range(numsupportvects+1):
G[i+numsupportvects+1,i] = +1
G = matrix(G)
h = np.zeros([2*numsupportvects+2])
for i in range(numsupportvects+1,2*numsupportvects+2):
h[i] = C
h = matrix(h)
A = np.zeros(numsupportvects+1)
for i in range(numsupportvects):
A[i] = active_data_y[i]
A[numsupportvects] = Y
A = matrix(A,(1,numsupportvects+1),'d')
b = matrix(0.0)
#Call QP function
sol = solvers.qp(Qp, p, G, h, A, b)
#QP solution as array, all KKT values
KKT = np.array( sol['x'] )
#Get the new support vector indices and values
#only care about non-zero support vectors
countnew = 0
for i in range(len(KKT)):
if (KKT[i] < thresh):
KKT[i] = 0
countnew = countnew + 1
countnew = len(KKT) - countnew
#the set of new support vectors and support vector indices
new_active_data_x = np.zeros([countnew, dimension])
new_active_data_y = np.zeros([countnew])
newsuppvects = np.zeros([countnew])
auxcount = 0
auxcount2 = 0
for i in range(len(KKT)-1):
if (KKT[i] > thresh):
new_active_data_x[auxcount,:] = active_data_x[auxcount2,:]
new_active_data_y[auxcount] = active_data_y[auxcount2]
auxcount2 = auxcount2 + 1
newsuppvects[auxcount] = KKT[i]
auxcount = auxcount + 1
if (KKT[i]<thresh):
auxcount2 = auxcount2 + 1
if (KKT[len(KKT)-1] > thresh):
new_active_data_x[auxcount,:] = X
new_active_data_y[auxcount] = Y
newsuppvects[auxcount] = KKT[len(KKT)-1]
auxcount = auxcount + 1
#New support vector values and indices
support_vects = newsuppvects
active_data_y = new_active_data_y
#zero array because it can change shape
active_data_x = np.zeros( [len(support_vects) , dimension ] )
active_data_x = new_active_data_x
return active_data_x, active_data_y, support_vects, index_val
#Run the EcoSVM algorithm
#Inputs are datapoints and labels, the initial active datapoints and labels, initial support vector values and the initial active index
#Returns the active datapoints, active data labels, support vector values, active index, and the per-step test accuracy and support vector counts
def Run_EcoSVM( xvals, yvals, active_data_x, active_data_y, support_vects, index_val ):
N = len(yvals)
test_accuracy = np.zeros([ N - N_start])
number_active = np.zeros([ N - N_start])
#the dataset dimension
dimension = len(xvals[0,:])
#Run the EcoSVM algorithm over all points
for point in range(N_start,N):
#compute the b value
b = b_value(active_data_x,active_data_y,support_vects)
#Compute performance errors
EcoSVMerror = SVM_error(test_xvals,test_yvals, active_data_x, active_data_y, support_vects,b)
test_accuracy[point - N_start] = 1 - EcoSVMerror
print( 1 - EcoSVMerror )
count_active = 0
for i in range(len(active_data_y)):
if ( support_vects[i] > thresh and ( support_vects[i] - C )**2 > thresh**2 ):
count_active = count_active + 1
number_active[ point - N_start ] = count_active
X = xvals[point ,:]
Y = yvals[point ]
#Run the EcoSVM algorithm on a single point
active_data_x, active_data_y, support_vects, index_val = point_Run_EcoSVM( X , Y , active_data_x, active_data_y , support_vects , index_val , dimension )
return active_data_x, active_data_y , support_vects , index_val , test_accuracy, number_active
#Run a batch SVM on all data
#input is all training data, training labels and Slack value
#output is the set of active datapoints and data labels and support vector values
def batchSVM( xvals , yvals ):
#the number of datapoints
N = len(yvals)
#the full kernel matrix for batch SVM
Qfull = np.zeros([N,N])
for i in range(N):
for j in range(N):
#using the RBF kernel defined above
s = kernel(xvals[i,:],xvals[j,:])
Qfull[i,j] = s*yvals[i]*yvals[j]
#The full batch SVM solution with QP
#Convert into CVXOPT format
Qf = matrix(Qfull)
pf = - np.ones(N)
pf = matrix(pf)
Gf = np.zeros([2*N,N])
for i in range(N):
Gf[i,i] = -1
for i in range(N):
Gf[N+i,i] = +1
Gf = matrix(Gf)
hf = np.zeros([2*N])
for i in range(N,2*N):
hf[i] = C
hf = matrix(hf)
Af = np.zeros(N)
for i in range(N):
Af[i] = yvals[i]
Af = matrix(Af,(1,N),'d')
bf = matrix(0.0)
sol = solvers.qp(Qf, pf, Gf, hf, Af, bf)
evars = np.array( sol['x'] )
#only care about non-zero support vectors
for i in range(N):
if (evars[i] < thresh):
evars[i] = 0.0
#Find support vectors and support vector indices for Batch SVM
supvectsindsfull = np.array( np.ndarray.nonzero(evars)[0] )
supvectsfull = evars[supvectsindsfull]
active_data_x = xvals[ supvectsindsfull , :]
active_data_y = yvals[supvectsindsfull ]
return active_data_x, active_data_y, supvectsfull
#Compute the B value for an SVM
#Inputs are indices and support vector values
def b_value(active_data_x, active_data_y,supportvectors ):
s = 0
bp = 0
for i in range(len(supportvectors)):
bp = bp + 1
s = s + active_data_y[i]
for j in range(len(supportvectors)):
s = s - supportvectors[j] * active_data_y[j] * kernel( active_data_x[i,:] , active_data_x[j,:] )
b = 0
if (bp!=0):
b = 1/float(bp) * s
return b
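#Editor's note (hedged): with slack, the margin equality y_i (w.x_i + b) = 1 only
#holds for interior support vectors (0 < a_i < C); the average above also includes
#bound vectors with a_i = C, which can bias b slightly. Restricting the sum to
#indices with thresh < a_i < C - thresh is a common refinement.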
#the SVM prediction function
#Inputs are datapoint x to make prediction on, set of indices, set of support vectors and b value
#Output is the prediction value +1 or -1
def pred(x , active_data_x, active_data_y , supportvectors , b):
s = 0
for i in range(len(supportvectors)):
s = s + active_data_y[i] * supportvectors[i] * kernel(x , active_data_x[i,:] )
s = s + b
return s
#Function to compute test error
#Inputs are testing data and labels, set of support vector indices and support vector values
#Returns test error
def SVM_error( test_xvals , test_yvals , active_data_x, active_data_y , support_vects, b):
#the number of test points
N_test = len( test_yvals )
#Compute the EcoSVM error, # of misclassified points
error = 0
for i in range(N_test):
if ( test_yvals[i] != np.sign( pred( test_xvals[i] , active_data_x, active_data_y, support_vects, b ) ) ):
error = error + 1
return error/N_test
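#Editor's sketch (assumption): together with the kernel_matrix helper sketched
#above, whole-test-set predictions avoid the per-point python loops in pred and
#SVM_error (pred_batch is a hypothetical helper name):
def pred_batch(test_x, active_data_x, active_data_y, support_vects, b):
    K = kernel_matrix(test_x, active_data_x) #shape (n_test, n_support)
    return np.sign(K @ (np.ravel(support_vects) * active_data_y) + b)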
#The MNIST dataset, https://ci.nii.ac.jp/naid/10027939599/en/ for more information
from keras.datasets import mnist
#each image is 28x28 , dimension of each image
dimension = 28 * 28
#Function to get MNIST dataset
def getMNIST():
(all_xvals, all_yvals), (all_test_xvals , all_test_yvals) = mnist.load_data()
#reshape data into useable form
all_xvals = np.reshape(all_xvals,(60000,28*28))
all_test_xvals = np.reshape(all_test_xvals,(10000,28*28))
#Count the number of digits:
countA = 0
countB = 0
for i in range( len(all_yvals) ):
if ( all_yvals[i] == 1 ):
countA = countA + 1
if ( all_yvals[i] == 4 ):
countB = countB + 1
xvals = np.zeros([countA+countB, 28*28])
yvals = np.zeros([countA + countB])
count = 0
for i in range( len(all_yvals) ):
if ( all_yvals[i] == 1 ):
xvals[count,:] = all_xvals[i,:]
yvals[count] = +1
count = count + 1
if ( all_yvals[i] == 4 ):
xvals[count,:] = all_xvals[i,:]
yvals[count] = -1
count = count + 1
countA_test = 0
countB_test = 0
for i in range( len(all_test_yvals) ):
if ( all_test_yvals[i] == 1 ):
countA_test = countA_test + 1
if ( all_test_yvals[i] == 4 ):
countB_test = countB_test + 1
test_xvals = np.zeros([countA_test+countB_test, 28*28])
test_yvals = np.zeros([countA_test + countB_test])
count = 0
for i in range( len(all_test_yvals) ):
if ( all_test_yvals[i] == 1 ):
test_xvals[count,:] = all_test_xvals[i,:]
test_yvals[count] = +1
count = count + 1
if ( all_test_yvals[i] == 4 ):
test_xvals[count,:] = all_test_xvals[i,:]
test_yvals[count] = -1
count = count + 1
#Essential to rescale data as MNIST set has rank less than dimension
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(xvals)
xvals = scaler.transform( xvals )
test_xvals = scaler.transform( test_xvals )
return xvals, yvals, test_xvals, test_yvals
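#Editorial sketch (select_digits is a hypothetical name): the digit-selection
#loops above can be written with boolean masks in a few lines:
def select_digits(features, labels, pos_digit=1, neg_digit=4):
    mask = (labels == pos_digit) | (labels == neg_digit)
    x = features[mask]
    y = np.where(labels[mask] == pos_digit, 1.0, -1.0)
    return x, y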
xvals, yvals, test_xvals, test_yvals = getMNIST()
#Shuffle the order of the training values
from sklearn.utils import shuffle
xvals , yvals = shuffle( xvals , yvals)
#the labels
yvals = np.array(yvals)
test_yvals = np.array(test_yvals)
#Total number of training points
N = len(yvals)
#Total number of test points
N_test = len(test_yvals)
#Initial number of points used to compute steady state, can be user entered
#This should be much greater than the dataset dimension, especially if the dataset is highly non-linear
#For the MNIST dataset with an RBF kernel the dataset is essentially linear, so there is no problem with using a small number of points
N_start = 5
#the initial datapoints and labels
intial_xvals = xvals[0:N_start,:]
intial_yvals = yvals[0:N_start]
#subsample to run in a reasonable time
#for the results in the paper please use the whole dataset
Ntrun = 200
xvals = xvals[0:Ntrun,:]
yvals = yvals[0:Ntrun]
#Get the initial set of active datapoints, active datapoint labels, support vector values and the active index
intial_active_data_x, intial_active_data_y, intial_support_vects , intial_index = EcoSVM_initialize(intial_xvals,intial_yvals)
#Run the EcoSVM algorithm on the dataset
active_data_x, active_data_y , support_vects, index_val, test_accuracy, number_active = Run_EcoSVM( xvals, yvals, intial_active_data_x, intial_active_data_y, intial_support_vects , intial_index)
#Get the full batch solution to compare
batch_data_x, batch_data_y, batch_support_vects = batchSVM( xvals,yvals)
batch_number_active = len(batch_data_y)
#compute the batch b value
bfull = b_value(batch_data_x,batch_data_y,batch_support_vects)
#compute the batch accuracy
batcherror = SVM_error(test_xvals,test_yvals, batch_data_x, batch_data_y, batch_support_vects,bfull)
#single realization: the batch error and support vector count are used directly below
#make accuracy plots vs time
import os
os.environ["PATH"] += ':/usr/local/texlive/2015/bin/x86_64-darwin'
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(labelsize=16)
fontsize = 22
plt.plot( test_accuracy , linewidth=2, color = 'k')
plt.axhline(y = 1 - batcherror,linestyle='--',linewidth=6)
plt.ylim(0.5,1.1)
plt.ylabel("$A(T) $",fontsize = fontsize + 2)
plt.xlabel("$ T $" ,fontsize=fontsize + 2)
plt.grid()
plt.tick_params(labelsize=fontsize + 2)
plt.tight_layout()
plt.show()
#plt.savefig("./graphs/mnistacc")
plt.clf()
plt.plot( number_active , linewidth=2 , color = 'k')
plt.axhline(y = batch_number_active,linestyle='--',linewidth=6)
plt.ylabel("$N(T) $",fontsize = fontsize + 2)
plt.xlabel("$ T $" ,fontsize=fontsize + 2)
plt.tick_params(labelsize=fontsize + 2)
plt.grid()
plt.tight_layout()
plt.show()
#plt.savefig("./graphs/mnistnum")
plt.clf()
| 15,622 | 22.671212 | 195 | py |
EcoSVM | EcoSVM-master/SIcode/python_scripts/Nonlinear_SVM/nonlinear_EcoSVM_numerics.py | #Owen Howell, July 20, 2019
#olh20@bu.edu, https://owenhowell20.github.io
#Optimized nonlinear EcoSVM code
#Nothing is precomputed
#Easy online implementation
#Import standard python packages
import numpy as np
import matplotlib.pyplot as plt
import sys
#QP is done with CVXOPT packages
from cvxopt import matrix, solvers
solvers.options['show_progress'] = False
#A global error threshold, any small number
thresh = 1e-3
#function to generate the training data. Any weakly nonlinearly separable dataset can be used.
#Input is number of points, dimension and nonlinearity
#Returns dataset and dataset labels
def train_data(N, dimension, nonlinearity):
#Draw set of random points
xvals = np.random.uniform(0,1,[N,dimension])
yvals = np.zeros([N])
for i in range(N):
e = 1
for j in range(1,dimension):
e = e * np.sin(2*np.pi*xvals[i,j])
e = nonlinearity*e + 0.5
if (xvals[i,0]<e):
yvals[i] = +1
if (xvals[i,0]>=e):
yvals[i] = -1
return xvals, yvals
#function to generate the test data. Any weakly nonlinearly separable dataset can be used.
#Input is number of points, dimension and nonlinearity
#Returns dataset and dataset labels
def test_data(N_test,dimension, nonlinearity):
#Draw set of random points
xvals = np.random.uniform(0,1,[N_test,dimension])
yvals = np.zeros([N_test])
for i in range(N_test):
e = 1
for j in range(1,dimension):
e = e * np.sin(2*np.pi*xvals[i,j])
e = nonlinearity*e + 0.5
if (xvals[i,0]<e):
yvals[i] = +1
if (xvals[i,0]>=e):
yvals[i] = -1
return xvals, yvals
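#Editorial note: train_data and test_data above are identical; a single vectorized
#generator could serve both. A sketch under that assumption (hypothetical name):
def make_labeled_data(n, dimension, nonlinearity):
    xvals = np.random.uniform(0, 1, [n, dimension])
    #decision boundary x_0 = 0.5 + nonlinearity * prod_j sin(2 pi x_j)
    boundary = 0.5 + nonlinearity * np.prod(np.sin(2 * np.pi * xvals[:, 1:]), axis=1)
    yvals = np.where(xvals[:, 0] < boundary, 1.0, -1.0)
    return xvals, yvals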
#defining a linear kernel function
def kernel(x,y):
return np.dot(x,np.transpose(y))
#Initialize the EcoSVM, compute support vectors for the first N_start points
#Inputs are the datapoints, data label and Slack value
#Returns the set of active datapoints, active datapoint labels, support vector values and an active index value
def EcoSVM_initialize(xvals,yvals,C):
N_start = len(yvals)
#Function to generate the initial kernel matrix
def intial_kernel_matrix():
#Compute the initial kernel matrix
Qmat = np.zeros([N_start,N_start])
for i in range(N_start):
for j in range(N_start):
#using a linear kernel
s = kernel(xvals[i,:],xvals[j,:])
Qmat[i,j] = s*yvals[i]*yvals[j]
return Qmat
Qmat = intial_kernel_matrix()
#Convert to CVXOPT format
Q = matrix(Qmat)
p = - np.ones(N_start)
p = matrix(p)
G = np.zeros([2*N_start,N_start])
for i in range(N_start):
G[i,i] = -1
for i in range(N_start):
G[i+N_start,i] = +1
G = matrix(G)
h = np.zeros([2*N_start])
for i in range(N_start,2*N_start):
h[i] = C
h = matrix(h)
A = np.zeros(N_start)
for i in range(N_start):
A[i] = yvals[i]
A = matrix(A,(1,N_start),'d')
b = matrix(0.0)
sol = solvers.qp(Q, p, G, h, A, b)
#the initial values of the solution
#KKT values a_{i}
KKT = np.array( sol['x'] )
#only care about non-zero values
for i in range(N_start):
if (KKT[i] < thresh) :
KKT[i] = 0.0
#Only need to keep non-zero KKT values, also known as support vectors
#Find initial support vector values and support vector indices
support_vects_inds = np.array( np.ndarray.nonzero(KKT)[0] )
support_vects = KKT[support_vects_inds]
#the set of active datapoints
#index into the function arguments, not the intial_* globals
active_data_x = xvals[support_vects_inds,:]
active_data_y = yvals[support_vects_inds]
#Check that there is at least one active support vector
num_active = 0
for i in range(len(support_vects_inds)):
if ( (support_vects[i] - C)**2 > thresh ):
num_active = num_active + 1
if ( num_active == 0 ):
print("No active support vector found. Make sure that there are both +1 and -1 examples. Increase the number of intial points. Increase the slack.")
quit()
#Find the active index
test_vals = (support_vects - C/2.0 )**2
index_val = np.argmin(test_vals)
return active_data_x, active_data_y, support_vects, index_val
#Run the EcoSVM algorithm on a single new point
#Inputs are datapoint X, datalabel Y, active datapoints, active data labels, set of support vectors Lagrange Multiplier, dataset dimension and Slack value
def point_Run_EcoSVM( X, Y , active_data_x , active_data_y , support_vects , index_val , dimension , C ):
numsupportvects = len(active_data_y)
#Find the active index
test_vals = (support_vects - C/2.0 )**2
index_val = np.argmin(test_vals)
s = 0
for i in range(numsupportvects):
#each resident support vector is weighted by its abundance a_i
s = s + support_vects[i]*active_data_y[i]*Y*( kernel( active_data_x[i,:], active_data_x[index_val,:] ) - kernel( active_data_x[i,:], X ) )
#Compute the invasion condition
inv = 1 - Y*active_data_y[index_val] + s
if (inv>=0):
#The new species can invade. Recompute the steady state using QP
Qp = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects):
for j in range(numsupportvects):
s = kernel(active_data_x[i,:],active_data_x[j,:])
Qp[i,j] = s*active_data_y[i]*active_data_y[j]
for i in range(numsupportvects):
s = kernel(active_data_x[i,:], X)
Qp[i,numsupportvects] = s*active_data_y[i]*Y
Qp[numsupportvects,i] = s*active_data_y[i]*Y
s = kernel(X,X)
Qp[numsupportvects,numsupportvects] = s*Y * Y
Qp = matrix(Qp)
p = - np.ones(numsupportvects+1)
p = matrix(p)
G = np.zeros([2*numsupportvects+2,numsupportvects+1])
for i in range(numsupportvects+1):
G[i,i] = -1
for i in range(numsupportvects+1):
G[i+numsupportvects+1,i] = +1
G = matrix(G)
h = np.zeros([2*numsupportvects+2])
for i in range(numsupportvects+1,2*numsupportvects+2):
h[i] = C
h = matrix(h)
A = np.zeros(numsupportvects+1)
for i in range(numsupportvects):
A[i] = active_data_y[i]
A[numsupportvects] = Y
A = matrix(A,(1,numsupportvects+1),'d')
b = matrix(0.0)
#Call QP function
sol = solvers.qp(Qp, p, G, h, A, b)
#QP solution as array, all KKT values
KKT = np.array( sol['x'] )
#Get the new support vector indices and values
#only care about non-zero support vectors
countnew = 0
for i in range(len(KKT)):
if (KKT[i] < thresh):
KKT[i] = 0
countnew = countnew + 1
countnew = len(KKT) - countnew
#the set of new support vectors and support vector indices
new_active_data_x = np.zeros([countnew, dimension])
new_active_data_y = np.zeros([countnew])
newsuppvects = np.zeros([countnew])
auxcount = 0
auxcount2 = 0
for i in range(len(KKT)-1):
if (KKT[i] > thresh):
new_active_data_x[auxcount,:] = active_data_x[auxcount2,:]
new_active_data_y[auxcount] = active_data_y[auxcount2]
auxcount2 = auxcount2 + 1
newsuppvects[auxcount] = KKT[i]
auxcount = auxcount + 1
if (KKT[i]<thresh):
auxcount2 = auxcount2 + 1
if (KKT[len(KKT)-1] > thresh):
new_active_data_x[auxcount,:] = X
new_active_data_y[auxcount] = Y
newsuppvects[auxcount] = KKT[len(KKT)-1]
auxcount = auxcount + 1
#New support vector values and indices
support_vects = newsuppvects
active_data_y = new_active_data_y
#zero array because it can change shape
active_data_x = np.zeros( [len(support_vects) , dimension ] )
active_data_x = new_active_data_x
return active_data_x, active_data_y, support_vects, index_val
#Run the EcoSVM algorithm
#Inputs are datapoints and labels, the initial active datapoints and labels, initial support vector values, the initial active index and the Slack value
#Returns the active datapoints, active data labels, support vector values, active index, and the per-step test accuracy and support vector counts
def Run_EcoSVM( xvals, yvals, active_data_x, active_data_y, support_vects, index_val , C ):
test_accuracy = np.zeros([ N - N_start])
number_active = np.zeros([ N - N_start])
#the dataset dimension
dimension = len(xvals[0,:])
#Run the EcoSVM algorithm over all points
for point in range(N_start,N):
#compute the b value
b = b_value(active_data_x,active_data_y,support_vects,C)
#Compute performance errors
EcoSVMerror = SVM_error(test_xvals,test_yvals, active_data_x, active_data_y, support_vects,b)
test_accuracy[point - N_start] = 1 - EcoSVMerror
number_active[ point - N_start ] = len( active_data_y )
X = xvals[point,:]
Y = yvals[point]
#Run the EcoSVM algorithm on a single point
active_data_x, active_data_y, support_vects, index_val = point_Run_EcoSVM( X , Y , active_data_x, active_data_y , support_vects , index_val , dimension , C)
return active_data_x, active_data_y , support_vects , index_val , test_accuracy, number_active
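#Editorial note (assumption): here N(T) counts every stored point, including bound
#ones with a_i = C, while the MNIST script counts only interior support vectors.
#A hypothetical helper that makes the distinction explicit:
def count_interior(support_vects, C):
    sv = np.ravel(support_vects)
    return int(np.sum((sv > thresh) & ((sv - C) ** 2 > thresh ** 2)))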
#Run a batch SVM on all data
#input is all training data, training labels and Slack value
#output is the set of active datapoints and data labels and support vector values
def batchSVM( xvals , yvals , C ):
#the number of datapoints
N = len(yvals)
#the full kernel matrix for batch SVM
Qfull = np.zeros([N,N])
for i in range(N):
for j in range(N):
#using a linear kernel
s = kernel(xvals[i,:],xvals[j,:])
Qfull[i,j] = s*yvals[i]*yvals[j]
#The full batch SVM solution with QP
#Convert into CVXOPT format
Qf = matrix(Qfull)
pf = - np.ones(N)
pf = matrix(pf)
Gf = np.zeros([2*N,N])
for i in range(N):
Gf[i,i] = -1
for i in range(N):
Gf[N+i,i] = +1
Gf = matrix(Gf)
hf = np.zeros([2*N])
for i in range(N,2*N):
hf[i] = C
hf = matrix(hf)
Af = np.zeros(N)
for i in range(N):
Af[i] = yvals[i]
Af = matrix(Af,(1,N),'d')
bf = matrix(0.0)
sol = solvers.qp(Qf, pf, Gf, hf, Af, bf)
evars = np.array( sol['x'] )
#only care about non-zero support vectors
for i in range(N):
if (evars[i] < thresh):
evars[i] = 0.0
#Find support vectors and support vector indices for Batch SVM
supvectsindsfull = np.array( np.ndarray.nonzero(evars)[0] )
supvectsfull = evars[supvectsindsfull]
active_data_x = xvals[ supvectsindsfull , :]
active_data_y = yvals[supvectsindsfull ]
return active_data_x, active_data_y, supvectsfull
#Compute the B value for an SVM
#Inputs are indices and support vector values
def b_value(active_data_x, active_data_y,supportvectors,C):
s = 0
bp = 0
for i in range(len(supportvectors)):
bp = bp + 1
s = s + active_data_y[i]
for j in range(len(supportvectors)):
s = s - supportvectors[j] * active_data_y[j] * kernel( active_data_x[i,:] , active_data_x[j,:] )
b = 0
if (bp!=0):
b = 1/float(bp) * s
return b
#the SVM prediction function
#Inputs are datapoint x to make prediction on, set of indices, set of support vectors and b value
#Output is the prediction value +1 or -1
def pred(x , active_data_x, active_data_y , supportvectors , b):
s = 0
for i in range(len(supportvectors)):
s = s + active_data_y[i] * supportvectors[i] * kernel(x , active_data_x[i,:] )
s = s + b
return s
#Function to compute test error
#Inputs are testing data and labels, set of support vector indices and support vector values
#Returns test error
def SVM_error( test_xvals , test_yvals , active_data_x, active_data_y , support_vects, b):
#the number of test points
N_test = len( test_yvals )
#Compute the EcoSVM error, # of misclassified points
error = 0
for i in range(N_test):
if ( test_yvals[i] != np.sign( pred( test_xvals[i] , active_data_x, active_data_y, support_vects, b ) ) ):
error = error + 1
return error/N_test
#Slack value, this is a hyperparameter that should be tuned to minimize batch loss
#For this case just set to 10
C = 10.0
#These parameters are chosen to create a dataset
#Dimension of the dataset
dimension = 2
#nonlinearity factor of the dataset
nonlinearity = 0.1
#Total number of training points
N = 800
#Total number of test points
N_test = 600
#Initial number of points used to compute steady state, can be user entered
N_start = 10
#Number of realizations
N_reals = 5
#The realization accuracies
test_accuracys = np.zeros([N_reals,N - N_start])
number_actives = np.zeros([N_reals,N - N_start])
batcherror, batch_number_active = 0 , 0
#loop over realizations
for r in range(N_reals):
#get the training and test set
xvals , yvals = train_data(N,dimension,nonlinearity)
test_xvals , test_yvals = test_data(N_test,dimension,nonlinearity)
#the initial datapoints and labels
intial_xvals = xvals[0:N_start,:]
intial_yvals = yvals[0:N_start]
#Get the initial set of active datapoints, active datapoint labels, support vector values and the active index
intial_active_data_x, intial_active_data_y, intial_support_vects , index_val = EcoSVM_initialize(intial_xvals,intial_yvals,C)
#Run the EcoSVM algorithm on the dataset
active_data_x, active_data_y , support_vects, index_val, test_accuracy, number_active = Run_EcoSVM( xvals, yvals, intial_active_data_x, intial_active_data_y, intial_support_vects , index_val, C )
test_accuracys[r,:] = test_accuracy
number_actives[r,:] = number_active
#Get the full batch solution to compare
batch_data_x, batch_data_y, batch_support_vects = batchSVM( xvals,yvals , C)
batch_count_active = 0
batch_count_active = len(batch_data_y)
batch_number_active = batch_number_active + batch_count_active
#compute the batch b value
bfull = b_value(batch_data_x,batch_data_y,batch_support_vects,C)
#compute the batch accuracy
batcherror = batcherror + SVM_error(test_xvals,test_yvals, batch_data_x, batch_data_y, batch_support_vects,bfull)
#average batch error, average number of batch support vectors
batcherror = batcherror / N_reals
batch_number_active = batch_number_active / N_reals
#make accuracy plots vs time
import os
os.environ["PATH"] += ':/usr/local/texlive/2015/bin/x86_64-darwin'
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(labelsize=16)
fontsize = 22
avg_test_accuarcy = np.zeros([N - N_start])
avg_number_active = np.zeros([N - N_start])
for i in range(N - N_start):
avg_test_accuarcy[i] = np.average( test_accuracys[:,i] )
avg_number_active[i] = np.average( number_actives[:,i] )
for r in range(N_reals):
#Make plots of EcoSVM accuracy vs time
plt.plot(test_accuracys[r,:], linewidth=2, color = 'k')
plt.plot(avg_test_accuarcy, linewidth=4, color = 'b')
plt.axhline(y = 1 - batcherror,linestyle='--',linewidth=6)
plt.ylim(0.5,1.1)
plt.ylabel("$A(T) $",fontsize = fontsize + 2)
plt.xlabel("$ T $" ,fontsize=fontsize + 2)
plt.grid()
plt.tick_params(labelsize=fontsize + 2)
plt.tight_layout()
#save before show: with non-interactive backends, show() can leave the figure empty
plt.savefig("./graphs/nonacc")
plt.show()
plt.clf()
plt.close()
for r in range(N_reals):
#Make plots of EcoSVM number of support vectors vs time
plt.plot(number_actives[r,:], linewidth=2 , color = 'k')
plt.plot(avg_number_active, linewidth=4, color = 'b')
plt.ylim(0, max( np.amax(number_active) , batch_number_active ) + 10 )
plt.axhline(y = batch_number_active,linestyle='--',linewidth=6)
plt.ylabel("$N(T) $",fontsize = fontsize + 2)
plt.xlabel("$ T $" ,fontsize=fontsize + 2)
plt.tick_params(labelsize=fontsize + 2)
plt.grid()
plt.tight_layout()
#save before show, for the same reason as above
plt.savefig("./graphs/nonnum")
plt.show()
plt.clf()
plt.close()
| 14,890 | 22.711783 | 197 | py |
EcoSVM | EcoSVM-master/SIcode/python_scripts/Nonlinear_SVM/nonlinear_EcoSVM.py | #Owen Howell, July 20, 2019
#olh20@bu.edu, https://owenhowell20.github.io
#Optimized nonlinear EcoSVM code
#Nothing is precomputed
#Easy online implementation
#Import standard python packages
import numpy as np
import matplotlib.pyplot as plt
import sys
#QP is done with CVXOPT packages
from cvxopt import matrix, solvers
solvers.options['show_progress'] = False
#A global error threshold, any small number
thresh = 1e-3
#function to generate the training data. Any weakly nonlinearly separable dataset can be used.
#Input is number of points, dimension and nonlinearity
#Returns dataset and dataset labels
def train_data(N, dimension, nonlinearity):
#Draw set of random points
xvals = np.random.uniform(0,1,[N,dimension])
yvals = np.zeros([N])
for i in range(N):
e = 1
for j in range(1,dimension):
e = e * np.sin(2*np.pi*xvals[i,j])
e = nonlinearity*e + 0.5
if (xvals[i,0]<e):
yvals[i] = +1
if (xvals[i,0]>=e):
yvals[i] = -1
return xvals, yvals
#function to generate the test data. Any weakly nonlinearly separable dataset can be used.
#Input is number of points, dimension and nonlinearity
#Returns dataset and dataset labels
def test_data(N_test,dimension, nonlinearity):
#Draw set of random points
xvals = np.random.uniform(0,1,[N_test,dimension])
yvals = np.zeros([N_test])
for i in range(N_test):
e = 1
for j in range(1,dimension):
e = e * np.sin(2*np.pi*xvals[i,j])
e = nonlinearity*e + 0.5
if (xvals[i,0]<e):
yvals[i] = +1
if (xvals[i,0]>=e):
yvals[i] = -1
return xvals, yvals
#defining the kernel function (linear by default; an RBF alternative is sketched in the comments below)
def kernel(x,y):
#Linear
#np.dot(x,np.transpose(y))
#RBF
#sigma = 1.0
#np.exp( -1/(2*sigma**2) * np.dot(x - y,np.transpose(x - y)) )
return np.dot(x,np.transpose(y))
#Initialize the EcoSVM, compute support vectors for the first N_start points
#Inputs are the datapoints, data label and Slack value
#Returns the set of active datapoints, active datapoint labels, support vector values and an active index value
def EcoSVM_initialize(xvals,yvals,C):
N_start = len(yvals)
#Function to generate the initial kernel matrix
def intial_kernel_matrix():
#Compute the initial kernel matrix
Qmat = np.zeros([N_start,N_start])
for i in range(N_start):
for j in range(N_start):
#using a linear kernel
s = kernel(xvals[i,:],xvals[j,:])
Qmat[i,j] = s*yvals[i]*yvals[j]
return Qmat
Qmat = intial_kernel_matrix()
#Convert to CVXOPT format
Q = matrix(Qmat)
p = - np.ones(N_start)
p = matrix(p)
G = np.zeros([2*N_start,N_start])
for i in range(N_start):
G[i,i] = -1
for i in range(N_start):
G[i+N_start,i] = +1
G = matrix(G)
h = np.zeros([2*N_start])
for i in range(N_start,2*N_start):
h[i] = C
h = matrix(h)
A = np.zeros(N_start)
for i in range(N_start):
A[i] = yvals[i]
A = matrix(A,(1,N_start),'d')
b = matrix(0.0)
sol = solvers.qp(Q, p, G, h, A, b)
#the initial values of the solution
#KKT values a_{i}
KKT = np.array( sol['x'] )
#only care about non-zero values
for i in range(N_start):
if (KKT[i] < thresh) :
KKT[i] = 0.0
#Only need to keep non-zero KKT values, also known as support vectors
#Find initial support vector values and support vector indices
support_vects_inds = np.array( np.ndarray.nonzero(KKT)[0] )
support_vects = KKT[support_vects_inds]
#the set of active datapoints
#index into the function arguments, not the intial_* globals
active_data_x = xvals[support_vects_inds,:]
active_data_y = yvals[support_vects_inds]
#Check that there is at least one active support vector
num_active = 0
for i in range(len(support_vects_inds)):
if ( (support_vects[i] - C)**2 > thresh ):
num_active = num_active + 1
if ( num_active == 0 ):
print("No active support vector found. Make sure that there are both +1 and -1 examples. Increase the number of intial points. Increase the slack.")
quit()
#Find the active index
test_vals = (support_vects - C/2.0 )**2
index_val = np.argmin(test_vals)
return active_data_x, active_data_y, support_vects, index_val
#Run the EcoSVM algorithm on a single new point
#Inputs are datapoint X, datalabel Y, active datapoints, active data labels, set of support vectors Lagrange Multiplier, dataset dimension and Slack value
def point_Run_EcoSVM( X, Y , active_data_x , active_data_y , support_vects , index_val , dimension , C ):
numsupportvects = len(active_data_y)
#Find the active index
test_vals = (support_vects - C/2.0 )**2
index_val = np.argmin(test_vals)
s = 0
for i in range(numsupportvects):
#each resident support vector is weighted by its abundance a_i
s = s + support_vects[i]*active_data_y[i]*Y*( kernel( active_data_x[i,:], active_data_x[index_val,:] ) - kernel( active_data_x[i,:], X ) )
#Compute the invasion condition
inv = 1 - Y*active_data_y[index_val] + s
if (inv>=0):
#The new species can invade. Recompute the steady state using QP
Qp = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects):
for j in range(numsupportvects):
s = kernel(active_data_x[i,:],active_data_x[j,:])
Qp[i,j] = s*active_data_y[i]*active_data_y[j]
for i in range(numsupportvects):
s = kernel(active_data_x[i,:], X)
Qp[i,numsupportvects] = s*active_data_y[i]*Y
Qp[numsupportvects,i] = s*active_data_y[i]*Y
s = kernel(X,X)
Qp[numsupportvects,numsupportvects] = s*Y * Y
Qp = matrix(Qp)
p = - np.ones(numsupportvects+1)
p = matrix(p)
G = np.zeros([2*numsupportvects+2,numsupportvects+1])
for i in range(numsupportvects+1):
G[i,i] = -1
for i in range(numsupportvects+1):
G[i+numsupportvects+1,i] = +1
G = matrix(G)
h = np.zeros([2*numsupportvects+2])
for i in range(numsupportvects+1,2*numsupportvects+2):
h[i] = C
h = matrix(h)
A = np.zeros(numsupportvects+1)
for i in range(numsupportvects):
A[i] = active_data_y[i]
A[numsupportvects] = Y
A = matrix(A,(1,numsupportvects+1),'d')
b = matrix(0.0)
#Call QP function
sol = solvers.qp(Qp, p, G, h, A, b)
#QP solution as array, all KKT values
KKT = np.array( sol['x'] )
#Get the new support vector indices and values
#only care about non-zero support vectors
countnew = 0
for i in range(len(KKT)):
if (KKT[i] < thresh):
KKT[i] = 0
countnew = countnew + 1
countnew = len(KKT) - countnew
#the set of new support vectors and support vector indices
new_active_data_x = np.zeros([countnew, dimension])
new_active_data_y = np.zeros([countnew])
newsuppvects = np.zeros([countnew])
auxcount = 0
auxcount2 = 0
for i in range(len(KKT)-1):
if (KKT[i] > thresh):
new_active_data_x[auxcount,:] = active_data_x[auxcount2,:]
new_active_data_y[auxcount] = active_data_y[auxcount2]
auxcount2 = auxcount2 + 1
newsuppvects[auxcount] = KKT[i]
auxcount = auxcount + 1
if (KKT[i]<thresh):
auxcount2 = auxcount2 + 1
if (KKT[len(KKT)-1] > thresh):
new_active_data_x[auxcount,:] = X
new_active_data_y[auxcount] = Y
newsuppvects[auxcount] = KKT[len(KKT)-1]
auxcount = auxcount + 1
#New support vector values and indices
support_vects = newsuppvects
active_data_y = new_active_data_y
#zero array because it can change shape
active_data_x = np.zeros( [len(support_vects) , dimension ] )
active_data_x = new_active_data_x
return active_data_x, active_data_y, support_vects, index_val
#Run the EcoSVM algorithm
#Inputs are datapoints and labels, the initial active datapoints and labels, initial support vector values, the initial active index and the Slack value
#Returns the active datapoints, active data labels, support vector values and the per-step test accuracy
def Run_EcoSVM( xvals, yvals, active_data_x, active_data_y, support_vects, index_val , C ):
test_accuracy = np.zeros([N - N_start])
#the dataset dimension
dimension = len(xvals[0,:])
#Run the EcoSVM algorithm over all points
for point in range(N_start,N):
#compute the b value
b = b_value(active_data_x,active_data_y,support_vects,C)
#Compute performance errors
EcoSVMerror = SVM_error(test_xvals,test_yvals, active_data_x, active_data_y, support_vects,b)
test_accuracy[point - N_start] = 1 - EcoSVMerror
print(1- EcoSVMerror)
X = xvals[point,:]
Y = yvals[point]
# make_plot2(point, active_data_x, active_data_y, support_vects)
#Run the EcoSVM algorithm on a single point
active_data_x, active_data_y, support_vects , index_val = point_Run_EcoSVM( X , Y , active_data_x, active_data_y , support_vects, index_val , dimension , C)
return active_data_x, active_data_y , support_vects, test_accuracy
#Run a batch SVM on all data
#input is all training data, training labels and Slack value
#output is the set of active datapoints and data labels and support vector values
def batchSVM( xvals , yvals , C ):
#the number of datapoints
N = len(yvals)
#the full kernel matrix for batch SVM
Qfull = np.zeros([N,N])
for i in range(N):
for j in range(N):
#using a linear kernel
s = kernel(xvals[i,:],xvals[j,:])
Qfull[i,j] = s*yvals[i]*yvals[j]
#The full batch SVM solution with QP
#Convert into CVXOPT format
Qf = matrix(Qfull)
pf = - np.ones(N)
pf = matrix(pf)
Gf = np.zeros([2*N,N])
for i in range(N):
Gf[i,i] = -1
for i in range(N):
Gf[N+i,i] = +1
Gf = matrix(Gf)
hf = np.zeros([2*N])
for i in range(N,2*N):
hf[i] = C
hf = matrix(hf)
Af = np.zeros(N)
for i in range(N):
Af[i] = yvals[i]
Af = matrix(Af,(1,N),'d')
bf = matrix(0.0)
sol = solvers.qp(Qf, pf, Gf, hf, Af, bf)
evars = np.array( sol['x'] )
#only care about non-zero support vectors
for i in range(N):
if (evars[i] < thresh):
evars[i] = 0.0
#Find support vectors and support vector indices for Batch SVM
supvectsindsfull = np.array( np.ndarray.nonzero(evars)[0] )
supvectsfull = evars[supvectsindsfull]
active_data_x = xvals[ supvectsindsfull , :]
active_data_y = yvals[supvectsindsfull ]
return active_data_x, active_data_y, supvectsfull
#Compute the B value for an SVM
#Inputs are indices and support vector values
def b_value(active_data_x, active_data_y,supportvectors,C):
s = 0
bp = 0
for i in range(len(supportvectors)):
bp = bp + 1
s = s + active_data_y[i]
for j in range(len(supportvectors)):
s = s - supportvectors[j] * active_data_y[j] * kernel( active_data_x[i,:] , active_data_x[j,:] )
b = 0
if (bp!=0):
b = 1/float(bp) * s
return b
#the SVM prediction function
#Inputs are datapoint x to make prediction on, set of indices, set of support vectors and b value
#Output is the prediction value +1 or -1
def pred(x , active_data_x, active_data_y , supportvectors , b):
s = 0
for i in range(len(supportvectors)):
s = s + active_data_y[i] * supportvectors[i] * kernel(x , active_data_x[i,:] )
s = s + b
return s
#Function to compute test error
#Inputs are testing data and labels, set of support vector indices and support vector values
#Returns test error
def SVM_error( test_xvals , test_yvals , active_data_x, active_data_y , support_vects, b):
#the number of test points
N_test = len( test_yvals )
#Compute the EcoSVM error, # of misclassified points
error = 0
for i in range(N_test):
if ( test_yvals[i] != np.sign( pred( test_xvals[i] , active_data_x, active_data_y, support_vects, b ) ) ):
error = error + 1
return error/N_test
#These parameters are chosen to create a dataset
#Dimension of the dataset
dimension = 2
#nonlinearity factor of the dataset
nonlinearity = 0.1
#Total number of training points
N = 200
#Get train and test datasets, this can be user entered
xvals, yvals = train_data(N,dimension,nonlinearity)
#Total number of test points
N_test = 150
test_xvals , test_yvals = test_data(N_test,dimension,nonlinearity)
#The batch SVM loss as a function of slack value
def batch_loss(C):
#Get the full batch solution to compare
batch_data_x, batch_data_y, batch_support_vects = batchSVM( xvals,yvals , C )
bfull = b_value(batch_data_x,batch_data_y,batch_support_vects,C)
batcherror = SVM_error(test_xvals,test_yvals, batch_data_x, batch_data_y, batch_support_vects,bfull)
return batcherror
import scipy.optimize
#Tune over slack value
C = scipy.optimize.golden(batch_loss)
print(C)
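#Editorial caution (assumption, not from the original): with no bracket,
#scipy.optimize.golden can probe non-positive slack values where the QP becomes
#infeasible; supplying a bracket keeps the search in a sensible range, e.g.
#C = scipy.optimize.golden(batch_loss, brack=(0.1, 1.0, 100.0))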
#Initial number of points used to compute steady state, can be user entered
N_start = 20
#the initial datapoints and labels
intial_xvals = xvals[0:N_start,:]
intial_yvals = yvals[0:N_start]
#Get the initial set of active datapoints, active datapoint labels, support vector values and the active index
intial_active_data_x, intial_active_data_y, intial_support_vects, index_val = EcoSVM_initialize(intial_xvals,intial_yvals,C)
#Run the EcoSVM algorithm on the dataset
active_data_x, active_data_y , support_vects , test_accuracy = Run_EcoSVM( xvals, yvals, intial_active_data_x, intial_active_data_y, intial_support_vects, index_val, C )
#Get the full batch solution to compare
batch_data_x, batch_data_y, batch_support_vects = batchSVM( xvals,yvals , C )
#compute the b value
b = b_value(active_data_x,active_data_y,support_vects,C)
bfull = b_value(batch_data_x,batch_data_y,batch_support_vects,C)
#Compute performance errors
EcoSVMerror = SVM_error(xvals,yvals, active_data_x, active_data_y, support_vects,b)
print('EcoSVM train error:',EcoSVMerror)
EcoSVMerror = SVM_error(test_xvals,test_yvals, active_data_x, active_data_y, support_vects,b)
print('EcoSVM test error:',EcoSVMerror)
print('')
batcherror = SVM_error(xvals,yvals, batch_data_x, batch_data_y , batch_support_vects,bfull)
print('Batch SVM train error:', batcherror)
batcherror = SVM_error(test_xvals,test_yvals, batch_data_x, batch_data_y, batch_support_vects,bfull)
print('Batch SVM test error:', batcherror)
#Function to make prediction plots
#See main text for detail
#only make plots in two dimensions
def make_plot():
#Only make plots for two dimensional data
if (dimension!=2):
quit()
#Make prediction plots
k = 200
batch_preds = np.zeros([k,k])
EcoSVM_preds = np.zeros([k,k])
x = 0
y = 0
dl = 1/k
for i in range(k):
x = 0
for j in range(k):
batch_preds[i,j] = np.sign( pred([x,y],batch_data_x,batch_data_y, batch_support_vects,bfull) )
EcoSVM_preds[i,j] = np.sign( pred( [x,y] , active_data_x, active_data_y, support_vects,b ) )
x = x + dl
y = y + dl
diffs = 0*batch_preds
x = 0
y = 0
dl = 1/k
for i in range(k):
x = 0
for j in range(k):
if (batch_preds[i,j] == EcoSVM_preds[i,j]):
if (EcoSVM_preds[i,j]==1):
diffs[i,j] = -1
if (EcoSVM_preds[i,j]==-1):
diffs[i,j] = 1
if (batch_preds[i,j]!=EcoSVM_preds[i,j]):
diffs[i,j] = 3
x = x + dl
y = y + dl
#plot the first 100 training datapoints
for i in range(100):
if (yvals[i]==1):
plt.plot(xvals[i,0],xvals[i,1],'.',c='g',marker='P',markersize=8,markeredgecolor='black')
if (yvals[i]==-1):
plt.plot(xvals[i,0],xvals[i,1],'.',c='r',marker='o',markersize=8,markeredgecolor='black')
#also plot active support vectors in larger markers
for i in range(len(support_vects)):
#interior (0 < a < C) and bound (a = C) support vectors were drawn identically, so one branch suffices
if (active_data_y[i]==1):
plt.plot(active_data_x[i,0],active_data_x[i,1],'.',c='g',marker='P',markersize=22,markeredgecolor='black')
if (active_data_y[i]==-1):
plt.plot(active_data_x[i,0], active_data_x[i,1],'.',c='r',marker='o',markersize=22,markeredgecolor='black')
fontsize = 20
plt.ylim(0,1)
plt.xlim(0,1)
plt.grid()
plt.tick_params(labelsize=fontsize)
plt.xlabel("$X_{1}$",size=fontsize+2)
plt.ylabel("$X_{2}$",size = fontsize+2)
from pylab import rcParams
rcParams['figure.figsize'] = 500, 500
gspace = np.linspace(0,1,100)
yspace = 0.5 + nonlinearity*np.sin(2*np.pi*gspace)
plt.plot(yspace,gspace,linestyle='--',linewidth=5,color='k')
plt.imshow(diffs,origin='lower',extent=(0,1,0,1),cmap='cool')
plt.tight_layout()
plt.show()
make_plot()
| 16,212 | 21.240055 | 171 | py |
EcoSVM | EcoSVM-master/SIcode/python_scripts/Linear_SVM/linear_EcoSVM.py | #Owen Howell, July 15, 2019
#olh20@bu.edu, https://owenhowell20.github.io
#Optimized linear EcoSVM code
#Nothing is precomputed
#This code runs the EcoSVM algorithm and compares with batch SVM
#Import standard python packages
import numpy as np
import matplotlib.pyplot as plt
import sys
#QP is done with CVXOPT packages
from cvxopt import matrix, solvers
import numpy as np
solvers.options['show_progress'] = False
#A global error threshold, any small number can be used
thresh = 1e-3
#Function to generate the training data. Any linearly separable dataset can be used.
#Returns dataset and dataset labels
def train_data(N, dimension):
#Draw set of random points
xvals = np.random.uniform(0,1,[N,dimension])
yvals = np.ones([N])
for i in range(N):
#Linearly Seperable
if (xvals[i,0]>0.5):
yvals[i] = -1
return xvals, yvals
#Function to generate the test data. Any linearly separable dataset can be used.
#Returns dataset and dataset labels
def test_data(N_test,dimension):
#Draw set of random points
xvals = np.random.uniform(0,1,[N_test,dimension])
yvals = np.ones([N_test])
for i in range(N_test):
#Linearly Seperable
if (xvals[i,0] > 0.5):
yvals[i] = -1
return xvals, yvals
#Defining a linear kernel function
def kernel(x,y):
return np.dot(x,np.transpose(y))
#Initialize the EcoSVM, compute support vectors for the first N_start points
#Inputs are the datapoints, data labels
#Returns the set of active datapoints, active datapoint labels, support vector values
def EcoSVM_initialize(xvals,yvals):
N_start = len(yvals)
#Function to generate the initial kernel matrix
def intial_kernel_matrix():
#Compute the initial kernel matrix
Qmat = np.zeros([N_start,N_start])
for i in range(N_start):
for j in range(N_start):
#using a linear kernel
s = kernel(xvals[i],xvals[j])
Qmat[i,j] = s*yvals[i]*yvals[j]
return Qmat
Qmat = intial_kernel_matrix()
#Convert to CVXOPT format
Q = matrix(Qmat)
p = - np.ones(N_start)
p = matrix(p)
G = np.zeros([N_start,N_start])
for i in range(N_start):
G[i,i] = -1
G = matrix(G)
h = np.zeros([N_start])
h = matrix(h)
A = np.zeros(N_start)
for i in range(N_start):
A[i] = yvals[i]
A = matrix(A,(1,N_start),'d')
b = matrix(0.0)
sol = solvers.qp(Q, p, G, h, A, b)
#the initial values of the solution
#KKT values a_{i}
KKT = np.array( sol['x'] )
#only care about non-zero values
for i in range(N_start):
if (KKT[i] < thresh) :
KKT[i] = 0
#Only need to keep non-zero KKT values, also known as support vectors
#Find initial support vector values and support vector indices
support_vects_inds = np.array( np.ndarray.nonzero(KKT)[0] )
support_vects = KKT[support_vects_inds]
#the set of active datapoints
#index into the function arguments, not the intial_* globals
active_data_x = xvals[support_vects_inds,:]
active_data_y = yvals[support_vects_inds]
#Check that there is at least one active support vector
if ( len(support_vects_inds) == 0 ):
print("Not enough intial points, no active support vector found. Make sure that there are both +1 and -1 examples.")
quit()
return active_data_x, active_data_y, support_vects
#Run the EcoSVM algorithm on a single new point
#Inputs are datapoint X, datalabel Y, active datapoints, active data labels, set of support vectors and the dataset dimension
#Returns the new set of data points and labels, the new set of support vectors
def point_Run_EcoSVM( X, Y , active_data_x , active_data_y , support_vects , dimension ):
numsupportvects = len(active_data_y)
s = 0
for i in range(numsupportvects):
Qval = Y*active_data_y[i]*( kernel( X , active_data_x[i,:] ) - kernel( active_data_x[0,:], active_data_x[i,:] ) )
s = s + Qval*support_vects[i]
#Compute the invasion condition
inv = 1 - Y * active_data_y[0] - s
if (inv>=0):
#The new species can invade. Recompute the steady state using QP
Qp = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects):
for j in range(numsupportvects):
s = kernel(active_data_x[i,:],active_data_x[j,:])
Qp[i,j] = s*active_data_y[i]*active_data_y[j]
for i in range(numsupportvects):
s = kernel(active_data_x[i,:], X)
Qp[i,numsupportvects] = s*active_data_y[i]*Y
Qp[numsupportvects,i] = s*active_data_y[i]*Y
s = kernel(X,X)
Qp[numsupportvects,numsupportvects] = s*Y * Y
Qp = matrix(Qp)
p = - np.ones(numsupportvects+1)
p = matrix(p)
G = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects+1):
G[i,i] = -1
G = matrix(G)
h = np.zeros([numsupportvects+1])
h = matrix(h)
A = np.zeros(numsupportvects+1)
for i in range(numsupportvects):
A[i] = active_data_y[i]
A[numsupportvects] = Y
A = matrix(A,(1,numsupportvects+1),'d')
b = matrix(0.0)
#Call QP function
sol = solvers.qp(Qp, p, G, h, A, b)
#QP solution as array, all KKT values
KKT = np.array( sol['x'] )
#Get the new support vector indices and values
#only care about non-zero support vectors
countnew = 0
for i in range(len(KKT)):
if (KKT[i] < thresh):
KKT[i] = 0
countnew = countnew + 1
countnew = len(KKT) - countnew
#the set of new support vectors and support vector indices
new_active_data_x = np.zeros([countnew, dimension])
new_active_data_y = np.zeros([countnew])
newsuppvects = np.zeros([countnew])
auxcount = 0
auxcount2 = 0
for i in range(len(KKT)-1):
if (KKT[i] > thresh):
new_active_data_x[auxcount,:] = active_data_x[auxcount2,:]
new_active_data_y[auxcount] = active_data_y[auxcount2]
newsuppvects[auxcount] = KKT[i]
auxcount = auxcount + 1
auxcount2 = auxcount2 + 1
if (KKT[i]<thresh):
auxcount2 = auxcount2 + 1
if (KKT[len(KKT)-1]>thresh):
new_active_data_x[auxcount,:] = X
new_active_data_y[auxcount] = Y
newsuppvects[auxcount] = KKT[len(KKT)-1]
auxcount = auxcount + 1
#New support vector values and indices
support_vects = newsuppvects
active_data_y = new_active_data_y
#zero array because it can change shape
active_data_x = np.zeros( [len(support_vects) , dimension ] )
active_data_x = new_active_data_x
return active_data_x, active_data_y, support_vects
#Run the EcoSVM algorithm
#Inputs are datapoints and labels, the initial active datapoints and labels, and initial support vector values
#Returns the set of active datapoints, the active data labels, the support vector values
def Run_EcoSVM( xvals, yvals, active_data_x, active_data_y, support_vects ):
#the dataset dimension
dimension = len(xvals[0,:])
#Run the EcoSVM algorithm over all points
for point in range(N_start,N):
X = xvals[point,:]
Y = yvals[point]
#Run the EcoSVM algorithm on a single point
active_data_x, active_data_y, support_vects = point_Run_EcoSVM( X , Y , active_data_x, active_data_y , support_vects , dimension )
return active_data_x, active_data_y , support_vects
#Run a batch SVM on all data
#input is all training data and training labels
#output is the set of active datapoints and data labels and support vector values
def batchSVM( xvals , yvals ):
#the number of datapoints
N = len(yvals)
#the full kernel matrix for batch SVM
Qfull = np.zeros([N,N])
for i in range(N):
for j in range(N):
#using a linear kernel
s = kernel(xvals[i],xvals[j])
Qfull[i,j] = s*yvals[i]*yvals[j]
#The full batch SVM solution with QP
#Convert into CVXOPT format
Qf = matrix(Qfull)
pf = - np.ones(N)
pf = matrix(pf)
Gf = np.zeros([N,N])
for i in range(N):
Gf[i,i] = -1
Gf = matrix(Gf)
hf = np.zeros([N])
hf = matrix(hf)
Af = np.zeros(N)
for i in range(N):
Af[i] = yvals[i]
Af = matrix(Af,(1,N),'d')
bf = matrix(0.0)
sol = solvers.qp(Qf, pf, Gf, hf, Af, bf)
evars = np.array( sol['x'] )
#only care about non-zero support vectors
for i in range(N):
if (evars[i] < thresh):
evars[i] = 0
#Find support vectors and support vector indices for Batch SVM
supvectsindsfull = np.array( np.ndarray.nonzero(evars)[0] )
supvectsfull = evars[supvectsindsfull]
active_data_x = xvals[ supvectsindsfull ,:]
active_data_y = yvals[supvectsindsfull ]
return active_data_x, active_data_y, supvectsfull
#Compute the b value for an SVM
#Inputs are set of active datapoints and data labels and support vector values
def b_value(active_data_x,active_data_y,supportvectors):
b = 0
#Compute the b value
s = 0
for i in range(len(supportvectors)):
s = s + active_data_y[i]
for j in range(len(supportvectors)):
s = s - supportvectors[j] * active_data_y[j] * kernel( active_data_x[i,:] , active_data_x[j,:] )
size = float( len(supportvectors) )
if (size!=0):
b = 1/size * s
	if (size==0):
		print("ERROR: no support vectors, b value is undefined")
return b
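#The loop above implements b = (1/|S|) * sum_i [ y_i - sum_j a_j y_j K(x_i,x_j) ],
#averaging the bias estimate over every support vector i in the active set S.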
#the SVM prediction function
#Inputs are datapoint x to make prediction on, set of indices, set of support vectors and b value
#Output is the real-valued decision score; its sign gives the predicted label +1 or -1
def pred(x , active_data_x, active_data_y , supportvectors , b):
s = 0
for i in range(len(supportvectors)):
s = s + active_data_y[i] * supportvectors[i] * kernel(x , active_data_x[i,:] )
s = s + b
return s
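#This is the standard kernel SVM decision function f(x) = sum_i a_i y_i K(x,x_i) + b;
#callers take np.sign(f(x)) to obtain the predicted class label.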
#Function to compute test error
#Inputs are testing data and labels, set of support vector indices and support vector values
#Returns test error
def SVM_error( test_xvals , test_yvals , active_data_x, active_data_y , support_vects,b):
#the number of test points
N_test = len( test_yvals )
	#Compute the error as the fraction of misclassified points
error = 0
for i in range(N_test):
if ( test_yvals[i] != np.sign( pred( test_xvals[i] , active_data_x, active_data_y, support_vects, b ) ) ):
error = error + 1
return error/N_test
#These parameters are chosen to create a dataset
#Dimension of the dataset
dimension = 2
#Total number of training points
N = 200
#Get the train dataset, any user dataset can be substituted
xvals, yvals = train_data(N,dimension)
#Total number of test points
N_test = 1000
test_xvals , test_yvals = test_data(N_test,dimension)
#Initial number of points used to compute steady state, can be user entered
N_start = 10
#the initial datapoints and labels
intial_xvals = xvals[0:N_start,:]
intial_yvals = yvals[0:N_start]
#Get the initial set of active datapoints, active datapoint labels and support vector values
intial_active_data_x, intial_active_data_y, intial_support_vects = EcoSVM_initialize(intial_xvals,intial_yvals)
#Run the EcoSVM algorithm on the dataset
active_data_x, active_data_y , support_vects = Run_EcoSVM( xvals, yvals, intial_active_data_x, intial_active_data_y, intial_support_vects )
#Get the full batch solution to compare
batch_data_x, batch_data_y, batch_support_vects = batchSVM( xvals,yvals )
#compute the b value
bfull = b_value(batch_data_x,batch_data_y,batch_support_vects)
b = b_value(active_data_x,active_data_y,support_vects)
#Compute performance errors
EcoSVMerror = SVM_error(xvals,yvals, active_data_x, active_data_y, support_vects,b)
print('EcoSVM train error:',EcoSVMerror)
EcoSVMerror = SVM_error(test_xvals,test_yvals, active_data_x, active_data_y, support_vects,b)
print('EcoSVM test error:',EcoSVMerror)
print('')
batcherror = SVM_error(xvals,yvals, batch_data_x, batch_data_y , batch_support_vects,bfull)
print('Batch SVM train error:', batcherror)
batcherror = SVM_error(test_xvals,test_yvals, batch_data_x, batch_data_y, batch_support_vects,bfull)
print('Batch SVM test error:', batcherror)
#Function to make prediction plots
#See main text for details
#only makes plots in two dimensions
def make_plot():
#Only make plots for two dimensional data
if (dimension!=2):
quit()
#Make prediction plots
k = 200
batch_preds = np.zeros([k,k])
EcoSVM_preds = np.zeros([k,k])
x = 0
y = 0
dl = 1/k
for i in range(k):
x = 0
for j in range(k):
batch_preds[i,j] = np.sign( pred([x,y],batch_data_x,batch_data_y, batch_support_vects,bfull) )
EcoSVM_preds[i,j] = np.sign( pred( [x,y] , active_data_x, active_data_y, support_vects,b ) )
x = x + dl
y = y + dl
diffs = 0*batch_preds
x = 0
y = 0
dl = 1/k
for i in range(k):
x = 0
for j in range(k):
if (batch_preds[i,j] == EcoSVM_preds[i,j]):
if (EcoSVM_preds[i,j]==1):
diffs[i,j] = -1
if (EcoSVM_preds[i,j]==-1):
diffs[i,j] = 1
if (batch_preds[i,j]!=EcoSVM_preds[i,j]):
diffs[i,j] = 3
x = x + dl
y = y + dl
#plot all train datapoints
for i in range(N):
if (yvals[i]==1):
plt.plot(xvals[i,0],xvals[i,1],'.',c='g',marker='P',markersize=8,markeredgecolor='black')
if (yvals[i]==-1):
plt.plot(xvals[i,0],xvals[i,1],'.',c='r',marker='o',markersize=8,markeredgecolor='black')
#also plot active support vectors in larger markers
for i in range(len(support_vects)):
if (active_data_y[i]==1):
plt.plot(active_data_x[i,0],active_data_x[i,1],'.',c='g',marker='P',markersize=22,markeredgecolor='black')
if (active_data_y[i]==-1):
plt.plot(active_data_x[i,0], active_data_x[i,1],'.',c='r',marker='o',markersize=22,markeredgecolor='black')
fontsize = 20
plt.ylim(0,1)
plt.xlim(0,1)
plt.grid()
plt.tick_params(labelsize=fontsize)
plt.xlabel("$X_{1}$",size=fontsize+2)
plt.ylabel("$X_{2}$",size = fontsize+2)
from pylab import rcParams
rcParams['figure.figsize'] = 500, 500
plt.axvline(x=0.5,linestyle='--',linewidth=5,color='k')
plt.imshow(diffs,origin='lower',extent=(0,1,0,1),cmap='cool')
plt.tight_layout()
plt.show()
make_plot()
| 13,646 | 21.557025 | 140 | py |
EcoSVM | EcoSVM-master/SIcode/python_scripts/Linear_SVM/linear_EcoSVM_numerics.py | #Owen Howell, July 14, 2019
#olh20@bu.edu, https://owenhowell20.github.io
#linear EcoSVM code for numerical experiments
#This code produces plots showing how EcoSVM test accuracy and number of support vectors depend on training epoch
#Import standard python packages
import numpy as np
import matplotlib.pyplot as plt
import sys
#QP is done with CVXOPT packages
from cvxopt import matrix, solvers
import numpy as np
solvers.options['show_progress'] = False
#A global error threshold, any small number can be used
thresh = 1e-3
#Function to generate the training data. Any linearly separable dataset can be used.
#Returns dataset and dataset labels
def train_data(N, dimension):
#Draw set of random points
xvals = np.random.uniform(0,1,[N,dimension])
yvals = np.ones([N])
for i in range(N):
		#Linearly separable
if (xvals[i,0]>0.5):
yvals[i] = -1
return xvals, yvals
#Function to generate the test data. Any linearly separable dataset can be used.
#Returns dataset and dataset labels
def test_data(N_test, dimension):
#Draw set of random points
xvals = np.random.uniform(0,1,[N_test,dimension])
yvals = np.ones([N_test])
for i in range(N_test):
		#Linearly separable
if (xvals[i,0]>0.5):
yvals[i] = -1
return xvals, yvals
#Defining a linear kernel function
def kernel(x,y):
return np.dot(x,np.transpose(y))
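#Any positive semi-definite kernel could be substituted here without changing the
#rest of the algorithm, for example a (hypothetical) RBF kernel with width gamma:
#	def kernel(x,y):
#		return np.exp( -gamma * np.sum( (np.asarray(x) - np.asarray(y))**2 ) )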
#Initialize the EcoSVM, compute support vectors for first N_start points
#Inputs are the datapoints, data labels
#Returns the set of active datapoints, active datapoint labels, support vector values
def EcoSVM_initialize(xvals,yvals):
N_start = len(yvals)
	#Function to generate the initial kernel matrix
	def initial_kernel_matrix():
		#Compute the initial kernel matrix
Qmat = np.zeros([N_start,N_start])
for i in range(N_start):
for j in range(N_start):
#using a linear kernel
s = kernel(xvals[i],xvals[j])
Qmat[i,j] = s*yvals[i]*yvals[j]
return Qmat
	Qmat = initial_kernel_matrix()
#Convert to CVXOPT format
Q = matrix(Qmat)
p = - np.ones(N_start)
p = matrix(p)
G = np.zeros([N_start,N_start])
for i in range(N_start):
G[i,i] = -1
G = matrix(G)
h = np.zeros([N_start])
h = matrix(h)
A = np.zeros(N_start)
for i in range(N_start):
A[i] = yvals[i]
A = matrix(A,(1,N_start),'d')
b = matrix(0.0)
sol = solvers.qp(Q, p, G, h, A, b)
	#the initial values of the solution
	#KKT values a_{i}
KKT = np.array( sol['x'] )
#only care about non-zero values
for i in range(N_start):
if (KKT[i] < thresh) :
KKT[i] = 0
	#Only need to keep non-zero KKT values, also known as support vectors
	#Find initial support vector values and support vector indices
support_vects_inds = np.array( np.ndarray.nonzero(KKT)[0] )
support_vects = KKT[support_vects_inds]
#the set of active datapoints
	active_data_x = xvals[support_vects_inds,:]
	active_data_y = yvals[support_vects_inds]
#Check that there is at least one active support vector
if ( len(support_vects_inds) == 0 ):
print("Not enough intial points, no active support vector found. Make sure that there are both +1 and -1 examples.")
quit()
return active_data_x, active_data_y, support_vects
#Run the EcoSVM algorithm on a single new point
#Inputs are datapoint X, data label Y, active datapoints, active data labels, set of support vectors and the dataset dimension
#Returns the new set of data points and labels, the new set of support vectors
def point_Run_EcoSVM( X, Y , active_data_x , active_data_y , support_vects , dimension ):
numsupportvects = len(active_data_y)
s = 0
for i in range(numsupportvects):
Qval = Y*active_data_y[i]*( kernel( X , active_data_x[i,:] ) - kernel( active_data_x[0,:], active_data_x[i,:] ) )
s = s + Qval*support_vects[i]
#Compute the invasion condition
inv = 1 - Y * active_data_y[0] - s
if (inv>=0):
#The new species can invade. Recompute the steady state using QP
Qp = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects):
for j in range(numsupportvects):
s = kernel(active_data_x[i,:],active_data_x[j,:])
Qp[i,j] = s*active_data_y[i]*active_data_y[j]
for i in range(numsupportvects):
s = kernel(active_data_x[i,:], X)
Qp[i,numsupportvects] = s*active_data_y[i]*Y
Qp[numsupportvects,i] = s*active_data_y[i]*Y
s = kernel(X,X)
Qp[numsupportvects,numsupportvects] = s*Y * Y
Qp = matrix(Qp)
p = - np.ones(numsupportvects+1)
p = matrix(p)
G = np.zeros([numsupportvects+1,numsupportvects+1])
for i in range(numsupportvects+1):
G[i,i] = -1
G = matrix(G)
h = np.zeros([numsupportvects+1])
h = matrix(h)
A = np.zeros(numsupportvects+1)
for i in range(numsupportvects):
A[i] = active_data_y[i]
A[numsupportvects] = Y
A = matrix(A,(1,numsupportvects+1),'d')
b = matrix(0.0)
#Call QP function
sol = solvers.qp(Qp, p, G, h, A, b)
#QP solution as array, all KKT values
KKT = np.array( sol['x'] )
#Get the new support vector indices and values
#only care about non-zero support vectors
countnew = 0
for i in range(len(KKT)):
if (KKT[i] < thresh):
KKT[i] = 0
countnew = countnew + 1
countnew = len(KKT) - countnew
#the set of new support vectors and support vector indices
new_active_data_x = np.zeros([countnew, dimension])
new_active_data_y = np.zeros([countnew])
newsuppvects = np.zeros([countnew])
		auxcount = 0
		#index i runs over the old active set; copy the support vectors that survive
		for i in range(len(KKT)-1):
			if (KKT[i] > thresh):
				new_active_data_x[auxcount,:] = active_data_x[i,:]
				new_active_data_y[auxcount] = active_data_y[i]
				newsuppvects[auxcount] = KKT[i]
				auxcount = auxcount + 1
if (KKT[len(KKT)-1]>thresh):
new_active_data_x[auxcount,:] = X
new_active_data_y[auxcount] = Y
newsuppvects[auxcount] = KKT[len(KKT)-1]
auxcount = auxcount + 1
#New support vector values and indices
support_vects = newsuppvects
active_data_y = new_active_data_y
		#rebind the active set, which can change shape between updates
		active_data_x = new_active_data_x
return active_data_x, active_data_y, support_vects
#Run the EcoSVM algorithm
#Inputs are datapoints and labels, the initial active datapoints and labels, and the initial support vector values
#Returns the set of active datapoints, the active data labels, the support vector values, and the test accuracy and number of support vectors over time
def Run_EcoSVM( xvals, yvals, active_data_x, active_data_y, support_vects ):
test_accuracy = np.zeros([ N - N_start])
number_active = np.zeros([ N - N_start])
#the dataset dimension
dimension = len(xvals[0,:])
#Run the EcoSVM algorithm over all points
for point in range(N_start,N):
#compute the b value
b = b_value(active_data_x,active_data_y,support_vects)
#Compute performance errors
EcoSVMerror = SVM_error(test_xvals,test_yvals, active_data_x, active_data_y, support_vects,b)
test_accuracy[point - N_start] = 1 - EcoSVMerror
number_active[ point - N_start ] = len( active_data_y )
X = xvals[point,:]
Y = yvals[point]
#Run the EcoSVM algorithm on a single point
active_data_x, active_data_y, support_vects = point_Run_EcoSVM( X , Y , active_data_x, active_data_y , support_vects , dimension )
return active_data_x, active_data_y , support_vects , test_accuracy , number_active
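#Unlike the plain online run, this variant also records the test accuracy and the
#active set size after every point so that learning curves can be plotted against
#training time T further below.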
#Run a batch SVM on all data
#input is all training data and training labels
#output is the set of active datapoints and data labels and support vector values
def batchSVM( xvals , yvals ):
#the number of datapoints
N = len(yvals)
#the full kernel matrix for batch SVM
Qfull = np.zeros([N,N])
for i in range(N):
for j in range(N):
#using a linear kernel
s = kernel(xvals[i],xvals[j])
Qfull[i,j] = s*yvals[i]*yvals[j]
#The full batch SVM solution with QP
#Convert into CVXOPT format
Qf = matrix(Qfull)
pf = - np.ones(N)
pf = matrix(pf)
Gf = np.zeros([N,N])
for i in range(N):
Gf[i,i] = -1
Gf = matrix(Gf)
hf = np.zeros([N])
hf = matrix(hf)
Af = np.zeros(N)
for i in range(N):
Af[i] = yvals[i]
Af = matrix(Af,(1,N),'d')
bf = matrix(0.0)
sol = solvers.qp(Qf, pf, Gf, hf, Af, bf)
evars = np.array( sol['x'] )
#only care about non-zero support vectors
for i in range(N):
if (evars[i] < thresh):
evars[i] = 0
#Find support vectors and support vector indices for Batch SVM
supvectsindsfull = np.array( np.ndarray.nonzero(evars)[0] )
supvectsfull = evars[supvectsindsfull]
active_data_x = xvals[ supvectsindsfull ,:]
active_data_y = yvals[supvectsindsfull ]
return active_data_x, active_data_y, supvectsfull
#Compute the b value for an SVM
#Inputs are set of active datapoints and data labels and support vector values
def b_value(active_data_x,active_data_y,supportvectors):
b = 0
#Compute the b value
s = 0
for i in range(len(supportvectors)):
s = s + active_data_y[i]
for j in range(len(supportvectors)):
s = s - supportvectors[j] * active_data_y[j] * kernel( active_data_x[i,:] , active_data_x[j,:] )
size = float( len(supportvectors) )
if (size!=0):
b = 1/size * s
if (size==0):
print("ERROR")
return b
#the SVM prediction function
#Inputs are datapoint x to make prediction on, set of indices, set of support vectors and b value
#Output is the real-valued decision score; its sign gives the predicted label +1 or -1
def pred(x , active_data_x, active_data_y , supportvectors , b):
s = 0
for i in range(len(supportvectors)):
s = s + active_data_y[i] * supportvectors[i] * kernel(x , active_data_x[i,:] )
s = s + b
return s
#Function to compute test error
#Inputs are testing data and labels, set of support vector indices and support vector values
#Returns test error
def SVM_error( test_xvals , test_yvals , active_data_x, active_data_y , support_vects,b):
#the number of test points
N_test = len( test_yvals )
	#Compute the error as the fraction of misclassified points
error = 0
for i in range(N_test):
if ( test_yvals[i] != np.sign( pred( test_xvals[i] , active_data_x, active_data_y, support_vects, b ) ) ):
error = error + 1
return error/N_test
#These parameters are chosen to create a dataset
#Dimension of the dataset
dimension = 80
#Total number of training points
N = 1000
#Total number of test points
N_test = 700
#Initial number of points used to compute steady state, can be user entered
N_start = 30
#Number of realizations
N_reals = 25
#The realization accuracies
test_accuracys = np.zeros([N_reals,N - N_start])
number_actives = np.zeros([N_reals,N - N_start])
batcherror, batch_number_active = 0 , 0
#loop over realizations
for r in range(N_reals):
#get the training and test set
xvals , yvals = train_data(N,dimension)
test_xvals , test_yvals = test_data(N_test,dimension)
	#the initial datapoints and labels
intial_xvals = xvals[0:N_start,:]
intial_yvals = yvals[0:N_start]
	#Get the initial set of active datapoints, active datapoint labels and support vector values
intial_active_data_x, intial_active_data_y, intial_support_vects = EcoSVM_initialize(intial_xvals,intial_yvals)
#Run the EcoSVM algorithm on the dataset
active_data_x, active_data_y , support_vects, test_accuracy, number_active = Run_EcoSVM( xvals, yvals, intial_active_data_x, intial_active_data_y, intial_support_vects )
test_accuracys[r,:] = test_accuracy
number_actives[r,:] = number_active
#Get the full batch solution to compare
batch_data_x, batch_data_y, batch_support_vects = batchSVM( xvals,yvals )
batch_number_active = batch_number_active + len(batch_data_y)
#compute the batch b value
bfull = b_value(batch_data_x,batch_data_y,batch_support_vects)
#compute the batch accuracy
batcherror = batcherror + SVM_error(test_xvals,test_yvals, batch_data_x, batch_data_y, batch_support_vects,bfull)
#average batch error, average number of batch support vectors
batcherror = batcherror / N_reals
batch_number_active = batch_number_active / N_reals
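#Averaging over N_reals independent dataset realizations smooths the run-to-run
#variance of the online algorithm before comparison with the batch baseline.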
#make accuracy plots vs time
import os
os.environ["PATH"] += ':/usr/local/texlive/2015/bin/x86_64-darwin'
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(labelsize=16)
fontsize = 22
avg_test_accuracy = np.zeros([N - N_start])
avg_number_active = np.zeros([N - N_start])
for i in range(N - N_start):
	avg_test_accuracy[i] = np.average( test_accuracys[:,i] )
	avg_number_active[i] = np.average( number_actives[:,i] )
for r in range(N_reals):
	#Make plots of EcoSVM accuracy vs time
	plt.plot(test_accuracys[r,:], linewidth=2, color = 'k')
plt.plot(avg_test_accuracy, linewidth=6, color = 'b')
plt.axhline(y = 1 - batcherror,linestyle='--',linewidth=6)
plt.ylim(0.5,1.1)
plt.ylabel("$A(T) $",fontsize = fontsize + 2)
plt.xlabel("$ T $" ,fontsize=fontsize + 2)
plt.grid()
plt.tick_params(labelsize=fontsize+2)
plt.tight_layout()
plt.show()
#plt.savefig( "./graphs/linear.acc.png " )
plt.clf()
for r in range(N_reals):
#Make plots of EcoSVM number of support vectors vs time
plt.plot(number_actives[r,:], linewidth=2 , color = 'k')
plt.plot(avg_number_active, linewidth=6, color = 'b')
plt.ylim(0, max( np.amax(number_actives) , batch_number_active ) + 10 )
plt.axhline(y = batch_number_active,linestyle='--',linewidth=6)
plt.ylabel("$N(T) $",fontsize = fontsize + 2)
plt.xlabel("$ T $" ,fontsize=fontsize + 2)
plt.tick_params(labelsize=fontsize+2)
plt.grid()
plt.tight_layout()
plt.show()
#plt.savefig( "./graphs/linear.numb.png " )
plt.clf()
| 13,742 | 23.026224 | 171 | py |
TapNet | TapNet-master/tieredImageNet_TapNet/scripts/train_TapNet_tieredImageNet.py | import os
import sys
sys.path.append('../')
import argparse
import numpy as np
import scipy.io as sio
import chainer.functions as F
from chainer import optimizers
from chainer import cuda
from chainer import serializers
from utils.generators import tieredImageNetGenerator
from utils.model_TapNet_ResNet12 import TapNet
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0,
help='gpu device number. -1 for cpu.')
parser.add_argument('--n_shot', type=int, default=5,
help='Number of shots.')
parser.add_argument('--nb_class_train', type=int, default=20,
                        help='Number of training classes.')
parser.add_argument('--nb_class_test', type=int, default=5,
                        help='Number of test classes.')
parser.add_argument('--n_query_train', type=int, default=8,
help='Number of queries per class in training.')
parser.add_argument('--n_query_test', type=int, default=15,
help='Number of queries per class in test.')
parser.add_argument('--wd_rate', type=float, default=0,
help='Weight decay rate in Adam optimizer')
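    # Example invocation (the values shown are just the defaults above):
    #   python train_TapNet_tieredImageNet.py --gpu 0 --n_shot 5 --nb_class_train 20 --nb_class_test 5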
# set params
# -----------
args = parser.parse_args()
if args.gpu < 0:
xp = np
else:
import cupy as cp
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="%d" %args.gpu
xp = cp
dimension=512
max_iter=50001
lrdecay = True
lrstep = 40000
n_shot=args.n_shot
n_query=args.n_query_train
n_query_test = args.n_query_test
nb_class_train=args.nb_class_train
nb_class_test=args.nb_class_test
wd_rate=args.wd_rate
savefile_name='save/TapNet_tieredImageNet_ResNet12.mat'
filename_5shot='save/TapNet_tieredImageNet_ResNet12'
filename_5shot_last='save/TapNet_tieredImageNet_ResNet12_last'
# set up training
# ------------------
model = TapNet(nb_class_train=nb_class_train, nb_class_test=nb_class_test, input_size=3*84*84,
dimension=dimension, n_shot=n_shot, gpu=args.gpu)
optimizer = optimizers.Adam(alpha=1e-3, weight_decay_rate=wd_rate)
model.set_optimizer(optimizer)
train_generator = tieredImageNetGenerator(image_file='../data/train_images.npz', label_file='../data/train_labels.pkl',
nb_classes=nb_class_train, nb_samples_per_class=n_shot+n_query,
max_iter=max_iter, xp=xp)
# Result analysis list
# -----------------
loss_h=[]
accuracy_h_val=[]
accuracy_h_test=[]
acc_best=0
epoch_best=0
# start training
# ----------------
for t, (images, labels) in train_generator:
# train
loss = model.train(images, labels)
# logging
loss_h.extend([loss.tolist()])
if (t % 50 == 0):
print("Episode: %d, Train Loss: %f "%(t, loss))
if (t != 0) and (t % 500 == 0):
print('Evaluation in Validation data')
test_generator = tieredImageNetGenerator(image_file='../data/val_images.npz', label_file='../data/val_labels.pkl',
nb_classes=nb_class_test, nb_samples_per_class=n_shot+n_query_test,
max_iter=600, xp=xp)
scores = []
for i, (images, labels) in test_generator:
accs = model.evaluate(images, labels)
accs_ = [cuda.to_cpu(acc) for acc in accs]
score = np.asarray(accs_, dtype=int)
scores.append(score)
print(('Accuracy 5 shot ={:.2f}%').format(100*np.mean(np.array(scores))))
accuracy_t=100*np.mean(np.array(scores))
if acc_best < accuracy_t:
acc_best = accuracy_t
epoch_best=t
serializers.save_npz(filename_5shot,model.chain)
accuracy_h_val.extend([accuracy_t.tolist()])
del(test_generator)
del(accs)
del(accs_)
del(accuracy_t)
print('Evaluation in Test data')
test_generator = tieredImageNetGenerator(image_file='../data/test_images.npz', label_file='../data/test_labels.pkl',
nb_classes=nb_class_test, nb_samples_per_class=n_shot+n_query_test,
max_iter=600, xp=xp)
scores = []
for i, (images, labels) in test_generator:
accs = model.evaluate(images, labels)
accs_ = [cuda.to_cpu(acc) for acc in accs]
score = np.asarray(accs_, dtype=int)
scores.append(score)
print(('Accuracy 5 shot ={:.2f}%').format(100*np.mean(np.array(scores))))
accuracy_t=100*np.mean(np.array(scores))
accuracy_h_test.extend([accuracy_t.tolist()])
del(test_generator)
del(accs)
del(accs_)
del(accuracy_t)
sio.savemat(savefile_name, {'accuracy_h_val':accuracy_h_val, 'accuracy_h_test':accuracy_h_test, 'epoch_best':epoch_best,'acc_best':acc_best})
if len(accuracy_h_val) >10:
                print('***Best accuracy so far***')
print('Best epoch =',epoch_best,'Best 5 shot acc=',acc_best)
serializers.save_npz(filename_5shot_last,model.chain)
if (t != 0) and (t % lrstep == 0) and lrdecay:
model.decay_learning_rate(0.1)
accuracy_h5=[]
serializers.load_npz(filename_5shot, model.chain)
print('Evaluating the best 5shot model...')
for i in range(50):
test_generator = tieredImageNetGenerator(image_file='../data/test_images.npz', label_file='../data/test_labels.pkl',
nb_classes=nb_class_test, nb_samples_per_class=n_shot+n_query_test,
max_iter=600, xp=xp)
scores=[]
for j, (images, labels) in test_generator:
accs = model.evaluate(images, labels)
accs_ = [cuda.to_cpu(acc) for acc in accs]
score = np.asarray(accs_, dtype=int)
scores.append(score)
accuracy_t=100*np.mean(np.array(scores))
accuracy_h5.extend([accuracy_t.tolist()])
print(('600 episodes with 15-query accuracy: 5-shot ={:.2f}%').format(accuracy_t))
del(test_generator)
del(accs)
del(accs_)
del(accuracy_t)
sio.savemat(savefile_name, {'accuracy_h_val':accuracy_h_val, 'accuracy_h_test':accuracy_h_test, 'epoch_best':epoch_best,'acc_best':acc_best, 'accuracy_h5':accuracy_h5})
print(('Accuracy_test 5 shot ={:.2f}%').format(np.mean(accuracy_h5)))
| 7,175 | 41.714286 | 176 | py |
TapNet | TapNet-master/tieredImageNet_TapNet/utils/generators.py | """
This code based on codes from https://github.com/tristandeleu/ntm-one-shot
"""
import numpy as np
import random
import pickle as pkl
class tieredImageNetGenerator(object):
"""tieredImageNetGenerator
Args:
image_file (str): 'data/train_images.npz' or 'data/test_images.npz' or 'data/val_images.npz'
label_file (str): 'data/train_labels.pkl' or 'data/test_labels.pkl' or 'data/val_labels.pkl'
nb_classes (int): number of classes in an episode
        nb_samples_per_class (int): number of samples per class in an episode
max_iter (int): max number of episode generation
xp: numpy or cupy
"""
def __init__(self, image_file, label_file, nb_classes=5, nb_samples_per_class=10,
max_iter=None, xp=np):
super(tieredImageNetGenerator, self).__init__()
self.image_file = image_file
self.label_file = label_file
self.nb_classes = nb_classes
self.nb_samples_per_class = nb_samples_per_class
self.max_iter = max_iter
self.xp = xp
self.num_iter = 0
self._load_data(self.image_file, self.label_file)
def _load_data(self, image_file, label_file):
with np.load(image_file, mmap_mode="r", encoding='latin1') as data:
images=data["images"]
with open(label_file,'rb') as f:
data=pkl.load(f, encoding='bytes')
label_specific = data[b"label_specific"]
label_specific_str = data[b"label_specific_str"]
num_ex = label_specific.shape[0]
ex_ids = np.arange(num_ex)
num_label_cls_specific = len(label_specific_str)
self.label_specific_idict={}
for cc in range(num_label_cls_specific):
self.label_specific_idict[cc]=ex_ids[label_specific == cc]
self.images=images
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
if (self.max_iter is None) or (self.num_iter < self.max_iter):
self.num_iter += 1
images, labels = self.sample(self.nb_classes, self.nb_samples_per_class)
return (self.num_iter - 1), (images, labels)
else:
raise StopIteration()
def sample(self, nb_classes, nb_samples_per_class):
        sampled_characters = random.sample(list(self.label_specific_idict.keys()), nb_classes)
labels_and_images = []
for (k, char) in enumerate(sampled_characters):
_ind = random.sample(list(self.label_specific_idict[char]), nb_samples_per_class)
            labels_and_images.extend([(k, self.xp.array((self.images[i]/np.float32(255)).flatten())) for i in _ind])
arg_labels_and_images = []
for i in range(self.nb_samples_per_class):
for j in range(self.nb_classes):
arg_labels_and_images.extend([labels_and_images[i+j*self.nb_samples_per_class]])
labels, images = zip(*arg_labels_and_images)
return images, labels
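#Note the interleaving in sample(): episodes are returned sample-major, i.e. the
#i-th block of nb_classes entries holds the i-th example of every class. The
#training code relies on this, taking the first n_shot blocks as the support set
#and the remaining blocks as the query set.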
| 3,029 | 39.945946 | 114 | py |
TapNet | TapNet-master/tieredImageNet_TapNet/utils/rank_nullspace.py | import numpy as np
from numpy.linalg import svd
import cupy as cp
from cupy.linalg import svd as svd_gpu
def rank(A, atol=1e-13, rtol=0):
A = np.atleast_2d(A)
s = svd(A, compute_uv=False)
tol = max(atol, rtol*s[0])
rank = int((s >= tol).sum())
return rank
def nullspace(A, tol=1e-13):
A=np.atleast_2d(A)
u, s, vh = svd(A)
if len(A.shape) == 2:
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
elif len(A.shape) == 3:
nnz = (s >= tol).sum(axis=-1)
nnz = max(nnz)
ns = np.transpose(vh[:,nnz:,:].conj(), axes=[0,2,1])
return ns
def nullspace_gpu(A, tol=1e-13):
A = cp.atleast_2d(A)
u, s, vh =svd_gpu(A)
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
return ns
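#All three helpers use the same SVD recipe: rows of vh whose singular values fall
#below the tolerance span the (numerical) null space of A, and conj().T returns
#that basis as orthonormal columns; rank() simply counts singular values above tol.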
| 781 | 22.69697 | 60 | py |
TapNet | TapNet-master/tieredImageNet_TapNet/utils/model_TapNet_ResNet12.py | import cupy as cp
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer import cuda
from utils.rank_nullspace import nullspace_gpu
class TapNet(object):
def __init__(self, nb_class_train, nb_class_test, input_size, dimension,
n_shot, gpu=-1):
"""
Args
nb_class_train (int): number of classes in a training episode
nb_class_test (int): number of classes in a test episode
input_size (int): dimension of input vector
dimension (int) : dimension of embedding space
n_shot (int) : number of shots
"""
self.nb_class_train = nb_class_train
self.nb_class_test = nb_class_test
self.input_size = input_size
self.dimension = dimension
self.n_shot = n_shot
# create chain
self.chain = self._create_chain()
self.set_gpu(gpu)
# Set up methods
# ---------------
@property
def xp(self):
if self.gpu<0:
return np
else:
return cp
def set_gpu(self, gpu):
self.gpu = gpu
if self.gpu < 0:
self.chain.to_cpu()
else:
self.chain.to_gpu()
def set_optimizer(self, optimizer):
self.optimizer = optimizer
self.optimizer.setup(self.chain)
self.optimizer.use_cleargrads(use=False)
def _create_chain(self):
chain = chainer.Chain(
l_conv1_1=L.Convolution2D(None,64,(3,3), pad=1),
l_norm1_1=L.BatchNormalization(64),
l_conv1_2=L.Convolution2D(64,64,(3,3), pad=1),
l_norm1_2=L.BatchNormalization(64),
l_conv1_3=L.Convolution2D(64,64,(3,3), pad=1),
l_norm1_3=L.BatchNormalization(64),
l_conv1_r=L.Convolution2D(None,64,(3,3), pad=1),
l_norm1_r=L.BatchNormalization(64),
l_conv2_1=L.Convolution2D(64,128,(3,3), pad=1),
l_norm2_1=L.BatchNormalization(128),
l_conv2_2=L.Convolution2D(128,128,(3,3), pad=1),
l_norm2_2=L.BatchNormalization(128),
l_conv2_3=L.Convolution2D(128,128,(3,3), pad=1),
l_norm2_3=L.BatchNormalization(128),
l_conv2_r=L.Convolution2D(64,128,(3,3), pad=1),
l_norm2_r=L.BatchNormalization(128),
l_conv3_1=L.Convolution2D(128,256,(3,3), pad=1),
l_norm3_1=L.BatchNormalization(256),
l_conv3_2=L.Convolution2D(256,256,(3,3), pad=1),
l_norm3_2=L.BatchNormalization(256),
l_conv3_3=L.Convolution2D(256,256,(3,3), pad=1),
l_norm3_3=L.BatchNormalization(256),
l_conv3_r=L.Convolution2D(128,256,(3,3), pad=1),
l_norm3_r=L.BatchNormalization(256),
l_conv4_1=L.Convolution2D(256,512,(3,3), pad=1),
l_norm4_1=L.BatchNormalization(512),
l_conv4_2=L.Convolution2D(512,512,(3,3), pad=1),
l_norm4_2=L.BatchNormalization(512),
l_conv4_3=L.Convolution2D(512,512,(3,3), pad=1),
l_norm4_3=L.BatchNormalization(512),
l_conv4_r=L.Convolution2D(256,512,(3,3), pad=1),
l_norm4_r=L.BatchNormalization(512),
l_phi=L.Linear(self.dimension, self.nb_class_train),
)
return chain
# Train methods
# ---------------
def encoder(self, x, batchsize, train=True):
with chainer.using_config('train', train):
x2 = F.reshape(x, (batchsize,84,84,3))
x3 = F.transpose(x2, [0,3,1,2])
c1_r=self.chain.l_conv1_r(x3)
n1_r=self.chain.l_norm1_r(c1_r)
c1_1=self.chain.l_conv1_1(x3)
n1_1=self.chain.l_norm1_1(c1_1)
a1_1=F.relu(n1_1)
c1_2=self.chain.l_conv1_2(a1_1)
n1_2=self.chain.l_norm1_2(c1_2)
a1_2=F.relu(n1_2)
c1_3=self.chain.l_conv1_3(a1_2)
n1_3=self.chain.l_norm1_3(c1_3)
a1_3=F.relu(n1_3+n1_r)
p1=F.max_pooling_2d(a1_3,2)
p1=F.dropout(p1,ratio=0.2)
c2_r=self.chain.l_conv2_r(p1)
n2_r=self.chain.l_norm2_r(c2_r)
c2_1=self.chain.l_conv2_1(p1)
n2_1=self.chain.l_norm2_1(c2_1)
a2_1=F.relu(n2_1)
c2_2=self.chain.l_conv2_2(a2_1)
n2_2=self.chain.l_norm2_2(c2_2)
a2_2=F.relu(n2_2)
c2_3=self.chain.l_conv2_3(a2_2)
n2_3=self.chain.l_norm2_3(c2_3)
a2_3=F.relu(n2_3+n2_r)
p2=F.max_pooling_2d(a2_3,2)
p2=F.dropout(p2, ratio=0.2)
c3_r=self.chain.l_conv3_r(p2)
n3_r=self.chain.l_norm3_r(c3_r)
c3_1=self.chain.l_conv3_1(p2)
n3_1=self.chain.l_norm3_1(c3_1)
a3_1=F.relu(n3_1)
c3_2=self.chain.l_conv3_2(a3_1)
n3_2=self.chain.l_norm3_2(c3_2)
a3_2=F.relu(n3_2)
c3_3=self.chain.l_conv3_3(a3_2)
n3_3=self.chain.l_norm3_3(c3_3)
a3_3=F.relu(n3_3+n3_r)
p3=F.max_pooling_2d(a3_3,2)
p3=F.dropout(p3,ratio=0.2)
c4_r=self.chain.l_conv4_r(p3)
n4_r=self.chain.l_norm4_r(c4_r)
c4_1=self.chain.l_conv4_1(p3)
n4_1=self.chain.l_norm4_1(c4_1)
a4_1=F.relu(n4_1)
c4_2=self.chain.l_conv4_2(a4_1)
n4_2=self.chain.l_norm4_2(c4_2)
a4_2=F.relu(n4_2)
c4_3=self.chain.l_conv4_3(a4_2)
n4_3=self.chain.l_norm4_3(c4_3)
a4_3=F.relu(n4_3+n4_r)
p4=F.max_pooling_2d(a4_3,2)
p4=F.dropout(p4, ratio=0.2)
p5=F.average_pooling_2d(p4,6)
h_t=F.reshape(p5, (batchsize,-1))
return h_t
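    #The encoder above is the ResNet-12 backbone used by TapNet: four residual
    #blocks with 64/128/256/512 channels, each built from three 3x3 conv+BN layers
    #plus a convolutional shortcut (l_conv*_r), followed by 2x2 max-pooling and
    #dropout, with a final 6x6 average pooling producing a 512-d embedding.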
def Projection_Space(self, average_key, batchsize, nb_class, train=True, phi_ind=None):
c_t = average_key
eps=1e-6
if train == True:
Phi_tmp = self.chain.l_phi.W
else:
Phi_data = self.chain.l_phi.W.data
Phi_tmp = chainer.Variable(Phi_data[phi_ind,:])
for i in range(nb_class):
if i == 0:
Phi_sum = Phi_tmp[i]
else:
Phi_sum += Phi_tmp[i]
Phi = nb_class*(Phi_tmp)-F.broadcast_to(Phi_sum,(nb_class,self.dimension))
power_Phi = F.sqrt(F.sum(Phi*Phi, axis=1))
power_Phi = F.transpose(F.broadcast_to(power_Phi, [self.dimension,nb_class]))
Phi = Phi/(power_Phi+eps)
power_c = F.sqrt(F.sum(c_t*c_t, axis=1))
power_c = F.transpose(F.broadcast_to(power_c, [self.dimension,nb_class]))
c_tmp = c_t/(power_c+eps)
null=Phi - c_tmp
M = nullspace_gpu(null.data)
M = F.broadcast_to(M,[batchsize, self.dimension, self.dimension-nb_class])
return M
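    #Projection_Space constructs TapNet's task-adaptive projection: Phi holds the
    #(scaled) mean-subtracted per-class reference vectors, both Phi and the class
    #prototypes c_t are length-normalized, and M is an orthonormal basis of the
    #null space of (Phi - c_tmp), so that M^T phi_k = M^T c_k for every class k,
    #i.e. normalized references and prototypes coincide in the projected space.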
def compute_power(self, batchsize,key,M, nb_class, train=True,phi_ind=None):
if train == True:
Phi_out = self.chain.l_phi.W
else:
Phi_data = self.chain.l_phi.W.data
Phi_out = chainer.Variable(Phi_data[phi_ind,:])
Phi_out_batch = F.broadcast_to(Phi_out,[batchsize,nb_class, self.dimension])
PhiM = F.batch_matmul(Phi_out_batch,M)
PhiMs = F.sum(PhiM*PhiM,axis=2)
key_t = F.reshape(key,[batchsize,1,self.dimension])
keyM = F.batch_matmul(key_t,M)
keyMs = F.sum(keyM*keyM, axis=2)
keyMs = F.broadcast_to(keyMs, [batchsize,nb_class])
pow_t = PhiMs + keyMs
return pow_t
def compute_power_avg_phi(self, batchsize, nb_class, average_key, train=False):
avg_pow = F.sum(average_key*average_key,axis=1)
Phi = self.chain.l_phi.W
Phis = F.sum(Phi*Phi,axis=1)
avg_pow_bd = F.broadcast_to(F.reshape(avg_pow,[len(avg_pow),1]),[len(avg_pow),len(Phis)])
wzs_bd = F.broadcast_to(F.reshape(Phis,[1,len(Phis)]),[len(avg_pow),len(Phis)])
pow_avg = avg_pow_bd + wzs_bd
return pow_avg
def compute_loss(self, t_data, r_t, pow_t, batchsize,nb_class, train=True):
t = chainer.Variable(self.xp.array(t_data, dtype=self.xp.int32))
u = 2*self.chain.l_phi(r_t)-pow_t
return F.softmax_cross_entropy(u,t)
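    #With r_t = M M^T key and pow_t = ||M^T Phi_k||^2 + ||M^T key||^2, the logit
    #u_k = 2*Phi_k(M M^T key) - pow_t equals -||M^T (Phi_k - key)||^2 (up to the
    #learned per-class bias of l_phi), i.e. the negative squared Euclidean distance
    #between query and reference in the projected space, so the softmax cross
    #entropy is computed over projected distances.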
def compute_accuracy(self, t_data, r_t, pow_t,batchsize, nb_class, phi_ind=None):
ro = 2*self.chain.l_phi(r_t)
ro_t = chainer.Variable(ro.data[:,phi_ind])
u = ro_t-pow_t
t_est = self.xp.argmax(F.softmax(u).data, axis=1)
return (t_est == self.xp.array(t_data))
def select_phi(self, average_key, avg_pow):
u_avg = 2*self.chain.l_phi(average_key).data
u_avg = u_avg - avg_pow.data
u_avg_ind = cp.asnumpy(self.xp.argsort(u_avg, axis=1))
phi_ind = np.zeros(self.nb_class_test)
for i in range(self.nb_class_test):
if i == 0:
                phi_ind[i] = int(u_avg_ind[i, self.nb_class_train-1])
else:
k=self.nb_class_train-1
while u_avg_ind[i,k] in phi_ind[:i]:
k = k-1
                phi_ind[i] = int(u_avg_ind[i,k])
return phi_ind.tolist()
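    #At test time the train-class reference rows of l_phi are reused: select_phi
    #greedily assigns to each novel class the reference with the highest
    #distance-based score u_avg for that class prototype, skipping references
    #already claimed by another class.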
def train(self, images, labels):
"""
Train a minibatch of episodes
"""
images = self.xp.stack(images)
batchsize = images.shape[0]
loss = 0
key = self.encoder(images, batchsize, train=True)
support_set = key[:self.nb_class_train*self.n_shot,:]
query_set = key[self.nb_class_train*self.n_shot:,:]
average_key = F.mean(F.reshape(support_set,[self.n_shot,self.nb_class_train,-1]),axis=0)
batchsize_q = len(query_set.data)
M = self.Projection_Space(average_key, batchsize_q, self.nb_class_train)
r_t = F.reshape(F.batch_matmul(M,F.batch_matmul(M,query_set,transa=True)),(batchsize_q,-1))
pow_t = self.compute_power(batchsize_q,query_set,M,self.nb_class_train)
loss = self.compute_loss(labels[self.nb_class_train*self.n_shot:], r_t, pow_t, batchsize_q,self.nb_class_train)
self.chain.zerograds()
loss.backward()
self.optimizer.update()
return loss.data
def evaluate(self, images, labels):
"""
Evaluate accuracy score
"""
nb_class = self.nb_class_test
images = self.xp.stack(images)
batchsize = images.shape[0]
accs = []
key= self.encoder(images,batchsize, train=False)
support_set = key[:nb_class*self.n_shot,:]
query_set = key[nb_class*self.n_shot:,:]
average_key = F.mean(F.reshape(support_set,[self.n_shot,nb_class,-1]),axis=0)
batchsize_q = len(query_set.data)
pow_avg = self.compute_power_avg_phi(batchsize_q, nb_class, average_key, train=False)
        phi_ind = [int(ind) for ind in self.select_phi(average_key,pow_avg)]
M = self.Projection_Space(average_key, batchsize_q,nb_class, train=False, phi_ind=phi_ind)
r_t = F.reshape(F.batch_matmul(M,F.batch_matmul(M,query_set,transa=True)),(batchsize_q,-1))
pow_t = self.compute_power(batchsize_q,query_set,M,nb_class, train=False, phi_ind=phi_ind)
accs_tmp = self.compute_accuracy(labels[nb_class*self.n_shot:], r_t, pow_t, batchsize_q, nb_class, phi_ind=phi_ind)
accs.append(accs_tmp)
return accs
def decay_learning_rate(self, decaying_parameter=0.5):
self.optimizer.alpha=self.optimizer.alpha*decaying_parameter
| 12,091 | 34.253644 | 123 | py |
TapNet | TapNet-master/tieredImageNet_TapNet/data/__init__.py | 1 | 0 | 0 | py | |
TapNet | TapNet-master/miniImageNet_TapNet/scripts/train_TapNet_miniImageNet.py | import os
import sys
sys.path.append('../')
import argparse
import numpy as np
import scipy.io as sio
import chainer.functions as F
from chainer import optimizers
from chainer import cuda
from chainer import serializers
from utils.generators import miniImageNetGenerator
from utils.model_TapNet_ResNet12 import TapNet
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0,
help='gpu device number. -1 for cpu.')
parser.add_argument('--n_shot', type=int, default=5,
help='Number of shots.')
parser.add_argument('--nb_class_train', type=int, default=20,
                        help='Number of training classes.')
parser.add_argument('--nb_class_test', type=int, default=5,
                        help='Number of test classes.')
parser.add_argument('--n_query_train', type=int, default=8,
help='Number of queries per class in training.')
parser.add_argument('--n_query_test', type=int, default=15,
help='Number of queries per class in test.')
parser.add_argument('--wd_rate', type=float, default=5e-4,
help='Weight decay rate in Adam optimizer')
# set params
# -----------
args = parser.parse_args()
if args.gpu < 0:
xp = np
else:
import cupy as cp
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="%d" %args.gpu
xp = cp
dimension=512
max_iter=50001
lrdecay = True
lrstep = 40000
n_shot=args.n_shot
n_query=args.n_query_train
n_query_test = args.n_query_test
nb_class_train=args.nb_class_train
nb_class_test=args.nb_class_test
wd_rate=args.wd_rate
savefile_name='save/TapNet_miniImageNet_ResNet12.mat'
filename_5shot='save/TapNet_miniImageNet_ResNet12'
filename_5shot_last='save/TapNet_miniImageNet_ResNet12_last'
# set up training
# ------------------
model = TapNet(nb_class_train=nb_class_train, nb_class_test=nb_class_test, input_size=3*84*84,
dimension=dimension, n_shot=n_shot, gpu=args.gpu)
optimizer = optimizers.Adam(alpha=1e-3, weight_decay_rate=wd_rate)
model.set_optimizer(optimizer)
train_generator = miniImageNetGenerator(data_file='../data/Imagenet/train.npz',
nb_classes=nb_class_train, nb_samples_per_class=n_shot+n_query,
max_iter=max_iter, xp=xp)
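    # Episodes are sampled on the fly; note that training uses nb_class_train-way
    # episodes (20-way by default) while evaluation uses nb_class_test-way episodes
    # (5-way by default), i.e. the model trains with a higher "way" than it is tested on.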
# Result analysis list
# -----------------
loss_h=[]
accuracy_h_val=[]
accuracy_h_test=[]
acc_best=0
epoch_best=0
# start training
# ----------------
for t, (images, labels) in train_generator:
# train
loss = model.train(images, labels)
# logging
loss_h.extend([loss.tolist()])
if (t % 50 == 0):
print("Episode: %d, Train Loss: %f "%(t, loss))
if (t != 0) and (t % 500 == 0):
print('Evaluation in Validation data')
test_generator = miniImageNetGenerator(data_file='../data/Imagenet/val.npz',
nb_classes=nb_class_test, nb_samples_per_class=n_shot+n_query_test,
max_iter=600, xp=xp)
scores = []
for i, (images, labels) in test_generator:
accs = model.evaluate(images, labels)
accs_ = [cuda.to_cpu(acc) for acc in accs]
score = np.asarray(accs_, dtype=int)
scores.append(score)
print(('Accuracy 5 shot ={:.2f}%').format(100*np.mean(np.array(scores))))
accuracy_t=100*np.mean(np.array(scores))
if acc_best < accuracy_t:
acc_best = accuracy_t
epoch_best=t
serializers.save_npz(filename_5shot,model.chain)
accuracy_h_val.extend([accuracy_t.tolist()])
del(test_generator)
del(accs)
del(accs_)
del(accuracy_t)
print('Evaluation in Test data')
test_generator = miniImageNetGenerator(data_file='../data/Imagenet/test.npz',
nb_classes=nb_class_test, nb_samples_per_class=n_shot+n_query_test,
max_iter=600, xp=xp)
scores = []
for i, (images, labels) in test_generator:
accs = model.evaluate(images, labels)
accs_ = [cuda.to_cpu(acc) for acc in accs]
score = np.asarray(accs_, dtype=int)
scores.append(score)
print(('Accuracy 5 shot ={:.2f}%').format(100*np.mean(np.array(scores))))
accuracy_t=100*np.mean(np.array(scores))
accuracy_h_test.extend([accuracy_t.tolist()])
del(test_generator)
del(accs)
del(accs_)
del(accuracy_t)
sio.savemat(savefile_name, {'accuracy_h_val':accuracy_h_val, 'accuracy_h_test':accuracy_h_test, 'epoch_best':epoch_best,'acc_best':acc_best})
if len(accuracy_h_val) >10:
                print('***Best accuracy so far***')
print('Best epoch =',epoch_best,'Best 5 shot acc=',acc_best)
serializers.save_npz(filename_5shot_last,model.chain)
if (t != 0) and (t % lrstep == 0) and lrdecay:
model.decay_learning_rate(0.1)
accuracy_h5=[]
serializers.load_npz(filename_5shot, model.chain)
print('Evaluating the best 5shot model...')
for i in range(50):
test_generator = miniImageNetGenerator(data_file='../data/Imagenet/test.npz',
nb_classes=nb_class_test, nb_samples_per_class=n_shot+n_query_test,
max_iter=600, xp=xp)
scores=[]
for j, (images, labels) in test_generator:
accs = model.evaluate(images, labels)
accs_ = [cuda.to_cpu(acc) for acc in accs]
score = np.asarray(accs_, dtype=int)
scores.append(score)
accuracy_t=100*np.mean(np.array(scores))
accuracy_h5.extend([accuracy_t.tolist()])
print(('600 episodes with 15-query accuracy: 5-shot ={:.2f}%').format(accuracy_t))
del(test_generator)
del(accs)
del(accs_)
del(accuracy_t)
sio.savemat(savefile_name, {'accuracy_h_val':accuracy_h_val, 'accuracy_h_test':accuracy_h_test, 'epoch_best':epoch_best,'acc_best':acc_best, 'accuracy_h5':accuracy_h5})
print(('Accuracy_test 5 shot ={:.2f}%').format(np.mean(accuracy_h5)))
| 7,014 | 40.755952 | 176 | py |
TapNet | TapNet-master/miniImageNet_TapNet/utils/generators.py | """
This code based on codes from https://github.com/tristandeleu/ntm-one-shot
"""
import numpy as np
import random
class miniImageNetGenerator(object):
"""miniImageNetGenerator
Args:
data_file (str): 'data/train.npz' or 'data/test.npz'
nb_classes (int): number of classes in an episode
        nb_samples_per_class (int): number of samples per class in an episode
max_iter (int): max number of episode generation
xp: numpy or cupy
"""
def __init__(self, data_file, nb_classes=5, nb_samples_per_class=10,
max_iter=None, xp=np):
super(miniImageNetGenerator, self).__init__()
self.data_file = data_file
self.nb_classes = nb_classes
self.nb_samples_per_class = nb_samples_per_class
self.max_iter = max_iter
self.xp = xp
self.num_iter = 0
self.data = self._load_data(self.data_file)
def _load_data(self, data_file):
data_dict = np.load(data_file)
return {key: np.array(val) for (key, val) in data_dict.items()}
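        #The .npz archive is expected to map each class name to an array of that
        #class's images; sample() then draws nb_classes keys and
        #nb_samples_per_class images per key to form an episode.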
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
if (self.max_iter is None) or (self.num_iter < self.max_iter):
self.num_iter += 1
images, labels = self.sample(self.nb_classes, self.nb_samples_per_class)
return (self.num_iter - 1), (images, labels)
else:
raise StopIteration()
def sample(self, nb_classes, nb_samples_per_class):
        sampled_characters = random.sample(list(self.data.keys()), nb_classes)
labels_and_images = []
for (k, char) in enumerate(sampled_characters):
_imgs = self.data[char]
_ind = random.sample(range(len(_imgs)), nb_samples_per_class)
labels_and_images.extend([(k, self.xp.array(_imgs[i].flatten())) for i in _ind])
arg_labels_and_images = []
for i in range(self.nb_samples_per_class):
for j in range(self.nb_classes):
arg_labels_and_images.extend([labels_and_images[i+j*self.nb_samples_per_class]])
labels, images = zip(*arg_labels_and_images)
return images, labels
| 2,249 | 35.290323 | 96 | py |
TapNet | TapNet-master/miniImageNet_TapNet/utils/rank_nullspace.py | import numpy as np
from numpy.linalg import svd
import cupy as cp
from cupy.linalg import svd as svd_gpu
def rank(A, atol=1e-13, rtol=0):
A = np.atleast_2d(A)
s = svd(A, compute_uv=False)
tol = max(atol, rtol*s[0])
rank = int((s >= tol).sum())
return rank
def nullspace(A, tol=1e-13):
A=np.atleast_2d(A)
u, s, vh = svd(A)
if len(A.shape) == 2:
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
elif len(A.shape) == 3:
nnz = (s >= tol).sum(axis=-1)
nnz = max(nnz)
ns = np.transpose(vh[:,nnz:,:].conj(), axes=[0,2,1])
return ns
def nullspace_gpu(A, tol=1e-13):
A = cp.atleast_2d(A)
u, s, vh =svd_gpu(A)
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
return ns
| 781 | 22.69697 | 60 | py |