repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
MRE-ISE | MRE-ISE-main/VSG/VG_parser/dataloaders/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/dataloaders/mscoco.py | from config import COCO_PATH, IM_SCALE, BOX_SCALE
import os
from torch.utils.data import Dataset
from pycocotools.coco import COCO
from PIL import Image
from lib.fpn.anchor_targets import anchor_target_layer
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
from dataloaders.image_transforms import SquarePad, Grayscale, Brightness, Sharpness, Contrast, RandomOrder, Hue, random_crop
import numpy as np
from dataloaders.blob import Blob
import torch
class CocoDetection(Dataset):
"""
Adapted from the torchvision code
"""
def __init__(self, mode):
"""
:param mode: train2014 or val2014
"""
self.mode = mode
self.root = os.path.join(COCO_PATH, mode)
self.ann_file = os.path.join(COCO_PATH, 'annotations', 'instances_{}.json'.format(mode))
self.coco = COCO(self.ann_file)
self.ids = [k for k in self.coco.imgs.keys() if len(self.coco.imgToAnns[k]) > 0]
tform = []
if self.is_train:
tform.append(RandomOrder([
Grayscale(),
Brightness(),
Contrast(),
Sharpness(),
Hue(),
]))
tform += [
SquarePad(),
Resize(IM_SCALE),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
self.transform_pipeline = Compose(tform)
self.ind_to_classes = ['__background__'] + [v['name'] for k, v in self.coco.cats.items()]
# COCO inds are weird (84 inds in total but a bunch of numbers are skipped)
self.id_to_ind = {coco_id:(ind+1) for ind, coco_id in enumerate(self.coco.cats.keys())}
self.id_to_ind[0] = 0
self.ind_to_id = {x:y for y,x in self.id_to_ind.items()}
@property
def is_train(self):
return self.mode.startswith('train')
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns: entry dict
"""
img_id = self.ids[index]
path = self.coco.loadImgs(img_id)[0]['file_name']
image_unpadded = Image.open(os.path.join(self.root, path)).convert('RGB')
ann_ids = self.coco.getAnnIds(imgIds=img_id)
anns = self.coco.loadAnns(ann_ids)
gt_classes = np.array([self.id_to_ind[x['category_id']] for x in anns], dtype=np.int64)
if np.any(gt_classes >= len(self.ind_to_classes)):
raise ValueError("OH NO {}".format(index))
if len(anns) == 0:
raise ValueError("Annotations should not be empty")
# gt_boxes = np.array((0, 4), dtype=np.float32)
# else:
gt_boxes = np.array([x['bbox'] for x in anns], dtype=np.float32)
if np.any(gt_boxes[:, [0,1]] < 0):
raise ValueError("GT boxes empty columns")
if np.any(gt_boxes[:, [2,3]] < 0):
raise ValueError("GT boxes empty h/w")
gt_boxes[:, [2, 3]] += gt_boxes[:, [0, 1]]
# Rescale so that the boxes are at BOX_SCALE
if self.is_train:
image_unpadded, gt_boxes = random_crop(image_unpadded,
gt_boxes * BOX_SCALE / max(image_unpadded.size),
BOX_SCALE,
round_boxes=False,
)
else:
# Seems a bit silly because we won't be using GT boxes then but whatever
gt_boxes = gt_boxes * BOX_SCALE / max(image_unpadded.size)
w, h = image_unpadded.size
box_scale_factor = BOX_SCALE / max(w, h)
# Optionally flip the image if we're doing training
flipped = self.is_train and np.random.random() > 0.5
if flipped:
scaled_w = int(box_scale_factor * float(w))
image_unpadded = image_unpadded.transpose(Image.FLIP_LEFT_RIGHT)
gt_boxes[:, [0, 2]] = scaled_w - gt_boxes[:, [2, 0]]
img_scale_factor = IM_SCALE / max(w, h)
if h > w:
im_size = (IM_SCALE, int(w*img_scale_factor), img_scale_factor)
elif h < w:
im_size = (int(h*img_scale_factor), IM_SCALE, img_scale_factor)
else:
im_size = (IM_SCALE, IM_SCALE, img_scale_factor)
entry = {
'img': self.transform_pipeline(image_unpadded),
'img_size': im_size,
'gt_boxes': gt_boxes,
'gt_classes': gt_classes,
'scale': IM_SCALE / BOX_SCALE,
'index': index,
'image_id': img_id,
'flipped': flipped,
'fn': path,
}
return entry
@classmethod
def splits(cls, *args, **kwargs):
""" Helper method to generate splits of the example_dataset"""
train = cls('train2014', *args, **kwargs)
val = cls('val2014', *args, **kwargs)
return train, val
def __len__(self):
return len(self.ids)
def coco_collate(data, num_gpus=3, is_train=False):
blob = Blob(mode='det', is_train=is_train, num_gpus=num_gpus,
batch_size_per_gpu=len(data) // num_gpus)
for d in data:
blob.append(d)
blob.reduce()
return blob
class CocoDataLoader(torch.utils.data.DataLoader):
"""
Iterates through the data, filtering out None,
but also loads everything as a (cuda) variable
"""
# def __iter__(self):
# for x in super(CocoDataLoader, self).__iter__():
# if isinstance(x, tuple) or isinstance(x, list):
# yield tuple(y.cuda(async=True) if hasattr(y, 'cuda') else y for y in x)
# else:
# yield x.cuda(async=True)
@classmethod
def splits(cls, train_data, val_data, batch_size=3, num_workers=1, num_gpus=3, **kwargs):
train_load = cls(
dataset=train_data,
batch_size=batch_size*num_gpus,
shuffle=True,
num_workers=num_workers,
collate_fn=lambda x: coco_collate(x, num_gpus=num_gpus, is_train=True),
drop_last=True,
# pin_memory=True,
**kwargs,
)
val_load = cls(
dataset=val_data,
batch_size=batch_size*num_gpus,
shuffle=False,
num_workers=num_workers,
collate_fn=lambda x: coco_collate(x, num_gpus=num_gpus, is_train=False),
drop_last=True,
# pin_memory=True,
**kwargs,
)
return train_load, val_load
if __name__ == '__main__':
train, val = CocoDetection.splits()
gtbox = train[0]['gt_boxes']
img_size = train[0]['img_size']
anchor_strides, labels, bbox_targets = anchor_target_layer(gtbox, img_size)
| 6,783 | 34.518325 | 125 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/dataloaders/image_transforms.py | # Some image transforms
from PIL import Image, ImageOps, ImageFilter, ImageEnhance
import numpy as np
from random import randint
# All of these need to be called on PIL imagez
class SquarePad(object):
def __call__(self, img):
w, h = img.size
img_padded = ImageOps.expand(img, border=(0, 0, max(h - w, 0), max(w - h, 0)),
fill=(int(0.485 * 256), int(0.456 * 256), int(0.406 * 256)))
return img_padded
class Grayscale(object):
"""
Converts to grayscale (not always, sometimes).
"""
def __call__(self, img):
factor = np.sqrt(np.sqrt(np.random.rand(1)))
# print("gray {}".format(factor))
enhancer = ImageEnhance.Color(img)
return enhancer.enhance(factor)
class Brightness(object):
"""
Converts to grayscale (not always, sometimes).
"""
def __call__(self, img):
factor = np.random.randn(1)/6+1
factor = min(max(factor, 0.5), 1.5)
# print("brightness {}".format(factor))
enhancer = ImageEnhance.Brightness(img)
return enhancer.enhance(factor)
class Contrast(object):
"""
Converts to grayscale (not always, sometimes).
"""
def __call__(self, img):
factor = np.random.randn(1)/8+1.0
factor = min(max(factor, 0.5), 1.5)
# print("contrast {}".format(factor))
enhancer = ImageEnhance.Contrast(img)
return enhancer.enhance(factor)
class Hue(object):
"""
Converts to grayscale
"""
def __call__(self, img):
# 30 seems good
factor = int(np.random.randn(1)*8)
factor = min(max(factor, -30), 30)
factor = np.array(factor, dtype=np.uint8)
hsv = np.array(img.convert('HSV'))
hsv[:,:,0] += factor
new_img = Image.fromarray(hsv, 'HSV').convert('RGB')
return new_img
class Sharpness(object):
"""
Converts to grayscale
"""
def __call__(self, img):
factor = 1.0 + np.random.randn(1)/5
# print("sharpness {}".format(factor))
enhancer = ImageEnhance.Sharpness(img)
return enhancer.enhance(factor)
def random_crop(img, boxes, box_scale, round_boxes=True, max_crop_fraction=0.1):
"""
Randomly crops the image
:param img: PIL image
:param boxes: Ground truth boxes
:param box_scale: This is the scale that the boxes are at (e.g. 1024 wide). We'll preserve that ratio
:param round_boxes: Set this to true if we're going to round the boxes to ints
:return: Cropped image, new boxes
"""
w, h = img.size
max_crop_w = int(w*max_crop_fraction)
max_crop_h = int(h*max_crop_fraction)
boxes_scaled = boxes * max(w,h) / box_scale
max_to_crop_top = min(int(boxes_scaled[:, 1].min()), max_crop_h)
max_to_crop_left = min(int(boxes_scaled[:, 0].min()), max_crop_w)
max_to_crop_right = min(int(w - boxes_scaled[:, 2].max()), max_crop_w)
max_to_crop_bottom = min(int(h - boxes_scaled[:, 3].max()), max_crop_h)
crop_top = randint(0, max(max_to_crop_top, 0))
crop_left = randint(0, max(max_to_crop_left, 0))
crop_right = randint(0, max(max_to_crop_right, 0))
crop_bottom = randint(0, max(max_to_crop_bottom, 0))
img_cropped = img.crop((crop_left, crop_top, w - crop_right, h - crop_bottom))
new_boxes = box_scale / max(img_cropped.size) * np.column_stack(
(boxes_scaled[:,0]-crop_left, boxes_scaled[:,1]-crop_top, boxes_scaled[:,2]-crop_left, boxes_scaled[:,3]-crop_top))
if round_boxes:
new_boxes = np.round(new_boxes).astype(np.int32)
return img_cropped, new_boxes
class RandomOrder(object):
""" Composes several transforms together in random order - or not at all!
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
if self.transforms is None:
return img
num_to_pick = np.random.choice(len(self.transforms))
if num_to_pick == 0:
return img
order = np.random.choice(len(self.transforms), size=num_to_pick, replace=False)
for i in order:
img = self.transforms[i](img)
return img | 4,172 | 30.613636 | 123 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/misc/motifs.py | """
SCRIPT TO MAKE MEMES. this was from an old version of the code, so it might require some fixes to get working.
"""
from dataloaders.visual_genome import VG
# import matplotlib
# # matplotlib.use('Agg')
from tqdm import tqdm
import seaborn as sns
import numpy as np
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from collections import defaultdict
train, val, test = VG.splits(filter_non_overlap=False, num_val_im=2000)
count_threshold = 50
pmi_threshold = 10
o_type = []
f = open("object_types.txt")
for line in f.readlines():
tabs = line.strip().split("\t")
t = tabs[1].split("_")[0]
o_type.append(t)
r_type = []
f = open("relation_types.txt")
for line in f.readlines():
tabs = line.strip().split("\t")
t = tabs[1].split("_")[0]
r_type.append(t)
max_id = 0
memes_id_id = {}
memes_id = {}
id_memes = {}
id_key = {}
key_id = {}
# go through and assign keys
dataset = []
for i in range(0, len(train)):
item = []
_r = train.relationships[i]
_o = train.gt_classes[i]
for j in range(0, len(_r)):
h = _o[_r[j][0]]
t = _o[_r[j][1]]
e = _r[j][2]
key1 = (h, e, t)
if key1 not in key_id:
id_key[max_id] = key1
key_id[key1] = max_id
max_id += 1
item.append(key_id[key1])
dataset.append(item)
cids = train.ind_to_classes
rids = train.ind_to_predicates
all_memes = []
def id_to_str(_id):
key = id_key[_id]
if len(key) == 2:
pair = key
l1, s1 = id_to_str(pair[0])
l2, s2 = id_to_str(pair[1])
return (l1 + l2, s1 + " & " + s2)
else:
return (1, "{}--{}-->{}".format(cids[key[0]], rids[key[1]], cids[key[2]]))
new_meme_score = {}
for p in range(0, 25):
print("iteration : {}".format(p))
unigrams = defaultdict(float)
bigrams = defaultdict(float)
unigrams_ori = defaultdict(float)
T = 0
T2 = 0
for i in range(0, len(dataset)):
item = dataset[i]
for j in range(0, len(item)):
key1 = item[j]
unigrams_ori[key1] += 1
# T += 1
for j2 in range(j + 1, len(item)):
key2 = item[j2]
if key1 > key2:
jkey = (key1, key2)
else:
jkey = (key2, key1)
unigrams[key1] += 1
unigrams[key2] += 1
bigrams[jkey] += 1
T2 += 1
pmi = []
for (jkey, val) in bigrams.items():
pval = (val / T2) / ((unigrams[jkey[0]] / T2) * (unigrams[jkey[0]] / T2))
# print("{} {} {}".format(jkey, val, pval))
if val > count_threshold and unigrams_ori[jkey[0]] > count_threshold and unigrams_ori[
jkey[1]] > count_threshold and pval > pmi_threshold:
pmi.append((pval, jkey, val))
# new_memes.add(jkey)
new_memes = set()
pmi = sorted(pmi, key=lambda x: -x[0])
new_meme_c = set()
for (v, k, f) in pmi:
# if k[0] in all_memes and k[1] in all_memes: continue
# if len( new_memes) > 1000: break
if k[0] in new_meme_c or k[1] in new_meme_c: continue
new_meme_c.add(k[0])
new_meme_c.add(k[1])
print("{} & {} \t {} \t {} \t {} \t {}".format(id_to_str(k[0]), id_to_str(k[1]), v, unigrams[k[0]],
unigrams[k[1]], bigrams[k]))
new_memes.add(k)
# assign new ids to the memes
new_meme_score[k] = v
# break
for meme in new_memes:
if meme in key_id: continue
all_memes.append(max_id)
id_key[max_id] = meme
key_id[meme] = max_id
max_id += 1
print("{} memes discovered ".format(len(new_memes)))
# go through and adjust the example_dataset
new_dataset = []
eliminated = 0
for i in range(0, len(dataset)):
item_save = dataset[i]
item = item_save
new_item = []
# merges = {}
while True:
best = None
best_score = 0
for j in range(0, len(item)):
key1 = item[j]
for j2 in range(j + 1, len(item)):
key2 = item[j2]
if key1 > key2:
jkey = (key1, key2)
else:
jkey = (key2, key1)
if jkey in new_meme_score and new_meme_score[jkey] > best_score:
best = (j, j2)
best_score = new_meme_score[jkey]
# if jkey in key_id and j not in merges and j2 not in merges:
# merges[j] = j2
# merges[j2] = j
if best is not None:
for j in range(0, len(item)):
if j == best[0]:
key1 = item[j]
key2 = item[best[1]]
if key1 > key2:
jkey = (key1, key2)
else:
jkey = (key2, key1)
new_item.append(key_id[jkey])
elif j == best[1]:
continue
else:
new_item.append(item[j])
# break
item = new_item
new_item = []
else:
# print("done")
new_item = item
break
# for j in range(0, len(item)):
# if j not in merges: new_item.append(item[j])
# elif j < merges[j]:
# key1 = item[j]
# key2 = item[merges[j]]
# if key1 > key2 : jkey = (key1, key2)
# else: jkey = (key2, key1)
# new_item.append(key_id[jkey])
eliminated += len(item_save) - len(new_item)
new_dataset.append(new_item)
print("{} total eliminated".format(eliminated))
dataset = new_dataset
meme_freq = defaultdict(float)
def increment_recursive(i):
# meme = id_key[i]
if i in all_memes:
meme_freq[i] += 1
key1 = id_key[i][0]
key2 = id_key[i][1]
increment_recursive(key1)
increment_recursive(key2)
def meme_length(i):
if i in all_memes:
return meme_length(id_key[i][0]) + meme_length(id_key[i][1])
else:
return 1
# compute statistics of memes
for i in range(0, len(dataset)):
item = dataset[i]
for j in range(0, len(item)):
increment_recursive(item[j])
for meme in all_memes:
print("{} {}".format(id_to_str(meme), meme_freq[meme]))
T = 0
T2 = 0
n_images = defaultdict(float)
n_edges = defaultdict(float)
for item in dataset:
meme_lengths = []
for j in range(0, len(item)):
meme_lengths.append(meme_length(item[j]))
n_images[max(meme_lengths)] += 1
# for l in meme_lengths: n_images[l] +=1
T += 1
for item in dataset:
for j in range(0, len(item)):
l = meme_length(item[j])
n_edges[l] += l
T2 += l
for (k, v) in n_images.items():
print("{} {}".format(k, v / T))
print("---")
for (k, v) in n_edges.items():
print("{} {}".format(k, v / T2))
| 7,142 | 28.395062 | 110 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/misc/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/rel_model_stanford.py | """
Let's get the relationships yo
"""
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
from torch.nn import functional as F
from lib.surgery import filter_dets
from lib.fpn.proposal_assignments.rel_assignments import rel_assignments
from lib.pytorch_misc import arange
from lib.object_detector import filter_det
from lib.rel_model import RelModel
MODES = ('sgdet', 'sgcls', 'predcls')
SIZE=512
class RelModelStanford(RelModel):
"""
RELATIONSHIPS
"""
def __init__(self, classes, rel_classes, mode='sgdet', num_gpus=1, require_overlap_det=True,
use_resnet=False, use_proposals=False, **kwargs):
"""
:param classes: Object classes
:param rel_classes: Relationship classes. None if were not using rel mode
:param num_gpus: how many GPUS 2 use
"""
super(RelModelStanford, self).__init__(classes, rel_classes, mode=mode, num_gpus=num_gpus,
require_overlap_det=require_overlap_det,
use_resnet=use_resnet,
nl_obj=0, nl_edge=0, use_proposals=use_proposals, thresh=0.01,
pooling_dim=4096)
del self.context
del self.post_lstm
del self.post_emb
self.rel_fc = nn.Linear(SIZE, self.num_rels)
self.obj_fc = nn.Linear(SIZE, self.num_classes)
self.obj_unary = nn.Linear(self.obj_dim, SIZE)
self.edge_unary = nn.Linear(4096, SIZE)
self.edge_gru = nn.GRUCell(input_size=SIZE, hidden_size=SIZE)
self.node_gru = nn.GRUCell(input_size=SIZE, hidden_size=SIZE)
self.n_iter = 3
self.sub_vert_w_fc = nn.Sequential(nn.Linear(SIZE*2, 1), nn.Sigmoid())
self.obj_vert_w_fc = nn.Sequential(nn.Linear(SIZE*2, 1), nn.Sigmoid())
self.out_edge_w_fc = nn.Sequential(nn.Linear(SIZE*2, 1), nn.Sigmoid())
self.in_edge_w_fc = nn.Sequential(nn.Linear(SIZE*2, 1), nn.Sigmoid())
def message_pass(self, rel_rep, obj_rep, rel_inds):
"""
:param rel_rep: [num_rel, fc]
:param obj_rep: [num_obj, fc]
:param rel_inds: [num_rel, 2] of the valid relationships
:return: object prediction [num_obj, 151], bbox_prediction [num_obj, 151*4]
and rel prediction [num_rel, 51]
"""
# [num_obj, num_rel] with binary!
numer = torch.arange(0, rel_inds.size(0)).long().cuda(rel_inds.get_device())
objs_to_outrels = rel_rep.data.new(obj_rep.size(0), rel_rep.size(0)).zero_()
objs_to_outrels.view(-1)[rel_inds[:, 0] * rel_rep.size(0) + numer] = 1
objs_to_outrels = Variable(objs_to_outrels)
objs_to_inrels = rel_rep.data.new(obj_rep.size(0), rel_rep.size(0)).zero_()
objs_to_inrels.view(-1)[rel_inds[:, 1] * rel_rep.size(0) + numer] = 1
objs_to_inrels = Variable(objs_to_inrels)
hx_rel = Variable(rel_rep.data.new(rel_rep.size(0), SIZE).zero_(), requires_grad=False)
hx_obj = Variable(obj_rep.data.new(obj_rep.size(0), SIZE).zero_(), requires_grad=False)
vert_factor = [self.node_gru(obj_rep, hx_obj)]
edge_factor = [self.edge_gru(rel_rep, hx_rel)]
for i in range(3):
# compute edge context
sub_vert = vert_factor[i][rel_inds[:, 0]]
obj_vert = vert_factor[i][rel_inds[:, 1]]
weighted_sub = self.sub_vert_w_fc(
torch.cat((sub_vert, edge_factor[i]), 1)) * sub_vert
weighted_obj = self.obj_vert_w_fc(
torch.cat((obj_vert, edge_factor[i]), 1)) * obj_vert
edge_factor.append(self.edge_gru(weighted_sub + weighted_obj, edge_factor[i]))
# Compute vertex context
pre_out = self.out_edge_w_fc(torch.cat((sub_vert, edge_factor[i]), 1)) * \
edge_factor[i]
pre_in = self.in_edge_w_fc(torch.cat((obj_vert, edge_factor[i]), 1)) * edge_factor[
i]
vert_ctx = objs_to_outrels @ pre_out + objs_to_inrels @ pre_in
vert_factor.append(self.node_gru(vert_ctx, vert_factor[i]))
# woohoo! done
return self.obj_fc(vert_factor[-1]), self.rel_fc(edge_factor[-1])
# self.box_fc(vert_factor[-1]).view(-1, self.num_classes, 4), \
# self.rel_fc(edge_factor[-1])
def forward(self, x, im_sizes, image_offset,
gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None, train_anchor_inds=None,
return_fmap=False):
"""
Forward pass for detection
:param x: Images@[batch_size, 3, IM_SIZE, IM_SIZE]
:param im_sizes: A numpy array of (h, w, scale) for each image.
:param image_offset: Offset onto what image we're on for MGPU training (if single GPU this is 0)
:param gt_boxes:
Training parameters:
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
:param train_anchor_inds: a [num_train, 2] array of indices for the anchors that will
be used to compute the training loss. Each (img_ind, fpn_idx)
:return: If train:
scores, boxdeltas, labels, boxes, boxtargets, rpnscores, rpnboxes, rellabels
if test:
prob dists, boxes, img inds, maxscores, classes
"""
result = self.detector(x, im_sizes, image_offset, gt_boxes, gt_classes, gt_rels, proposals,
train_anchor_inds, return_fmap=True)
if result.is_none():
return ValueError("heck")
im_inds = result.im_inds - image_offset
boxes = result.rm_box_priors
if self.training and result.rel_labels is None:
assert self.mode == 'sgdet'
result.rel_labels = rel_assignments(im_inds.data, boxes.data, result.rm_obj_labels.data,
gt_boxes.data, gt_classes.data, gt_rels.data,
image_offset, filter_non_overlap=True, num_sample_per_gt=1)
rel_inds = self.get_rel_inds(result.rel_labels, im_inds, boxes)
rois = torch.cat((im_inds[:, None].float(), boxes), 1)
visual_rep = self.visual_rep(result.fmap, rois, rel_inds[:, 1:])
result.obj_fmap = self.obj_feature_map(result.fmap.detach(), rois)
# Now do the approximation WHEREVER THERES A VALID RELATIONSHIP.
result.rm_obj_dists, result.rel_dists = self.message_pass(
F.relu(self.edge_unary(visual_rep)), self.obj_unary(result.obj_fmap), rel_inds[:, 1:])
# result.box_deltas_update = box_deltas
if self.training:
return result
# Decode here ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if self.mode == 'predcls':
# Hack to get the GT object labels
result.obj_scores = result.rm_obj_dists.data.new(gt_classes.size(0)).fill_(1)
result.obj_preds = gt_classes.data[:, 1]
elif self.mode == 'sgdet':
order, obj_scores, obj_preds= filter_det(F.softmax(result.rm_obj_dists),
result.boxes_all,
start_ind=0,
max_per_img=100,
thresh=0.00,
pre_nms_topn=6000,
post_nms_topn=300,
nms_thresh=0.3,
nms_filter_duplicates=True)
idx, perm = torch.sort(order)
result.obj_preds = rel_inds.new(result.rm_obj_dists.size(0)).fill_(1)
result.obj_scores = result.rm_obj_dists.data.new(result.rm_obj_dists.size(0)).fill_(0)
result.obj_scores[idx] = obj_scores.data[perm]
result.obj_preds[idx] = obj_preds.data[perm]
else:
scores_nz = F.softmax(result.rm_obj_dists).data
scores_nz[:, 0] = 0.0
result.obj_scores, score_ord = scores_nz[:, 1:].sort(dim=1, descending=True)
result.obj_preds = score_ord[:,0] + 1
result.obj_scores = result.obj_scores[:,0]
result.obj_preds = Variable(result.obj_preds)
result.obj_scores = Variable(result.obj_scores)
# Set result's bounding boxes to be size
# [num_boxes, topk, 4] instead of considering every single object assignment.
twod_inds = arange(result.obj_preds.data) * self.num_classes + result.obj_preds.data
if self.mode == 'sgdet':
bboxes = result.boxes_all.view(-1, 4)[twod_inds].view(result.boxes_all.size(0), 4)
else:
# Boxes will get fixed by filter_dets function.
bboxes = result.rm_box_priors
rel_rep = F.softmax(result.rel_dists)
return filter_dets(bboxes, result.obj_scores,
result.obj_preds, rel_inds[:, 1:], rel_rep)
| 9,332 | 44.305825 | 109 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/pytorch_misc.py | """
Miscellaneous functions that might be useful for pytorch
"""
import h5py
import numpy as np
import torch
from torch.autograd import Variable
import os
import dill as pkl
from itertools import tee
from torch import nn
def optimistic_restore(network, state_dict):
mismatch = False
own_state = network.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Unexpected key {} in state_dict with size {}".format(name, param.size()))
mismatch = True
elif param.size() == own_state[name].size():
own_state[name].copy_(param)
else:
print("Network has {} with size {}, ckpt has {}".format(name,
own_state[name].size(),
param.size()))
mismatch = True
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
print("We couldn't find {}".format(','.join(missing)))
mismatch = True
return not mismatch
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def get_ranking(predictions, labels, num_guesses=5):
"""
Given a matrix of predictions and labels for the correct ones, get the number of guesses
required to get the prediction right per example.
:param predictions: [batch_size, range_size] predictions
:param labels: [batch_size] array of labels
:param num_guesses: Number of guesses to return
:return:
"""
assert labels.size(0) == predictions.size(0)
assert labels.dim() == 1
assert predictions.dim() == 2
values, full_guesses = predictions.topk(predictions.size(1), dim=1)
_, ranking = full_guesses.topk(full_guesses.size(1), dim=1, largest=False)
gt_ranks = torch.gather(ranking.data, 1, labels.data[:, None]).squeeze()
guesses = full_guesses[:, :num_guesses]
return gt_ranks, guesses
def cache(f):
"""
Caches a computation
"""
def cache_wrapper(fn, *args, **kwargs):
if os.path.exists(fn):
with open(fn, 'rb') as file:
data = pkl.load(file)
else:
print("file {} not found, so rebuilding".format(fn))
data = f(*args, **kwargs)
with open(fn, 'wb') as file:
pkl.dump(data, file)
return data
return cache_wrapper
class Flattener(nn.Module):
def __init__(self):
"""
Flattens last 3 dimensions to make it only batch size, -1
"""
super(Flattener, self).__init__()
def forward(self, x):
return x.view(x.size(0), -1)
def to_variable(f):
"""
Decorator that pushes all the outputs to a variable
:param f:
:return:
"""
def variable_wrapper(*args, **kwargs):
rez = f(*args, **kwargs)
if isinstance(rez, tuple):
return tuple([Variable(x) for x in rez])
return Variable(rez)
return variable_wrapper
def arange(base_tensor, n=None):
new_size = base_tensor.size(0) if n is None else n
new_vec = base_tensor.new(new_size).long()
torch.arange(0, new_size, out=new_vec)
return new_vec
def to_onehot(vec, num_classes, fill=1000):
"""
Creates a [size, num_classes] torch FloatTensor where
one_hot[i, vec[i]] = fill
:param vec: 1d torch tensor
:param num_classes: int
:param fill: value that we want + and - things to be.
:return:
"""
onehot_result = vec.new(vec.size(0), num_classes).float().fill_(-fill)
arange_inds = vec.new(vec.size(0)).long()
torch.arange(0, vec.size(0), out=arange_inds)
onehot_result.view(-1)[vec + num_classes*arange_inds] = fill
return onehot_result
def save_net(fname, net):
h5f = h5py.File(fname, mode='w')
for k, v in list(net.state_dict().items()):
h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
h5f = h5py.File(fname, mode='r')
for k, v in list(net.state_dict().items()):
param = torch.from_numpy(np.asarray(h5f[k]))
if v.size() != param.size():
print("On k={} desired size is {} but supplied {}".format(k, v.size(), param.size()))
else:
v.copy_(param)
def batch_index_iterator(len_l, batch_size, skip_end=True):
"""
Provides indices that iterate over a list
:param len_l: int representing size of thing that we will
iterate over
:param batch_size: size of each batch
:param skip_end: if true, don't iterate over the last batch
:return: A generator that returns (start, end) tuples
as it goes through all batches
"""
iterate_until = len_l
if skip_end:
iterate_until = (len_l // batch_size) * batch_size
for b_start in range(0, iterate_until, batch_size):
yield (b_start, min(b_start+batch_size, len_l))
def batch_map(f, a, batch_size):
"""
Maps f over the array a in chunks of batch_size.
:param f: function to be applied. Must take in a block of
(batch_size, dim_a) and map it to (batch_size, something).
:param a: Array to be applied over of shape (num_rows, dim_a).
:param batch_size: size of each array
:return: Array of size (num_rows, something).
"""
rez = []
for s, e in batch_index_iterator(a.size(0), batch_size, skip_end=False):
print("Calling on {}".format(a[s:e].size()))
rez.append(f(a[s:e]))
return torch.cat(rez)
def const_row(fill, l, volatile=False):
input_tok = Variable(torch.LongTensor([fill] * l),volatile=volatile)
if torch.cuda.is_available():
input_tok = input_tok.cuda()
return input_tok
def print_para(model):
"""
Prints parameters of a model
:param opt:
:return:
"""
st = {}
strings = []
total_params = 0
for p_name, p in model.named_parameters():
if not ('bias' in p_name.split('.')[-1] or 'bn' in p_name.split('.')[-1]):
st[p_name] = ([str(x) for x in p.size()], np.prod(p.size()), p.requires_grad)
total_params += np.prod(p.size())
for p_name, (size, prod, p_req_grad) in sorted(st.items(), key=lambda x: -x[1][1]):
strings.append("{:<50s}: {:<16s}({:8d}) ({})".format(
p_name, '[{}]'.format(','.join(size)), prod, 'grad' if p_req_grad else ' '
))
return '\n {:.1f}M total parameters \n ----- \n \n{}'.format(total_params / 1000000.0, '\n'.join(strings))
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def nonintersecting_2d_inds(x):
"""
Returns np.array([(a,b) for a in range(x) for b in range(x) if a != b]) efficiently
:param x: Size
:return: a x*(x-1) array that is [(0,1), (0,2)... (0, x-1), (1,0), (1,2), ..., (x-1, x-2)]
"""
rs = 1 - np.diag(np.ones(x, dtype=np.int32))
relations = np.column_stack(np.where(rs))
return relations
def intersect_2d(x1, x2):
"""
Given two arrays [m1, n], [m2,n], returns a [m1, m2] array where each entry is True if those
rows match.
:param x1: [m1, n] numpy array
:param x2: [m2, n] numpy array
:return: [m1, m2] bool array of the intersections
"""
if x1.shape[1] != x2.shape[1]:
raise ValueError("Input arrays must have same #columns")
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
res = (x1[..., None] == x2.T[None, ...]).all(1)
return res
def np_to_variable(x, is_cuda=True, dtype=torch.FloatTensor):
v = Variable(torch.from_numpy(x).type(dtype))
if is_cuda:
v = v.cuda()
return v
def gather_nd(x, index):
"""
:param x: n dimensional tensor [x0, x1, x2, ... x{n-1}, dim]
:param index: [num, n-1] where each row contains the indices we'll use
:return: [num, dim]
"""
nd = x.dim() - 1
assert nd > 0
assert index.dim() == 2
assert index.size(1) == nd
dim = x.size(-1)
sel_inds = index[:,nd-1].clone()
mult_factor = x.size(nd-1)
for col in range(nd-2, -1, -1): # [n-2, n-3, ..., 1, 0]
sel_inds += index[:,col] * mult_factor
mult_factor *= x.size(col)
grouped = x.view(-1, dim)[sel_inds]
return grouped
def enumerate_by_image(im_inds):
im_inds_np = im_inds.cpu().numpy()
initial_ind = int(im_inds_np[0])
s = 0
for i, val in enumerate(im_inds_np):
if val != initial_ind:
yield initial_ind, s, i
initial_ind = int(val)
s = i
yield initial_ind, s, len(im_inds_np)
# num_im = im_inds[-1] + 1
# # print("Num im is {}".format(num_im))
# for i in range(num_im):
# # print("On i={}".format(i))
# inds_i = (im_inds == i).nonzero()
# if inds_i.dim() == 0:
# continue
# inds_i = inds_i.squeeze(1)
# s = inds_i[0]
# e = inds_i[-1] + 1
# # print("On i={} we have s={} e={}".format(i, s, e))
# yield i, s, e
def diagonal_inds(tensor):
"""
Returns the indices required to go along first 2 dims of tensor in diag fashion
:param tensor: thing
:return:
"""
assert tensor.dim() >= 2
assert tensor.size(0) == tensor.size(1)
size = tensor.size(0)
arange_inds = tensor.new(size).long()
torch.arange(0, tensor.size(0), out=arange_inds)
return (size+1)*arange_inds
def enumerate_imsize(im_sizes):
s = 0
for i, (h, w, scale, num_anchors) in enumerate(im_sizes):
na = int(num_anchors)
e = s + na
yield i, s, e, h, w, scale, na
s = e
def argsort_desc(scores):
"""
Returns the indices that sort scores descending in a smart way
:param scores: Numpy array of arbitrary size
:return: an array of size [numel(scores), dim(scores)] where each row is the index you'd
need to get the score.
"""
return np.column_stack(np.unravel_index(np.argsort(-scores.ravel()), scores.shape))
def unravel_index(index, dims):
unraveled = []
index_cp = index.clone()
for d in dims[::-1]:
unraveled.append(index_cp % d)
index_cp /= d
return torch.cat([x[:,None] for x in unraveled[::-1]], 1)
def de_chunkize(tensor, chunks):
s = 0
for c in chunks:
yield tensor[s:(s+c)]
s = s+c
def random_choose(tensor, num):
"randomly choose indices"
num_choose = min(tensor.size(0), num)
if num_choose == tensor.size(0):
return tensor
# Gotta do this in numpy because of https://github.com/pytorch/pytorch/issues/1868
rand_idx = np.random.choice(tensor.size(0), size=num, replace=False)
rand_idx = torch.LongTensor(rand_idx).cuda(tensor.get_device())
chosen = tensor[rand_idx].contiguous()
# rand_values = tensor.new(tensor.size(0)).float().normal_()
# _, idx = torch.sort(rand_values)
#
# chosen = tensor[idx[:num]].contiguous()
return chosen
def transpose_packed_sequence_inds(lengths):
    """
    Goes from a TxB packed sequence to a BxT or vice versa. Assumes that nothing is a variable
    :param lengths: per-sequence lengths, sorted descending
    :return: (flat permutation indices as an int ndarray,
              number of active sequences at each timestep)
    """
    # offsets[b] walks forward through sequence b as timesteps are consumed.
    offsets = np.cumsum([0] + lengths)
    flat_inds = []
    per_step_counts = []
    active = len(lengths)  # how many sequences are at least this long
    for t in range(lengths[0]):
        # Retire sequences that ended before timestep t.
        while active > 1 and lengths[active - 1] <= t:
            active -= 1
        flat_inds.append(offsets[:active].copy())
        offsets[:active] += 1
        per_step_counts.append(active)
    return np.concatenate(flat_inds, 0), per_step_counts
def right_shift_packed_sequence_inds(lengths):
    """
    :param lengths: e.g. [2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1]
    :return: perm indices for the old stuff (TxB) to shift it right 1 slot so as to accomodate
             BOS toks
    visual example: of lengths = [4,3,1,1]
    before:
        a (0)  b (4)  c (7) d (8)
        a (1)  b (5)
        a (2)  b (6)
        a (3)
    after:
        bos a (0)  b (4)  c (7)
        bos a (1)
        bos a (2)
        bos
    """
    perm = []
    offset = 0
    # Each timestep keeps only as many entries as the NEXT timestep needs,
    # taken from the start of the current timestep's slot range.
    for prev_len, cur_len in zip(lengths, lengths[1:]):
        perm.extend(range(offset, offset + cur_len))
        offset += prev_len
    return perm
def clip_grad_norm(named_parameters, max_norm, clip=False, verbose=False):
    r"""Clips gradient norm of an iterable of named parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. When ``clip`` is True and the total
    norm exceeds ``max_norm``, gradients are rescaled in-place.

    Arguments:
        named_parameters (Iterable[(str, Variable)]): (name, parameter) pairs
            whose gradients will be normalized. May be a one-shot generator
            such as ``model.named_parameters()``.
        max_norm (float or int): max norm of the gradients
        clip (bool): actually rescale the gradients (otherwise just measure)
        verbose (bool): print a per-parameter norm report
    Returns:
        Total norm of the parameters (viewed as a single vector).
    """
    # BUGFIX: materialize first. Callers commonly pass model.named_parameters(),
    # a generator; the measuring loop below would exhaust it, so the clipping
    # loop further down would silently iterate nothing and never clip.
    named_parameters = list(named_parameters)
    max_norm = float(max_norm)
    total_norm = 0
    param_to_norm = {}
    param_to_shape = {}
    for n, p in named_parameters:
        if p.grad is not None:
            param_norm = p.grad.data.norm(2)
            total_norm += param_norm ** 2
            param_to_norm[n] = param_norm
            param_to_shape[n] = p.size()
    total_norm = total_norm ** (1. / 2)
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1 and clip:
        for _, p in named_parameters:
            if p.grad is not None:
                p.grad.data.mul_(clip_coef)
    if verbose:
        print('---Total norm {:.3f} clip coef {:.3f}-----------------'.format(total_norm, clip_coef))
        for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]):
            print("{:<50s}: {:.3f}, ({})".format(name, norm, param_to_shape[name]))
        print('-------------------------------', flush=True)
    return total_norm
def update_lr(optimizer, lr=1e-4):
    """Set the learning rate of every param group of `optimizer` to `lr`."""
    print("------ Learning rate -> {}".format(lr))
    for group in optimizer.param_groups:
        group['lr'] = lr
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/get_dataset_counts.py | """
Get counts of all of the examples in the example_dataset. Used for creating the baseline
dictionary model
"""
import numpy as np
from dataloaders.visual_genome import VG
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from lib.pytorch_misc import nonintersecting_2d_inds
def get_counts(train_data=None, must_overlap=True):
    """
    Get counts of all of the relations. Used for modeling directly P(rel | o1, o2)
    :param train_data: dataset to count over. Defaults to the VG training
        split; constructed lazily so that merely importing this module no
        longer loads the whole dataset (the old eager default argument did).
    :param must_overlap: if True, only count background pairs whose boxes overlap
    :return: (fg_matrix, bg_matrix): foreground counts indexed by
        [subj_class, obj_class, predicate], background pair counts indexed by
        [subj_class, obj_class]
    """
    if train_data is None:
        train_data = VG(mode='train', filter_duplicate_rels=False, num_val_im=5000)
    fg_matrix = np.zeros((
        train_data.num_classes,
        train_data.num_classes,
        train_data.num_predicates,
    ), dtype=np.int64)
    bg_matrix = np.zeros((
        train_data.num_classes,
        train_data.num_classes,
    ), dtype=np.int64)
    for ex_ind in range(len(train_data)):
        gt_classes = train_data.gt_classes[ex_ind].copy()
        gt_relations = train_data.relationships[ex_ind].copy()
        gt_boxes = train_data.gt_boxes[ex_ind].copy()
        # For the foreground, we'll just look at everything
        o1o2 = gt_classes[gt_relations[:, :2]]
        for (o1, o2), gtr in zip(o1o2, gt_relations[:, 2]):
            fg_matrix[o1, o2, gtr] += 1
        # For the background, get all of the things that overlap.
        o1o2_total = gt_classes[np.array(
            box_filter(gt_boxes, must_overlap=must_overlap), dtype=int)]
        for (o1, o2) in o1o2_total:
            bg_matrix[o1, o2] += 1
    return fg_matrix, bg_matrix
def box_filter(boxes, must_overlap=False):
    """ Only include boxes that overlap as possible relations.
    If no overlapping boxes, use all of them.

    :param boxes: [n, 4] array of boxes
    :param must_overlap: prefer overlapping pairs; fall back to all pairs
        when nothing overlaps
    :return: [k, 2] array of (i, j) index pairs, i != j
    """
    # BUGFIX: np.float / np.bool were deprecated aliases for the builtins and
    # are removed in NumPy >= 1.24; use float64 / bool, which are what the
    # aliases meant.
    overlaps = bbox_overlaps(boxes.astype(np.float64), boxes.astype(np.float64)) > 0
    np.fill_diagonal(overlaps, 0)
    all_possib = np.ones_like(overlaps, dtype=bool)
    np.fill_diagonal(all_possib, 0)
    if must_overlap:
        possible_boxes = np.column_stack(np.where(overlaps))
        if possible_boxes.size == 0:
            # No pair overlaps: fall back to every ordered off-diagonal pair.
            possible_boxes = np.column_stack(np.where(all_possib))
    else:
        possible_boxes = np.column_stack(np.where(all_possib))
    return possible_boxes
if __name__ == '__main__':
    # Quick sanity run: compute the FG/BG co-occurrence statistics over ALL
    # class pairs (not just overlapping ones). Results are kept in memory
    # only; nothing is written to disk here.
    fg, bg = get_counts(must_overlap=False)
| 2,293 | 31.309859 | 109 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/resnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import model_urls, conv3x3, BasicBlock
from torchvision.models.vgg import vgg16
from config import BATCHNORM_MOMENTUM
class Bottleneck(nn.Module):
    """ResNet "bottleneck" residual block: 1x1 reduce -> 3x3 -> 1x1 expand.

    Mirrors torchvision's Bottleneck except that batch-norm layers use the
    project-wide BATCHNORM_MOMENTUM and the trailing ReLU can be disabled via
    `relu_end`, exposing the pre-activation output to the caller.
    """
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, relu_end=True):
        """
        :param inplanes: number of input channels
        :param planes: bottleneck width; block outputs planes * 4 channels
        :param stride: stride of the 3x3 conv (spatial downsampling)
        :param downsample: optional module matching the residual branch's
            shape/channels to the main branch (used when stride != 1 or the
            channel counts differ)
        :param relu_end: whether to apply the final ReLU after the residual add
        """
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BATCHNORM_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BATCHNORM_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, momentum=BATCHNORM_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.relu_end = relu_end
    def forward(self, x):
        """Three conv-BN(-ReLU) stages, residual add, then optional ReLU."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Match the identity branch to the main branch's shape.
            residual = self.downsample(x)
        out += residual
        if self.relu_end:
            out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone (torchvision-style), parameterized by a block class
    and the number of blocks per stage.

    NOTE: layer4 deliberately keeps stride=1 (the line marked HACK) instead of
    torchvision's stride=2, so the final feature map stays at 1/16 of the
    input resolution — presumably for ROI pooling downstream; confirm against
    callers.
    """
    def __init__(self, block, layers, num_classes=1000):
        """
        :param block: residual block class (e.g. Bottleneck)
        :param layers: number of blocks in each of the four stages
        :param num_classes: output size of the classification head
        """
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BATCHNORM_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1) # HACK
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; batch norms start as identity (weight 1, bias 0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; only the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Project the residual branch so its shape/channels match.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BATCHNORM_MOMENTUM),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Standard ResNet forward: stem, four stages, global pool, classifier."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet101'])
        net.load_state_dict(state_dict)
    return net
def resnet_l123():
    """Pretrained ResNet-101 truncated after layer3 (conv backbone only):
    the classification head and layer4 are removed."""
    backbone = resnet101(pretrained=True)
    for attr in ('layer4', 'avgpool', 'fc'):
        delattr(backbone, attr)
    return backbone
def resnet_l4(relu_end=True):
    """Return layer4 of a pretrained ResNet-101 with its stride reduced to 1.

    :param relu_end: if False, drop the final ReLU of the last bottleneck so
        the raw (pre-activation) features are exposed.
    """
    layer4 = resnet101(pretrained=True).layer4
    if not relu_end:
        layer4[-1].relu_end = False
    # Undo the usual stride-2 downsampling so the feature map keeps its size.
    layer4[0].conv2.stride = (1, 1)
    layer4[0].downsample[0].stride = (1, 1)
    return layer4
def vgg_fc(relu_end=True, linear_end=True):
    """Return VGG16's fully-connected head, truncated according to the flags.

    Always drops the final classifier (index 6) and the dropout before it
    (index 5); optionally also the trailing ReLU (4) and second linear (3).
    """
    classifier = vgg16(pretrained=True).classifier
    for key in ('6', '5'):
        del classifier._modules[key]
    if not relu_end:
        del classifier._modules['4']
    if not linear_end:
        del classifier._modules['3']
    return classifier
| 4,805 | 31.693878 | 86 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/rel_model.py | """
Let's get the relationships yo
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.utils.rnn import PackedSequence
from lib.resnet import resnet_l4
from config import BATCHNORM_MOMENTUM
from lib.fpn.nms.functions.nms import apply_nms
# from lib.decoder_rnn import DecoderRNN, lstm_factory, LockedDropout
from lib.lstm.decoder_rnn import DecoderRNN
from lib.lstm.highway_lstm_cuda.alternating_highway_lstm import AlternatingHighwayLSTM
from lib.fpn.box_utils import bbox_overlaps, center_size
from lib.get_union_boxes import UnionBoxesAndFeats
from lib.fpn.proposal_assignments.rel_assignments import rel_assignments
from lib.object_detector import ObjectDetector, gather_res, load_vgg
from lib.pytorch_misc import transpose_packed_sequence_inds, to_onehot, arange, enumerate_by_image, diagonal_inds, Flattener
from lib.sparse_targets import FrequencyBias
from lib.surgery import filter_dets
from lib.word_vectors import obj_edge_vectors
from lib.fpn.roi_align.functions.roi_align import RoIAlignFunction
import math
def _sort_by_score(im_inds, scores):
    """
    We'll sort everything scorewise from Hi->low, BUT we need to keep images together
    and sort the LSTM input from longest image to shortest.
    :param im_inds: Which im we're on
    :param scores: Goodness ranging between [0, 1]. Higher numbers come FIRST
    :return: Permutation to put everything in the right order for the LSTM
             Inverse permutation
             Lengths for the TxB packed sequence.
    """
    num_im = im_inds[-1] + 1
    rois_per_image = scores.new(num_im)
    lengths = []
    for i, s, e in enumerate_by_image(im_inds):
        # (s - e) is the NEGATIVE ROI count of image i, so this stores
        # -2 * num_rois * num_im + i: a big negative per-image offset that,
        # after the subtraction below, dominates the per-ROI score and groups
        # each image's ROIs together (longer images first).
        rois_per_image[i] = 2 * (s - e) * num_im + i
        lengths.append(e - s)
    lengths = sorted(lengths, reverse=True)
    inds, ls_transposed = transpose_packed_sequence_inds(lengths)  # move it to TxB form
    inds = torch.LongTensor(inds).cuda(im_inds.get_device())
    # ~~~~~~~~~~~~~~~~
    # HACKY CODE ALERT!!!
    # we're sorting by confidence which is in the range (0,1), but more importantly by longest
    # img....
    # ~~~~~~~~~~~~~~~~
    roi_order = scores - 2 * rois_per_image[im_inds]
    _, perm = torch.sort(roi_order, 0, descending=True)
    # Reorder the batch-major permutation into TxB packed order.
    perm = perm[inds]
    _, inv_perm = torch.sort(perm)
    return perm, inv_perm, ls_transposed
MODES = ('sgdet', 'sgcls', 'predcls')
class LinearizedContext(nn.Module):
"""
Module for computing the object contexts and edge contexts
"""
def __init__(self, classes, rel_classes, mode='sgdet',
embed_dim=200, hidden_dim=256, obj_dim=2048,
nl_obj=2, nl_edge=2, dropout_rate=0.2, order='confidence',
pass_in_obj_feats_to_decoder=True,
pass_in_obj_feats_to_edge=True):
super(LinearizedContext, self).__init__()
self.classes = classes
self.rel_classes = rel_classes
assert mode in MODES
self.mode = mode
self.nl_obj = nl_obj
self.nl_edge = nl_edge
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.obj_dim = obj_dim
self.dropout_rate = dropout_rate
self.pass_in_obj_feats_to_decoder = pass_in_obj_feats_to_decoder
self.pass_in_obj_feats_to_edge = pass_in_obj_feats_to_edge
assert order in ('size', 'confidence', 'random', 'leftright')
self.order = order
# EMBEDDINGS
embed_vecs = obj_edge_vectors(self.classes, wv_dim=self.embed_dim)
self.obj_embed = nn.Embedding(self.num_classes, self.embed_dim)
self.obj_embed.weight.data = embed_vecs.clone()
self.obj_embed2 = nn.Embedding(self.num_classes, self.embed_dim)
self.obj_embed2.weight.data = embed_vecs.clone()
# This probably doesn't help it much
self.pos_embed = nn.Sequential(*[
nn.BatchNorm1d(4, momentum=BATCHNORM_MOMENTUM / 10.0),
nn.Linear(4, 128),
nn.ReLU(inplace=True),
nn.Dropout(0.1),
])
if self.nl_obj > 0:
self.obj_ctx_rnn = AlternatingHighwayLSTM(
input_size=self.obj_dim+self.embed_dim+128,
hidden_size=self.hidden_dim,
num_layers=self.nl_obj,
recurrent_dropout_probability=dropout_rate)
decoder_inputs_dim = self.hidden_dim
if self.pass_in_obj_feats_to_decoder:
decoder_inputs_dim += self.obj_dim + self.embed_dim
self.decoder_rnn = DecoderRNN(self.classes, embed_dim=self.embed_dim,
inputs_dim=decoder_inputs_dim,
hidden_dim=self.hidden_dim,
recurrent_dropout_probability=dropout_rate)
else:
self.decoder_lin = nn.Linear(self.obj_dim + self.embed_dim + 128, self.num_classes)
if self.nl_edge > 0:
input_dim = self.embed_dim
if self.nl_obj > 0:
input_dim += self.hidden_dim
if self.pass_in_obj_feats_to_edge:
input_dim += self.obj_dim
self.edge_ctx_rnn = AlternatingHighwayLSTM(input_size=input_dim,
hidden_size=self.hidden_dim,
num_layers=self.nl_edge,
recurrent_dropout_probability=dropout_rate)
def sort_rois(self, batch_idx, confidence, box_priors):
"""
:param batch_idx: tensor with what index we're on
:param confidence: tensor with confidences between [0,1)
:param boxes: tensor with (x1, y1, x2, y2)
:return: Permutation, inverse permutation, and the lengths transposed (same as _sort_by_score)
"""
cxcywh = center_size(box_priors)
if self.order == 'size':
sizes = cxcywh[:,2] * cxcywh[:, 3]
# sizes = (box_priors[:, 2] - box_priors[:, 0] + 1) * (box_priors[:, 3] - box_priors[:, 1] + 1)
assert sizes.min() > 0.0
scores = sizes / (sizes.max() + 1)
elif self.order == 'confidence':
scores = confidence
elif self.order == 'random':
scores = torch.FloatTensor(np.random.rand(batch_idx.size(0))).cuda(batch_idx.get_device())
elif self.order == 'leftright':
centers = cxcywh[:,0]
scores = centers / (centers.max() + 1)
else:
raise ValueError("invalid mode {}".format(self.order))
return _sort_by_score(batch_idx, scores)
@property
def num_classes(self):
return len(self.classes)
@property
def num_rels(self):
return len(self.rel_classes)
def edge_ctx(self, obj_feats, obj_dists, im_inds, obj_preds, box_priors=None):
"""
Object context and object classification.
:param obj_feats: [num_obj, img_dim + object embedding0 dim]
:param obj_dists: [num_obj, #classes]
:param im_inds: [num_obj] the indices of the images
:return: edge_ctx: [num_obj, #feats] For later!
"""
# Only use hard embeddings
obj_embed2 = self.obj_embed2(obj_preds)
# obj_embed3 = F.softmax(obj_dists, dim=1) @ self.obj_embed3.weight
inp_feats = torch.cat((obj_embed2, obj_feats), 1)
# Sort by the confidence of the maximum detection.
confidence = F.softmax(obj_dists, dim=1).data.view(-1)[
obj_preds.data + arange(obj_preds.data) * self.num_classes]
perm, inv_perm, ls_transposed = self.sort_rois(im_inds.data, confidence, box_priors)
edge_input_packed = PackedSequence(inp_feats[perm], ls_transposed)
edge_reps = self.edge_ctx_rnn(edge_input_packed)[0][0]
# now we're good! unperm
edge_ctx = edge_reps[inv_perm]
return edge_ctx
def obj_ctx(self, obj_feats, obj_dists, im_inds, obj_labels=None, box_priors=None, boxes_per_cls=None):
"""
Object context and object classification.
:param obj_feats: [num_obj, img_dim + object embedding0 dim]
:param obj_dists: [num_obj, #classes]
:param im_inds: [num_obj] the indices of the images
:param obj_labels: [num_obj] the GT labels of the image
:param boxes: [num_obj, 4] boxes. We'll use this for NMS
:return: obj_dists: [num_obj, #classes] new probability distribution.
obj_preds: argmax of that distribution.
obj_final_ctx: [num_obj, #feats] For later!
"""
# Sort by the confidence of the maximum detection.
confidence = F.softmax(obj_dists, dim=1).data[:, 1:].max(1)[0]
perm, inv_perm, ls_transposed = self.sort_rois(im_inds.data, confidence, box_priors)
# Pass object features, sorted by score, into the encoder LSTM
obj_inp_rep = obj_feats[perm].contiguous()
input_packed = PackedSequence(obj_inp_rep, ls_transposed)
encoder_rep = self.obj_ctx_rnn(input_packed)[0][0]
# Decode in order
if self.mode != 'predcls':
decoder_inp = PackedSequence(torch.cat((obj_inp_rep, encoder_rep), 1) if self.pass_in_obj_feats_to_decoder else encoder_rep,
ls_transposed)
obj_dists, obj_preds = self.decoder_rnn(
decoder_inp, #obj_dists[perm],
labels=obj_labels[perm] if obj_labels is not None else None,
boxes_for_nms=boxes_per_cls[perm] if boxes_per_cls is not None else None,
)
obj_preds = obj_preds[inv_perm]
obj_dists = obj_dists[inv_perm]
else:
assert obj_labels is not None
obj_preds = obj_labels
obj_dists = Variable(to_onehot(obj_preds.data, self.num_classes))
encoder_rep = encoder_rep[inv_perm]
return obj_dists, obj_preds, encoder_rep
def forward(self, obj_fmaps, obj_logits, im_inds, obj_labels=None, box_priors=None, boxes_per_cls=None):
"""
Forward pass through the object and edge context
:param obj_priors:
:param obj_fmaps:
:param im_inds:
:param obj_labels:
:param boxes:
:return:
"""
obj_embed = F.softmax(obj_logits, dim=1) @ self.obj_embed.weight
pos_embed = self.pos_embed(Variable(center_size(box_priors)))
obj_pre_rep = torch.cat((obj_fmaps, obj_embed, pos_embed), 1)
if self.nl_obj > 0:
obj_dists2, obj_preds, obj_ctx = self.obj_ctx(
obj_pre_rep,
obj_logits,
im_inds,
obj_labels,
box_priors,
boxes_per_cls,
)
else:
# UNSURE WHAT TO DO HERE
if self.mode == 'predcls':
obj_dists2 = Variable(to_onehot(obj_labels.data, self.num_classes))
else:
obj_dists2 = self.decoder_lin(obj_pre_rep)
if self.mode == 'sgdet' and not self.training:
# NMS here for baseline
probs = F.softmax(obj_dists2, 1)
nms_mask = obj_dists2.data.clone()
nms_mask.zero_()
for c_i in range(1, obj_dists2.size(1)):
scores_ci = probs.data[:, c_i]
boxes_ci = boxes_per_cls.data[:, c_i]
keep = apply_nms(scores_ci, boxes_ci,
pre_nms_topn=scores_ci.size(0), post_nms_topn=scores_ci.size(0),
nms_thresh=0.3)
nms_mask[:, c_i][keep] = 1
obj_preds = Variable(nms_mask * probs.data, volatile=True)[:,1:].max(1)[1] + 1
else:
obj_preds = obj_labels if obj_labels is not None else obj_dists2[:,1:].max(1)[1] + 1
obj_ctx = obj_pre_rep
edge_ctx = None
if self.nl_edge > 0:
edge_ctx = self.edge_ctx(
torch.cat((obj_fmaps, obj_ctx), 1) if self.pass_in_obj_feats_to_edge else obj_ctx,
obj_dists=obj_dists2.detach(), # Was previously obj_logits.
im_inds=im_inds,
obj_preds=obj_preds,
box_priors=box_priors,
)
return obj_dists2, obj_preds, edge_ctx
class RelModel(nn.Module):
"""
RELATIONSHIPS
"""
def __init__(self, classes, rel_classes, mode='sgdet', num_gpus=1, use_vision=True, require_overlap_det=True,
embed_dim=200, hidden_dim=256, pooling_dim=2048,
nl_obj=1, nl_edge=2, use_resnet=False, order='confidence', thresh=0.01,
use_proposals=False, pass_in_obj_feats_to_decoder=True,
pass_in_obj_feats_to_edge=True, rec_dropout=0.0, use_bias=True, use_tanh=True,
limit_vision=True):
"""
:param classes: Object classes
:param rel_classes: Relationship classes. None if were not using rel mode
:param mode: (sgcls, predcls, or sgdet)
:param num_gpus: how many GPUS 2 use
:param use_vision: Whether to use vision in the final product
:param require_overlap_det: Whether two objects must intersect
:param embed_dim: Dimension for all embeddings
:param hidden_dim: LSTM hidden size
:param obj_dim:
"""
super(RelModel, self).__init__()
self.classes = classes
self.rel_classes = rel_classes
self.num_gpus = num_gpus
assert mode in MODES
self.mode = mode
self.pooling_size = 7
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.obj_dim = 2048 if use_resnet else 4096
self.pooling_dim = pooling_dim
self.use_bias = use_bias
self.use_vision = use_vision
self.use_tanh = use_tanh
self.limit_vision=limit_vision
self.require_overlap = require_overlap_det and self.mode == 'sgdet'
self.detector = ObjectDetector(
classes=classes,
mode=('proposals' if use_proposals else 'refinerels') if mode == 'sgdet' else 'gtbox',
use_resnet=use_resnet,
thresh=thresh,
max_per_img=64,
)
self.context = LinearizedContext(self.classes, self.rel_classes, mode=self.mode,
embed_dim=self.embed_dim, hidden_dim=self.hidden_dim,
obj_dim=self.obj_dim,
nl_obj=nl_obj, nl_edge=nl_edge, dropout_rate=rec_dropout,
order=order,
pass_in_obj_feats_to_decoder=pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=pass_in_obj_feats_to_edge)
# Image Feats (You'll have to disable if you want to turn off the features from here)
self.union_boxes = UnionBoxesAndFeats(pooling_size=self.pooling_size, stride=16,
dim=1024 if use_resnet else 512)
if use_resnet:
self.roi_fmap = nn.Sequential(
resnet_l4(relu_end=False),
nn.AvgPool2d(self.pooling_size),
Flattener(),
)
else:
roi_fmap = [
Flattener(),
load_vgg(use_dropout=False, use_relu=False, use_linear=pooling_dim == 4096, pretrained=False).classifier,
]
if pooling_dim != 4096:
roi_fmap.append(nn.Linear(4096, pooling_dim))
self.roi_fmap = nn.Sequential(*roi_fmap)
self.roi_fmap_obj = load_vgg(pretrained=False).classifier
###################################
self.post_lstm = nn.Linear(self.hidden_dim, self.pooling_dim * 2)
# Initialize to sqrt(1/2n) so that the outputs all have mean 0 and variance 1.
# (Half contribution comes from LSTM, half from embedding.
# In practice the pre-lstm stuff tends to have stdev 0.1 so I multiplied this by 10.
self.post_lstm.weight.data.normal_(0, 10.0 * math.sqrt(1.0 / self.hidden_dim))
self.post_lstm.bias.data.zero_()
if nl_edge == 0:
self.post_emb = nn.Embedding(self.num_classes, self.pooling_dim*2)
self.post_emb.weight.data.normal_(0, math.sqrt(1.0))
self.rel_compress = nn.Linear(self.pooling_dim, self.num_rels, bias=True)
self.rel_compress.weight = torch.nn.init.xavier_normal(self.rel_compress.weight, gain=1.0)
if self.use_bias:
self.freq_bias = FrequencyBias()
@property
def num_classes(self):
return len(self.classes)
@property
def num_rels(self):
return len(self.rel_classes)
def visual_rep(self, features, rois, pair_inds):
"""
Classify the features
:param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4]
:param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
:param pair_inds inds to use when predicting
:return: score_pred, a [num_rois, num_classes] array
box_pred, a [num_rois, num_classes, 4] array
"""
assert pair_inds.size(1) == 2
uboxes = self.union_boxes(features, rois, pair_inds)
return self.roi_fmap(uboxes)
def get_rel_inds(self, rel_labels, im_inds, box_priors):
# Get the relationship candidates
if self.training:
rel_inds = rel_labels[:, :3].data.clone()
else:
rel_cands = im_inds.data[:, None] == im_inds.data[None]
rel_cands.view(-1)[diagonal_inds(rel_cands)] = 0
# Require overlap for detection
if self.require_overlap:
rel_cands = rel_cands & (bbox_overlaps(box_priors.data,
box_priors.data) > 0)
# if there are fewer then 100 things then we might as well add some?
amt_to_add = 100 - rel_cands.long().sum()
rel_cands = rel_cands.nonzero()
if rel_cands.dim() == 0:
rel_cands = im_inds.data.new(1, 2).fill_(0)
rel_inds = torch.cat((im_inds.data[rel_cands[:, 0]][:, None], rel_cands), 1)
return rel_inds
def obj_feature_map(self, features, rois):
"""
Gets the ROI features
:param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4] (features at level p2)
:param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
:return: [num_rois, #dim] array
"""
feature_pool = RoIAlignFunction(self.pooling_size, self.pooling_size, spatial_scale=1 / 16)(
features, rois)
return self.roi_fmap_obj(feature_pool.view(rois.size(0), -1))
def forward(self, x, im_sizes, image_offset,
gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None, train_anchor_inds=None,
return_fmap=False):
"""
Forward pass for detection
:param x: Images@[batch_size, 3, IM_SIZE, IM_SIZE]
:param im_sizes: A numpy array of (h, w, scale) for each image.
:param image_offset: Offset onto what image we're on for MGPU training (if single GPU this is 0)
:param gt_boxes:
Training parameters:
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
:param train_anchor_inds: a [num_train, 2] array of indices for the anchors that will
be used to compute the training loss. Each (img_ind, fpn_idx)
:return: If train:
scores, boxdeltas, labels, boxes, boxtargets, rpnscores, rpnboxes, rellabels
if test:
prob dists, boxes, img inds, maxscores, classes
"""
result = self.detector(x, im_sizes, image_offset, gt_boxes, gt_classes, gt_rels, proposals,
train_anchor_inds, return_fmap=True)
if result.is_none():
return ValueError("heck")
im_inds = result.im_inds - image_offset
boxes = result.rm_box_priors
if self.training and result.rel_labels is None:
assert self.mode == 'sgdet'
result.rel_labels = rel_assignments(im_inds.data, boxes.data, result.rm_obj_labels.data,
gt_boxes.data, gt_classes.data, gt_rels.data,
image_offset, filter_non_overlap=True,
num_sample_per_gt=1)
rel_inds = self.get_rel_inds(result.rel_labels, im_inds, boxes)
rois = torch.cat((im_inds[:, None].float(), boxes), 1)
result.obj_fmap = self.obj_feature_map(result.fmap.detach(), rois)
# Prevent gradients from flowing back into score_fc from elsewhere
result.rm_obj_dists, result.obj_preds, edge_ctx = self.context(
result.obj_fmap,
result.rm_obj_dists.detach(),
im_inds, result.rm_obj_labels if self.training or self.mode == 'predcls' else None,
boxes.data, result.boxes_all)
if edge_ctx is None:
edge_rep = self.post_emb(result.obj_preds)
else:
edge_rep = self.post_lstm(edge_ctx)
# Split into subject and object representations
edge_rep = edge_rep.view(edge_rep.size(0), 2, self.pooling_dim)
subj_rep = edge_rep[:, 0]
obj_rep = edge_rep[:, 1]
prod_rep = subj_rep[rel_inds[:, 1]] * obj_rep[rel_inds[:, 2]]
if self.use_vision:
vr = self.visual_rep(result.fmap.detach(), rois, rel_inds[:, 1:])
if self.limit_vision:
# exact value TBD
prod_rep = torch.cat((prod_rep[:,:2048] * vr[:,:2048], prod_rep[:,2048:]), 1)
else:
prod_rep = prod_rep * vr
if self.use_tanh:
prod_rep = F.tanh(prod_rep)
result.rel_dists = self.rel_compress(prod_rep)
if self.use_bias:
result.rel_dists = result.rel_dists + self.freq_bias.index_with_labels(torch.stack((
result.obj_preds[rel_inds[:, 1]],
result.obj_preds[rel_inds[:, 2]],
), 1))
if self.training:
return result
twod_inds = arange(result.obj_preds.data) * self.num_classes + result.obj_preds.data
result.obj_scores = F.softmax(result.rm_obj_dists, dim=1).view(-1)[twod_inds]
# Bbox regression
if self.mode == 'sgdet':
bboxes = result.boxes_all.view(-1, 4)[twod_inds].view(result.boxes_all.size(0), 4)
else:
# Boxes will get fixed by filter_dets function.
bboxes = result.rm_box_priors
rel_rep = F.softmax(result.rel_dists, dim=1)
return filter_dets(bboxes, result.obj_scores,
result.obj_preds, rel_inds[:, 1:], rel_rep)
def __getitem__(self, batch):
""" Hack to do multi-GPU training"""
batch.scatter()
if self.num_gpus == 1:
return self(*batch[0])
replicas = nn.parallel.replicate(self, devices=list(range(self.num_gpus)))
outputs = nn.parallel.parallel_apply(replicas, [batch[i] for i in range(self.num_gpus)])
if self.training:
return gather_res(outputs, 0, dim=0)
return outputs
| 23,579 | 41.032086 | 136 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/word_vectors.py | """
Adapted from PyTorch's text library.
"""
import array
import os
import zipfile
import six
import torch
from six.moves.urllib.request import urlretrieve
from tqdm import tqdm
from config import DATA_PATH
import sys
def obj_edge_vectors(names, wv_type='glove.6B', wv_dir=DATA_PATH, wv_dim=300):
    """Look up a word vector for each name in `names`.

    Names without an exact match fall back to their longest
    whitespace-separated token; names with no match at all keep the random
    N(0, 1) initialization.

    :param names: list of class/predicate names
    :param wv_type: word-vector family, e.g. 'glove.6B'
    :param wv_dir: directory the vectors are cached in / downloaded to
    :param wv_dim: vector dimensionality
    :return: FloatTensor of shape [len(names), wv_dim]
    """
    wv_dict, wv_arr, wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)
    vectors = torch.Tensor(len(names), wv_dim)
    vectors.normal_(0,1)
    for i, token in enumerate(names):
        wv_index = wv_dict.get(token, None)
        if wv_index is not None:
            vectors[i] = wv_arr[wv_index]
        else:
            # Try the longest word (hopefully won't be a preposition
            lw_token = sorted(token.split(' '), key=lambda x: len(x), reverse=True)[0]
            print("{} -> {} ".format(token, lw_token))
            wv_index = wv_dict.get(lw_token, None)
            if wv_index is not None:
                vectors[i] = wv_arr[wv_index]
            else:
                print("fail on {}".format(token))
    return vectors
URL = {
'glove.42B': 'http://nlp.stanford.edu/data/glove.42B.300d.zip',
'glove.840B': 'http://nlp.stanford.edu/data/glove.840B.300d.zip',
'glove.twitter.27B': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
'glove.6B': 'http://nlp.stanford.edu/data/glove.6B.zip',
}
def load_word_vectors(root, wv_type, dim):
"""Load word vectors from a path, trying .pt, .txt, and .zip extensions."""
if isinstance(dim, int):
dim = str(dim) + 'd'
fname = os.path.join(root, wv_type + '.' + dim)
if os.path.isfile(fname + '.pt'):
fname_pt = fname + '.pt'
print('loading word vectors from', fname_pt)
try:
return torch.load(fname_pt)
except Exception as e:
print("""
Error loading the model from {}
This could be because this code was previously run with one
PyTorch version to generate cached data and is now being
run with another version.
You can try to delete the cached files on disk (this file
and others) and re-running the code
Error message:
---------
{}
""".format(fname_pt, str(e)))
sys.exit(-1)
if os.path.isfile(fname + '.txt'):
fname_txt = fname + '.txt'
cm = open(fname_txt, 'rb')
cm = [line for line in cm]
elif os.path.basename(wv_type) in URL:
url = URL[wv_type]
print('downloading word vectors from {}'.format(url))
filename = os.path.basename(fname)
if not os.path.exists(root):
os.makedirs(root)
with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
fname, _ = urlretrieve(url, fname, reporthook=reporthook(t))
with zipfile.ZipFile(fname, "r") as zf:
print('extracting word vectors into {}'.format(root))
zf.extractall(root)
if not os.path.isfile(fname + '.txt'):
raise RuntimeError('no word vectors of requested dimension found')
return load_word_vectors(root, wv_type, dim)
else:
raise RuntimeError('unable to load word vectors')
wv_tokens, wv_arr, wv_size = [], array.array('d'), None
if cm is not None:
for line in tqdm(range(len(cm)), desc="loading word vectors from {}".format(fname_txt)):
entries = cm[line].strip().split(b' ')
word, entries = entries[0], entries[1:]
if wv_size is None:
wv_size = len(entries)
try:
if isinstance(word, six.binary_type):
word = word.decode('utf-8')
except:
print('non-UTF8 token', repr(word), 'ignored')
continue
wv_arr.extend(float(x) for x in entries)
wv_tokens.append(word)
wv_dict = {word: i for i, word in enumerate(wv_tokens)}
wv_arr = torch.Tensor(wv_arr).view(-1, wv_size)
ret = (wv_dict, wv_arr, wv_size)
torch.save(ret, fname + '.pt')
return ret
def reporthook(t):
    """Build a urlretrieve-compatible progress hook driving tqdm bar `t`.

    https://github.com/tqdm/tqdm
    """
    seen_blocks = 0
    def inner(b=1, bsize=1, tsize=None):
        """
        b: int, optional
            Number of blocks just transferred [default: 1].
        bsize: int, optional
            Size of each block (in tqdm units) [default: 1].
        tsize: int, optional
            Total size (in tqdm units). If [default: None] remains unchanged.
        """
        nonlocal seen_blocks
        if tsize is not None:
            t.total = tsize
        # urlretrieve reports cumulative block counts; tqdm wants deltas.
        t.update((b - seen_blocks) * bsize)
        seen_blocks = b
    return inner
| 4,711 | 34.428571 | 96 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/object_detector.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
from torch.nn import functional as F
from config import ANCHOR_SIZE, ANCHOR_RATIOS, ANCHOR_SCALES
from lib.fpn.generate_anchors import generate_anchors
from lib.fpn.box_utils import bbox_preds, center_size, bbox_overlaps
from lib.fpn.nms.functions.nms import apply_nms
from lib.fpn.proposal_assignments.proposal_assignments_gtbox import proposal_assignments_gtbox
from lib.fpn.proposal_assignments.proposal_assignments_det import proposal_assignments_det
from lib.fpn.roi_align.functions.roi_align import RoIAlignFunction
from lib.pytorch_misc import enumerate_by_image, gather_nd, diagonal_inds, Flattener
from torchvision.models.vgg import vgg16
from torchvision.models.resnet import resnet101
from torch.nn.parallel._functions import Gather
class Result(object):
    """ little container class for holding the detection result
    od: object detector, rm: rel model"""
    def __init__(self, od_obj_dists=None, rm_obj_dists=None,
                 obj_scores=None, obj_preds=None, obj_fmap=None,
                 od_box_deltas=None, rm_box_deltas=None,
                 od_box_targets=None, rm_box_targets=None, od_box_priors=None, rm_box_priors=None,
                 boxes_assigned=None, boxes_all=None, od_obj_labels=None, rm_obj_labels=None,
                 rpn_scores=None, rpn_box_deltas=None, rel_labels=None,
                 im_inds=None, fmap=None, rel_dists=None, rel_inds=None, rel_rep=None):
        # Copy every keyword argument onto the instance under the same name.
        fields = dict(locals())
        fields.pop('self')
        self.__dict__.update(fields)
    def is_none(self):
        """True when every stored field is still None (an empty result)."""
        return all(value is None for key, value in self.__dict__.items() if key != 'self')
def gather_res(outputs, target_device, dim=0):
    """
    Assuming the signatures are the same accross results!

    Gathers a list of per-replica Result objects into one Result on
    ``target_device`` by concatenating each populated field along ``dim``.
    :param outputs: list of Result objects (one per GPU replica).
    :param target_device: device id the gathered tensors are moved to.
    :param dim: concatenation dimension (0 = batch dimension).
    """
    out = outputs[0]
    # Only fields the first replica populated are gathered; per the note above,
    # all replicas are assumed to have populated the same fields.
    args = {field: Gather.apply(target_device, dim, *[getattr(o, field) for o in outputs])
            for field, v in out.__dict__.items() if v is not None}
    return type(out)(**args)
class ObjectDetector(nn.Module):
    """
    Core model for doing object detection + getting the visual features. This could be the first step in
    a pipeline. We can provide GT rois or use the RPN (which would then be classification!)

    Modes:
      'rpntrain'   - train RPN + detection heads.
      'gtbox'      - use ground-truth boxes directly (no box regression).
      'refinerels' - RPN proposals passed through for a downstream rel model.
      'proposals'  - externally supplied proposals.
    """
    MODES = ('rpntrain', 'gtbox', 'refinerels', 'proposals')

    def __init__(self, classes, mode='rpntrain', num_gpus=1, nms_filter_duplicates=True,
                 max_per_img=64, use_resnet=False, thresh=0.05):
        """
        :param classes: Object classes
        :param rel_classes: Relationship classes. None if were not using rel mode
        :param num_gpus: how many GPUS 2 use
        :param nms_filter_duplicates: keep at most one class label per box after NMS
        :param max_per_img: cap on detections kept per image
        :param use_resnet: use ResNet-101 backbone instead of VGG16 (deprecated path)
        :param thresh: score threshold below which detections are dropped
        """
        super(ObjectDetector, self).__init__()

        if mode not in self.MODES:
            raise ValueError("invalid mode")
        self.mode = mode
        self.classes = classes
        self.num_gpus = num_gpus
        self.pooling_size = 7
        self.nms_filter_duplicates = nms_filter_duplicates
        self.max_per_img = max_per_img
        self.use_resnet = use_resnet
        self.thresh = thresh
        if not self.use_resnet:
            # VGG16 backbone: conv trunk for the feature map, fc layers for ROI features.
            vgg_model = load_vgg()
            self.features = vgg_model.features
            self.roi_fmap = vgg_model.classifier
            rpn_input_dim = 512
            output_dim = 4096
        else: # Deprecated
            self.features = load_resnet()
            # 1x1 conv to shrink the 1024-d ResNet feature map before ROI pooling.
            self.compress = nn.Sequential(
                nn.Conv2d(1024, 256, kernel_size=1),
                nn.ReLU(inplace=True),
                nn.BatchNorm2d(256),
            )
            self.roi_fmap = nn.Sequential(
                nn.Linear(256 * 7 * 7, 2048),
                nn.SELU(inplace=True),
                nn.AlphaDropout(p=0.05),
                nn.Linear(2048, 2048),
                nn.SELU(inplace=True),
                nn.AlphaDropout(p=0.05),
            )
            rpn_input_dim = 1024
            output_dim = 2048
        # Per-ROI classification head and class-specific box-regression head.
        self.score_fc = nn.Linear(output_dim, self.num_classes)
        self.bbox_fc = nn.Linear(output_dim, self.num_classes * 4)
        self.rpn_head = RPNHead(dim=512, input_dim=rpn_input_dim)

    @property
    def num_classes(self):
        # Includes the background class (classes[0] is '__background__' by convention
        # elsewhere in this codebase — TODO confirm against the dataset loader).
        return len(self.classes)

    def feature_map(self, x):
        """
        Produces feature map from the input image
        :param x: [batch_size, 3, size, size] float32 padded image
        :return: Feature maps at 1/16 the original size.
                 Each one is [batch_size, dim, IM_SIZE/k, IM_SIZE/k].
        """
        if not self.use_resnet:
            return self.features(x)  # Uncomment this for "stanford" setting in which it's frozen: .detach()

        # ResNet path: run the trunk manually up through layer3 (stride 16).
        x = self.features.conv1(x)
        x = self.features.bn1(x)
        x = self.features.relu(x)
        x = self.features.maxpool(x)

        c2 = self.features.layer1(x)
        c3 = self.features.layer2(c2)
        c4 = self.features.layer3(c3)
        return c4

    def obj_feature_map(self, features, rois):
        """
        Gets the ROI features
        :param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4] (features at level p2)
        :param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
        :return: [num_rois, #dim] array
        """
        feature_pool = RoIAlignFunction(self.pooling_size, self.pooling_size, spatial_scale=1 / 16)(
            self.compress(features) if self.use_resnet else features, rois)
        return self.roi_fmap(feature_pool.view(rois.size(0), -1))

    def rpn_boxes(self, fmap, im_sizes, image_offset, gt_boxes=None, gt_classes=None, gt_rels=None,
                  train_anchor_inds=None, proposals=None):
        """
        Gets boxes from the RPN
        :param fmap: backbone feature map at stride 16
        :param im_sizes: [batch_size, 3] numpy array of (h, w, scale)
        :param image_offset: first image index of this GPU's shard (for multi-GPU)
        :param gt_boxes: [num_gt, 4] GT boxes (training only)
        :param gt_classes: [num_gt, 2] of (img_ind, class) (training only)
        :param gt_rels: GT relations; must be None when training the detector itself
        :param train_anchor_inds: anchor indices used to compute the RPN loss
        :return: (rois, labels, bbox_targets, rpn_scores, rpn_box_deltas, rel_labels)
        """
        rpn_feats = self.rpn_head(fmap)
        # More candidates are kept when actually training the RPN.
        rois = self.rpn_head.roi_proposals(
            rpn_feats, im_sizes, nms_thresh=0.7,
            pre_nms_topn=12000 if self.training and self.mode == 'rpntrain' else 6000,
            post_nms_topn=2000 if self.training and self.mode == 'rpntrain' else 1000,
        )
        if self.training:
            if gt_boxes is None or gt_classes is None or train_anchor_inds is None:
                raise ValueError(
                    "Must supply GT boxes, GT classes, trainanchors when in train mode")
            rpn_scores, rpn_box_deltas = self.rpn_head.anchor_preds(rpn_feats, train_anchor_inds,
                                                                    image_offset)
            if gt_rels is not None and self.mode == 'rpntrain':
                raise ValueError("Training the object detector and the relationship model with detection"
                                 "at the same time isn't supported")

            if self.mode == 'refinerels':
                # Proposals are kept as-is; labels are assigned later (in forward) after NMS.
                all_rois = Variable(rois)
                # Potentially you could add in GT rois if none match
                # is_match = (bbox_overlaps(rois[:,1:].contiguous(), gt_boxes.data) > 0.5).long()
                # gt_not_matched = (is_match.sum(0) == 0).nonzero()
                #
                # if gt_not_matched.dim() > 0:
                #     gt_to_add = torch.cat((gt_classes[:,0,None][gt_not_matched.squeeze(1)].float(),
                #                            gt_boxes[gt_not_matched.squeeze(1)]), 1)
                #
                #     all_rois = torch.cat((all_rois, gt_to_add),0)
                #     num_gt = gt_to_add.size(0)

                labels = None
                bbox_targets = None
                rel_labels = None
            else:
                # Standard detector training: match proposals to GT for cls/reg targets.
                all_rois, labels, bbox_targets = proposal_assignments_det(
                    rois, gt_boxes.data, gt_classes.data, image_offset, fg_thresh=0.5)
                rel_labels = None
        else:
            all_rois = Variable(rois, volatile=True)
            labels = None
            bbox_targets = None
            rel_labels = None
            rpn_box_deltas = None
            rpn_scores = None

        return all_rois, labels, bbox_targets, rpn_scores, rpn_box_deltas, rel_labels

    def gt_boxes(self, fmap, im_sizes, image_offset, gt_boxes=None, gt_classes=None, gt_rels=None,
                 train_anchor_inds=None, proposals=None):
        """
        Gets GT boxes!
        :param fmap: unused here (kept for signature parity with rpn_boxes)
        :param im_sizes: unused here
        :param image_offset: first image index of this GPU's shard
        :param gt_boxes: [num_gt, 4] GT boxes
        :param gt_classes: [num_gt, 2] of (img_ind, class)
        :param gt_rels: GT relations; triggers relation-label assignment when training
        :param train_anchor_inds: unused here
        :return: (rois, labels, None, None, None, rel_labels)
        """
        assert gt_boxes is not None
        im_inds = gt_classes[:, 0] - image_offset
        rois = torch.cat((im_inds.float()[:, None], gt_boxes), 1)
        if gt_rels is not None and self.training:
            rois, labels, rel_labels = proposal_assignments_gtbox(
                rois.data, gt_boxes.data, gt_classes.data, gt_rels.data, image_offset,
                fg_thresh=0.5)
        else:
            labels = gt_classes[:, 1]
            rel_labels = None

        return rois, labels, None, None, None, rel_labels

    def proposal_boxes(self, fmap, im_sizes, image_offset, gt_boxes=None, gt_classes=None, gt_rels=None,
                       train_anchor_inds=None, proposals=None):
        """
        Gets boxes from the RPN
        (here: from externally precomputed proposals rather than this model's RPN)
        :param fmap: unused here
        :param im_sizes: [batch_size, 3] numpy array of (h, w, scale)
        :param image_offset: first image index of this GPU's shard
        :param gt_boxes: GT boxes (training only)
        :param gt_classes: GT (img_ind, class) pairs (training only)
        :param gt_rels: unused here
        :param train_anchor_inds: unused here
        :param proposals: [num_prop, 6] of (img_ind, score, x1, y1, x2, y2)
        :return: (rois, labels, bbox_targets, rpn_scores, rpn_box_deltas, rel_labels)
        """
        assert proposals is not None

        rois = filter_roi_proposals(proposals[:, 2:].data.contiguous(), proposals[:, 1].data.contiguous(),
                                    np.array([2000] * len(im_sizes)),
                                    nms_thresh=0.7,
                                    pre_nms_topn=12000 if self.training and self.mode == 'rpntrain' else 6000,
                                    post_nms_topn=2000 if self.training and self.mode == 'rpntrain' else 1000,
                                    )
        if self.training:
            all_rois, labels, bbox_targets = proposal_assignments_det(
                rois, gt_boxes.data, gt_classes.data, image_offset, fg_thresh=0.5)

            # RETRAINING FOR DETECTION HERE.
            all_rois = torch.cat((all_rois, Variable(rois)), 0)
        else:
            all_rois = Variable(rois, volatile=True)
            labels = None
            bbox_targets = None

        rpn_scores = None
        rpn_box_deltas = None
        rel_labels = None

        return all_rois, labels, bbox_targets, rpn_scores, rpn_box_deltas, rel_labels

    def get_boxes(self, *args, **kwargs):
        # Dispatch to the box source implied by self.mode.
        if self.mode == 'gtbox':
            fn = self.gt_boxes
        elif self.mode == 'proposals':
            assert kwargs['proposals'] is not None
            fn = self.proposal_boxes
        else:
            fn = self.rpn_boxes
        return fn(*args, **kwargs)

    def forward(self, x, im_sizes, image_offset,
                gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None, train_anchor_inds=None,
                return_fmap=False):
        """
        Forward pass for detection
        :param x: Images@[batch_size, 3, IM_SIZE, IM_SIZE]
        :param im_sizes: A numpy array of (h, w, scale) for each image.
        :param image_offset: Offset onto what image we're on for MGPU training (if single GPU this is 0)
        :param gt_boxes:

        Training parameters:
        :param gt_boxes: [num_gt, 4] GT boxes over the batch.
        :param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
        :param proposals: things
        :param train_anchor_inds: a [num_train, 2] array of indices for the anchors that will
                                  be used to compute the training loss. Each (img_ind, fpn_idx)
        :return: If train: a Result bundling detector outputs, targets, and RPN outputs.
        """
        fmap = self.feature_map(x)

        # Get boxes from RPN
        rois, obj_labels, bbox_targets, rpn_scores, rpn_box_deltas, rel_labels = \
            self.get_boxes(fmap, im_sizes, image_offset, gt_boxes,
                           gt_classes, gt_rels, train_anchor_inds, proposals=proposals)

        # Now classify them
        obj_fmap = self.obj_feature_map(fmap, rois)
        od_obj_dists = self.score_fc(obj_fmap)
        # No box regression when using GT boxes.
        od_box_deltas = self.bbox_fc(obj_fmap).view(
            -1, len(self.classes), 4) if self.mode != 'gtbox' else None

        od_box_priors = rois[:, 1:]

        if (not self.training and not self.mode == 'gtbox') or self.mode in ('proposals', 'refinerels'):
            # Apply NMS and keep only the surviving detections for the rel model.
            nms_inds, nms_scores, nms_preds, nms_boxes_assign, nms_boxes, nms_imgs = self.nms_boxes(
                od_obj_dists,
                rois,
                od_box_deltas, im_sizes,
            )
            im_inds = nms_imgs + image_offset
            obj_dists = od_obj_dists[nms_inds]
            obj_fmap = obj_fmap[nms_inds]
            box_deltas = od_box_deltas[nms_inds]
            box_priors = nms_boxes[:, 0]

            if self.training and not self.mode == 'gtbox':
                # NOTE: If we're doing this during training, we need to assign labels here.
                pred_to_gtbox = bbox_overlaps(box_priors, gt_boxes).data
                # Zero out overlaps across different images in the batch.
                pred_to_gtbox[im_inds.data[:, None] != gt_classes.data[None, :, 0]] = 0.0

                max_overlaps, argmax_overlaps = pred_to_gtbox.max(1)
                rm_obj_labels = gt_classes[:, 1][argmax_overlaps]
                # Detections not overlapping any GT box enough become background.
                rm_obj_labels[max_overlaps < 0.5] = 0
            else:
                rm_obj_labels = None
        else:
            # GT-box / plain training path: every ROI is kept.
            im_inds = rois[:, 0].long().contiguous() + image_offset
            nms_scores = None
            nms_preds = None
            nms_boxes_assign = None
            nms_boxes = None
            box_priors = rois[:, 1:]
            rm_obj_labels = obj_labels
            box_deltas = od_box_deltas
            obj_dists = od_obj_dists

        return Result(
            od_obj_dists=od_obj_dists,
            rm_obj_dists=obj_dists,
            obj_scores=nms_scores,
            obj_preds=nms_preds,
            obj_fmap=obj_fmap,
            od_box_deltas=od_box_deltas,
            rm_box_deltas=box_deltas,
            od_box_targets=bbox_targets,
            rm_box_targets=bbox_targets,
            od_box_priors=od_box_priors,
            rm_box_priors=box_priors,
            boxes_assigned=nms_boxes_assign,
            boxes_all=nms_boxes,
            od_obj_labels=obj_labels,
            rm_obj_labels=rm_obj_labels,
            rpn_scores=rpn_scores,
            rpn_box_deltas=rpn_box_deltas,
            rel_labels=rel_labels,
            im_inds=im_inds,
            fmap=fmap if return_fmap else None,
        )

    def nms_boxes(self, obj_dists, rois, box_deltas, im_sizes):
        """
        Performs NMS on the boxes
        :param obj_dists: [#rois, #classes]
        :param rois: [#rois, 5]
        :param box_deltas: [#rois, #classes, 4]
        :param im_sizes: sizes of images
        :return
            nms_inds [#nms]
            nms_scores [#nms]
            nms_labels [#nms]
            nms_boxes_assign [#nms, 4]
            nms_boxes [#nms, #classes, 4]. classid=0 is the box prior.
        """
        # Now produce the boxes
        # box deltas is (num_rois, num_classes, 4) but rois is only #(num_rois, 4)
        boxes = bbox_preds(rois[:, None, 1:].expand_as(box_deltas).contiguous().view(-1, 4),
                           box_deltas.view(-1, 4)).view(*box_deltas.size())

        # Clip the boxes and get the best N dets per image.
        inds = rois[:, 0].long().contiguous()
        dets = []
        for i, s, e in enumerate_by_image(inds.data):
            h, w = im_sizes[i, :2]
            # Clamp each coordinate channel into the image bounds.
            boxes[s:e, :, 0].data.clamp_(min=0, max=w - 1)
            boxes[s:e, :, 1].data.clamp_(min=0, max=h - 1)
            boxes[s:e, :, 2].data.clamp_(min=0, max=w - 1)
            boxes[s:e, :, 3].data.clamp_(min=0, max=h - 1)
            d_filtered = filter_det(
                F.softmax(obj_dists[s:e], 1), boxes[s:e], start_ind=s,
                nms_filter_duplicates=self.nms_filter_duplicates,
                max_per_img=self.max_per_img,
                thresh=self.thresh,
            )
            if d_filtered is not None:
                dets.append(d_filtered)

        if len(dets) == 0:
            print("nothing was detected", flush=True)
            return None
        nms_inds, nms_scores, nms_labels = [torch.cat(x, 0) for x in zip(*dets)]
        # Flat index into boxes.view(-1, 4): roi index * #classes + class label.
        twod_inds = nms_inds * boxes.size(1) + nms_labels.data
        nms_boxes_assign = boxes.view(-1, 4)[twod_inds]

        # Prepend the (class-agnostic) ROI prior box as "class 0".
        nms_boxes = torch.cat((rois[:, 1:][nms_inds][:, None], boxes[nms_inds][:, 1:]), 1)
        return nms_inds, nms_scores, nms_labels, nms_boxes_assign, nms_boxes, inds[nms_inds]

    def __getitem__(self, batch):
        """ Hack to do multi-GPU training"""
        batch.scatter()
        if self.num_gpus == 1:
            return self(*batch[0])

        replicas = nn.parallel.replicate(self, devices=list(range(self.num_gpus)))
        outputs = nn.parallel.parallel_apply(replicas, [batch[i] for i in range(self.num_gpus)])

        if any([x.is_none() for x in outputs]):
            assert not self.training
            return None
        return gather_res(outputs, 0, dim=0)
def filter_det(scores, boxes, start_ind=0, max_per_img=100, thresh=0.001, pre_nms_topn=6000,
               post_nms_topn=300, nms_thresh=0.3, nms_filter_duplicates=True):
    """
    Filters the detections for a single image
    :param scores: [num_rois, num_classes]
    :param boxes: [num_rois, num_classes, 4]. Assumes the boxes have been clamped
    :param max_per_img: Max detections per image
    :param thresh: Threshold for calling it a good box
    :param nms_filter_duplicates: True if we shouldn't allow for mulitple detections of the
            same box (with different labels)
    :return: A numpy concatenated array with up to 100 detections/img [num_im, x1, y1, x2, y2, score, cls]
    """
    # Classes (excluding background at index 0) whose best score clears the threshold.
    valid_cls = (scores[:, 1:].data.max(0)[0] > thresh).nonzero() + 1

    # NOTE(review): .dim() == 0 as an emptiness test is old-PyTorch behavior;
    # on modern versions an empty nonzero() result still has dim 2.
    if valid_cls.dim() == 0:
        return None

    # Per-class NMS: nms_mask marks (roi, class) pairs that survive.
    nms_mask = scores.data.clone()
    nms_mask.zero_()

    for c_i in valid_cls.squeeze(1).cpu():
        scores_ci = scores.data[:, c_i]
        boxes_ci = boxes.data[:, c_i]

        keep = apply_nms(scores_ci, boxes_ci,
                         pre_nms_topn=pre_nms_topn, post_nms_topn=post_nms_topn,
                         nms_thresh=nms_thresh)
        nms_mask[:, c_i][keep] = 1

    dists_all = Variable(nms_mask * scores.data, volatile=True)

    if nms_filter_duplicates:
        # Keep only the single best surviving class per ROI.
        scores_pre, labels_pre = dists_all.data.max(1)
        inds_all = scores_pre.nonzero()
        assert inds_all.dim() != 0
        inds_all = inds_all.squeeze(1)

        labels_all = labels_pre[inds_all]
        scores_all = scores_pre[inds_all]
    else:
        # Keep every surviving (ROI, class) pair.
        nz = nms_mask.nonzero()
        assert nz.dim() != 0
        inds_all = nz[:, 0]
        labels_all = nz[:, 1]
        scores_all = scores.data.view(-1)[inds_all * scores.data.size(1) + labels_all]

    # dists_all = dists_all[inds_all]
    # dists_all[:,0] = 1.0-dists_all.sum(1)

    # # Limit to max per image detections
    vs, idx = torch.sort(scores_all, dim=0, descending=True)
    idx = idx[vs > thresh]
    if max_per_img < idx.size(0):
        idx = idx[:max_per_img]

    # Shift ROI indices into batch coordinates via start_ind.
    inds_all = inds_all[idx] + start_ind
    scores_all = Variable(scores_all[idx], volatile=True)
    labels_all = Variable(labels_all[idx], volatile=True)

    # dists_all = dists_all[idx]

    return inds_all, scores_all, labels_all
class RPNHead(nn.Module):
    """
    Serves as the class + box outputs for each level in the FPN.

    Per anchor it predicts 6 numbers: 2 objectness logits + 4 box deltas
    (hence anchor_target_dim = 6).
    """
    def __init__(self, dim=512, input_dim=1024):
        """
        :param aspect_ratios: Aspect ratios for the anchors. NOTE - this can't be changed now
            as it depends on other things in the C code...
        :param dim: hidden width of the 3x3 conv
        :param input_dim: channel count of the incoming backbone feature map
        """
        super(RPNHead, self).__init__()
        self.anchor_target_dim = 6  # 2 class logits + 4 box deltas per anchor
        self.stride = 16            # feature-map stride relative to the input image

        self.conv = nn.Sequential(
            nn.Conv2d(input_dim, dim, kernel_size=3, padding=1),
            nn.ReLU6(inplace=True),  # Tensorflow docs use Relu6, so let's use it too....
            nn.Conv2d(dim, self.anchor_target_dim * self._A,
                      kernel_size=1)
        )

        ans_np = generate_anchors(base_size=ANCHOR_SIZE,
                                  feat_stride=self.stride,
                                  anchor_scales=ANCHOR_SCALES,
                                  anchor_ratios=ANCHOR_RATIOS,
                                  )
        # Buffer (not a Parameter): moves with .cuda()/.cpu() but is not trained.
        self.register_buffer('anchors', torch.FloatTensor(ans_np))

    @property
    def _A(self):
        # Number of anchors per spatial location.
        return len(ANCHOR_RATIOS) * len(ANCHOR_SCALES)

    def forward(self, fmap):
        """
        Gets the class / noclass predictions over all the scales
        :param fmap: [batch_size, dim, IM_SIZE/16, IM_SIZE/16] featuremap
        :return: [batch_size, IM_SIZE/16, IM_SIZE/16, A, 6]
        """
        rez = self._reshape_channels(self.conv(fmap))
        rez = rez.view(rez.size(0), rez.size(1), rez.size(2),
                       self._A, self.anchor_target_dim)
        return rez

    def anchor_preds(self, preds, train_anchor_inds, image_offset):
        """
        Get predictions for the training indices
        :param preds: [batch_size, IM_SIZE/16, IM_SIZE/16, A, 6]
        :param train_anchor_inds: [num_train, 4] indices into the predictions
        :param image_offset: first image index of this GPU's shard
        :return: class_preds: [num_train, 2] array of yes/no
                 box_preds: [num_train, 4] array of predicted boxes
        """
        assert train_anchor_inds.size(1) == 4
        # Rebase global image indices onto this GPU's local batch.
        tai = train_anchor_inds.data.clone()
        tai[:, 0] -= image_offset

        train_regions = gather_nd(preds, tai)
        class_preds = train_regions[:, :2]
        box_preds = train_regions[:, 2:]
        return class_preds, box_preds

    @staticmethod
    def _reshape_channels(x):
        """ [batch_size, channels, h, w] -> [batch_size, h, w, channels] """
        assert x.dim() == 4
        batch_size, nc, h, w = x.size()
        x_t = x.view(batch_size, nc, -1).transpose(1, 2).contiguous()
        x_t = x_t.view(batch_size, h, w, nc)
        return x_t

    def roi_proposals(self, fmap, im_sizes, nms_thresh=0.7, pre_nms_topn=12000, post_nms_topn=2000):
        """
        :param fmap: [batch_size, IM_SIZE/16, IM_SIZE/16, A, 6]
        :param im_sizes: [batch_size, 3] numpy array of (h, w, scale)
        :return: ROIS: shape [a <=post_nms_topn, 5] array of ROIS.
        """
        class_fmap = fmap[:, :, :, :, :2].contiguous()

        # GET THE GOOD BOXES AYY LMAO :')
        # Foreground probability per anchor.
        class_preds = F.softmax(class_fmap, 4)[..., 1].data.contiguous()
        box_fmap = fmap[:, :, :, :, 2:].data.contiguous()

        anchor_stacked = torch.cat([self.anchors[None]] * fmap.size(0), 0)
        # Decode deltas against the anchors.
        box_preds = bbox_preds(anchor_stacked.view(-1, 4), box_fmap.view(-1, 4)).view(
            *box_fmap.size())

        for i, (h, w, scale) in enumerate(im_sizes):
            # Zero out all the bad boxes h, w, A, 4
            # (locations beyond the unpadded image get a negative score so NMS drops them)
            h_end = int(h) // self.stride
            w_end = int(w) // self.stride
            if h_end < class_preds.size(1):
                class_preds[i, h_end:] = -0.01
            if w_end < class_preds.size(2):
                class_preds[i, :, w_end:] = -0.01

            # and clamp the others
            box_preds[i, :, :, :, 0].clamp_(min=0, max=w - 1)
            box_preds[i, :, :, :, 1].clamp_(min=0, max=h - 1)
            box_preds[i, :, :, :, 2].clamp_(min=0, max=w - 1)
            box_preds[i, :, :, :, 3].clamp_(min=0, max=h - 1)

        # Suppress degenerate boxes (width or height under 4 pixels).
        sizes = center_size(box_preds.view(-1, 4))
        class_preds.view(-1)[(sizes[:, 2] < 4) | (sizes[:, 3] < 4)] = -0.01

        return filter_roi_proposals(box_preds.view(-1, 4), class_preds.view(-1),
                                    boxes_per_im=np.array([np.prod(box_preds.size()[1:-1])] * fmap.size(0)),
                                    nms_thresh=nms_thresh,
                                    pre_nms_topn=pre_nms_topn, post_nms_topn=post_nms_topn)
def filter_roi_proposals(box_preds, class_preds, boxes_per_im, nms_thresh=0.7, pre_nms_topn=12000, post_nms_topn=2000):
    """Run per-image NMS over flattened proposals and prepend image indices.

    :param box_preds: [#boxes, 4] candidate boxes for all images, flattened
    :param class_preds: [#boxes] objectness scores
    :param boxes_per_im: numpy array with the candidate count for each image
    :return: [#keep, 5] rois of (img_ind, x1, y1, x2, y2) on box_preds' device
    """
    keep_inds, kept_per_im = apply_nms(
        class_preds,
        box_preds,
        pre_nms_topn=pre_nms_topn,
        post_nms_topn=post_nms_topn,
        boxes_per_im=boxes_per_im,
        nms_thresh=nms_thresh,
    )
    # One column of image indices, repeated to match each image's surviving boxes.
    per_image_cols = [im_ind * torch.ones(count) for im_ind, count in enumerate(kept_per_im)]
    img_col = torch.cat(per_image_cols, 0).cuda(box_preds.get_device())
    return torch.cat((img_col[:, None], box_preds[keep_inds]), 1)
def load_resnet():
    """Load an ImageNet-pretrained ResNet-101 and strip it down to the conv
    trunk through layer3, for use as a stride-16 feature extractor."""
    model = resnet101(pretrained=True)
    # Drop the last stage and the classification head; they are never used.
    for unused in ('layer4', 'avgpool', 'fc'):
        delattr(model, unused)
    return model
def load_vgg(use_dropout=True, use_relu=True, use_linear=True, pretrained=True):
    """Load VGG16 and trim it for ROI feature extraction.

    Always removes the final maxpool from ``features`` and the class-score
    layer from ``classifier``; the flags optionally peel further layers off
    the classifier tail (deepest index first, so positions stay valid).

    :param use_dropout: keep classifier layer '5' (dropout)
    :param use_relu: keep classifier layer '4' (ReLU)
    :param use_linear: keep classifier layer '3' (second linear layer)
    :param pretrained: load ImageNet weights
    """
    model = vgg16(pretrained=pretrained)
    del model.features._modules['30']   # the final maxpool
    del model.classifier._modules['6']  # the class-score layer
    optional_removals = (('5', not use_dropout),   # dropout
                         ('4', not use_relu),      # relu activation
                         ('3', not use_linear))    # linear layer
    for key, remove in optional_removals:
        if remove:
            del model.classifier._modules[key]
    return model
| 25,429 | 39.11041 | 119 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/get_union_boxes.py | """
credits to https://github.com/ruotianluo/pytorch-faster-rcnn/blob/master/lib/nets/network.py#L91
"""
import torch
from torch.autograd import Variable
from torch.nn import functional as F
from lib.fpn.roi_align.functions.roi_align import RoIAlignFunction
from lib.draw_rectangles.draw_rectangles import draw_union_boxes
import numpy as np
from torch.nn.modules.module import Module
from torch import nn
from config import BATCHNORM_MOMENTUM
class UnionBoxesAndFeats(Module):
    """Pools features over the union box of each object pair and (optionally)
    fuses them with a rasterized 2-channel mask of the two boxes."""
    def __init__(self, pooling_size=7, stride=16, dim=256, concat=False, use_feats=True):
        """
        :param pooling_size: Pool the union boxes to this dimension
        :param stride: pixel spacing in the entire image
        :param dim: Dimension of the feats
        :param concat: Whether to concat (yes) or add (False) the representations
        :param use_feats: if False, return only the pooled union features (detached)
        """
        super(UnionBoxesAndFeats, self).__init__()

        self.pooling_size = pooling_size
        self.stride = stride

        self.dim = dim
        self.use_feats = use_feats

        # Small CNN that embeds the 2-channel (subject mask, object mask) rasterization.
        self.conv = nn.Sequential(
            nn.Conv2d(2, dim //2, kernel_size=7, stride=2, padding=3, bias=True),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(dim//2, momentum=BATCHNORM_MOMENTUM),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            nn.Conv2d(dim // 2, dim, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(dim, momentum=BATCHNORM_MOMENTUM),
        )
        self.concat = concat

    def forward(self, fmap, rois, union_inds):
        """
        :param fmap: [batch_size, dim, IM_SIZE/stride, IM_SIZE/stride] feature map
        :param rois: [num_rois, 5] of (im_ind, x1, y1, x2, y2)
        :param union_inds: [num_pairs, 2] ROI-index pairs to form unions of
        :return: [num_pairs, dim(*2 if concat), pooling_size, pooling_size]
        """
        union_pools = union_boxes(fmap, rois, union_inds, pooling_size=self.pooling_size, stride=self.stride)
        if not self.use_feats:
            return union_pools.detach()

        # Concatenate the two boxes of each pair: (x1,y1,x2,y2, x1,y1,x2,y2).
        pair_rois = torch.cat((rois[:, 1:][union_inds[:, 0]], rois[:, 1:][union_inds[:, 1]]),1).data.cpu().numpy()
        # rects_np = get_rect_features(pair_rois, self.pooling_size*2-1) - 0.5
        # Rasterize both boxes into a 2-channel grid, centered around 0.
        rects_np = draw_union_boxes(pair_rois, self.pooling_size*4-1) - 0.5
        rects = Variable(torch.FloatTensor(rects_np).cuda(fmap.get_device()), volatile=fmap.volatile)
        if self.concat:
            return torch.cat((union_pools, self.conv(rects)), 1)
        return union_pools + self.conv(rects)
def union_boxes(fmap, rois, union_inds, pooling_size=14, stride=16):
    """
    ROI-aligns features over the union (bounding) box of each ROI pair.
    :param fmap: (batch_size, d, IM_SIZE/stride, IM_SIZE/stride)
    :param rois: (num_rois, 5) with [im_ind, x1, y1, x2, y2]
    :param union_inds: (num_urois, 2) with [roi_ind1, roi_ind2]
    :param pooling_size: we'll resize to this
    :param stride: feature-map stride relative to the input image
    :return: (num_urois, d, pooling_size, pooling_size) pooled union features
    """
    assert union_inds.size(1) == 2
    im_inds = rois[:,0][union_inds[:,0]]
    # Both ROIs of a pair must come from the same image.
    assert (im_inds.data == rois.data[:,0][union_inds[:,1]]).sum() == union_inds.size(0)
    # Union box = elementwise min of the top-left corners, max of the bottom-rights.
    union_rois = torch.cat((
        im_inds[:,None],
        torch.min(rois[:, 1:3][union_inds[:, 0]], rois[:, 1:3][union_inds[:, 1]]),
        torch.max(rois[:, 3:5][union_inds[:, 0]], rois[:, 3:5][union_inds[:, 1]]),
    ),1)

    # (num_rois, d, pooling_size, pooling_size)
    union_pools = RoIAlignFunction(pooling_size, pooling_size,
                                   spatial_scale=1/stride)(fmap, union_rois)
    return union_pools
| 3,235 | 39.45 | 114 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/sparse_targets.py | from lib.word_vectors import obj_edge_vectors
import torch.nn as nn
import torch
from torch.autograd import Variable
import numpy as np
from config import DATA_PATH
import os
from lib.get_dataset_counts import get_counts
class FrequencyBias(nn.Module):
    """
    The goal of this is to provide a simplified way of computing
    P(predicate | obj1, obj2, img).

    Stores log dataset frequencies of predicates per (subject class, object
    class) pair in an embedding table indexed by class-pair id.
    """

    def __init__(self, eps=1e-3):
        super(FrequencyBias, self).__init__()

        # fg_matrix: [#objs, #objs, #preds] co-occurrence counts;
        # bg_matrix: [#objs, #objs] no-relation counts.
        fg_matrix, bg_matrix = get_counts(must_overlap=True)
        # Laplace-smooth the background counts, then install them as
        # predicate 0 ("no relation") BEFORE normalizing — order matters here.
        bg_matrix += 1
        fg_matrix[:, :, 0] = bg_matrix

        # Log of the per-pair predicate distribution (eps avoids log(0)).
        pred_dist = np.log(fg_matrix / fg_matrix.sum(2)[:, :, None] + eps)

        self.num_objs = pred_dist.shape[0]
        pred_dist = torch.FloatTensor(pred_dist).view(-1, pred_dist.shape[2])

        # Frozen-by-construction lookup table: row = obj1 * num_objs + obj2.
        self.obj_baseline = nn.Embedding(pred_dist.size(0), pred_dist.size(1))
        self.obj_baseline.weight.data = pred_dist

    def index_with_labels(self, labels):
        """
        :param labels: [batch_size, 2] of (subject class, object class)
        :return: [batch_size, #predicates] log-frequency priors
        """
        return self.obj_baseline(labels[:, 0] * self.num_objs + labels[:, 1])

    def forward(self, obj_cands0, obj_cands1):
        """
        :param obj_cands0: [batch_size, 151] prob distibution over cands.
        :param obj_cands1: [batch_size, 151] prob distibution over cands.
        :return: [batch_size, #predicates] array, which contains potentials for
        each possibility
        """
        # [batch_size, 151, 151] repr of the joint distribution
        joint_cands = obj_cands0[:, :, None] * obj_cands1[:, None]

        # [151, 151, 51] of targets per.
        # Expected bias under the joint class distribution.
        baseline = joint_cands.view(joint_cands.size(0), -1) @ self.obj_baseline.weight

        return baseline
| 1,718 | 31.433962 | 87 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/surgery.py | # create predictions from the other stuff
"""
Go from proposals + scores to relationships.
pred-cls: No bbox regression, obj dist is exactly known
sg-cls : No bbox regression
sg-det : Bbox regression
in all cases we'll return:
boxes, objs, rels, pred_scores
"""
import numpy as np
import torch
from lib.pytorch_misc import unravel_index
from lib.fpn.box_utils import bbox_overlaps
# from ad3 import factor_graph as fg
from time import time
def filter_dets(boxes, obj_scores, obj_classes, rel_inds, pred_scores):
    """
    Filters detections....

    Ranks relationship candidates by (best predicate score) * (subject score)
    * (object score) and returns everything as numpy, sorted best-first.
    :param boxes: [num_box, topk, 4] if bbox regression else [num_box, 4]
    :param obj_scores: [num_box] probabilities for the scores
    :param obj_classes: [num_box] class labels for the topk
    :param rel_inds: [num_rel, 2] TENSOR consisting of (im_ind0, im_ind1)
    :param pred_scores: [topk, topk, num_rel, num_predicates]
    :return: boxes, objs, rels, pred_scores
    """
    if boxes.dim() != 2:
        raise ValueError("Boxes needs to be [num_box, 4] but its {}".format(boxes.size()))

    num_box = boxes.size(0)
    assert obj_scores.size(0) == num_box
    assert obj_classes.size() == obj_scores.size()

    num_rel = rel_inds.size(0)
    assert rel_inds.size(1) == 2
    assert pred_scores.size(0) == num_rel

    subj_scores = obj_scores.data[rel_inds[:, 0]]
    dobj_scores = obj_scores.data[rel_inds[:, 1]]

    # Best non-background predicate per relation.
    best_pred_scores, best_pred_classes = pred_scores.data[:, 1:].max(1)
    best_pred_classes = best_pred_classes + 1  # restore the offset removed by [:, 1:] (currently unused)

    combined_scores = best_pred_scores * subj_scores * dobj_scores
    _, order = torch.sort(combined_scores.view(-1), dim=0, descending=True)

    rels = rel_inds[order].cpu().numpy()
    pred_scores_sorted = pred_scores[order].data.cpu().numpy()
    obj_scores_np = obj_scores.data.cpu().numpy()
    objs_np = obj_classes.data.cpu().numpy()
    boxes_out = boxes.data.cpu().numpy()

    return boxes_out, objs_np, obj_scores_np, rels, pred_scores_sorted
| 2,059 | 33.333333 | 100 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/evaluation/sg_eval_slow.py | # JUST TO CHECK THAT IT IS EXACTLY THE SAME..................................
import numpy as np
from config import MODES
class BasicSceneGraphEvaluator:
    """Accumulates recall@K (K in {20, 50, 100}) for one evaluation mode.

    Fix vs. the previous version: ``save`` and ``print_stats`` were each
    defined twice (the second definition silently shadowed the first), and
    ``self.result_dict`` was assigned twice in ``__init__``; the duplicates
    are removed with no behavior change.
    """
    def __init__(self, mode):
        # Map the external mode names onto the names eval_relation_recall expects.
        self.mode = {'sgdet': 'sg_det', 'sgcls': 'sg_cls', 'predcls': 'pred_cls'}[mode]
        self.result_dict = {self.mode + '_recall': {20: [], 50: [], 100: []}}

    @classmethod
    def all_modes(cls):
        """One evaluator per mode listed in config.MODES."""
        evaluators = {m: cls(mode=m) for m in MODES}
        return evaluators

    def evaluate_scene_graph_entry(self, gt_entry, pred_entry, iou_thresh=0.5):
        """Score one image's predicted scene graph against its ground truth.

        :param gt_entry: dict with gt_boxes, gt_classes, gt_relations
        :param pred_entry: dict with pred_boxes, pred_rels, obj_scores,
            rel_scores, pred_classes
        :param iou_thresh: box-overlap threshold for a triplet match
        :return: (pred_triplets, triplet_boxes) sorted best-first, for viz
        """
        roidb_entry = {
            # All GT boxes count as perfectly matched for evaluation purposes.
            'max_overlaps': np.ones(gt_entry['gt_classes'].shape[0], dtype=np.int64),
            'boxes': gt_entry['gt_boxes'],
            'gt_relations': gt_entry['gt_relations'],
            'gt_classes': gt_entry['gt_classes'],
        }
        sg_entry = {
            'boxes': pred_entry['pred_boxes'],
            'relations': pred_entry['pred_rels'],
            'obj_scores': pred_entry['obj_scores'],
            'rel_scores': pred_entry['rel_scores'],
            'pred_classes': pred_entry['pred_classes'],
        }

        pred_triplets, triplet_boxes = \
            eval_relation_recall(sg_entry, roidb_entry,
                                 self.result_dict,
                                 self.mode,
                                 iou_thresh=iou_thresh)
        return pred_triplets, triplet_boxes

    def save(self, fn):
        """Dump the accumulated recall lists to an .npy file."""
        np.save(fn, self.result_dict)

    def print_stats(self):
        """Print mean recall@K for every K tracked by this evaluator."""
        print('======================' + self.mode + '============================')
        for k, v in self.result_dict[self.mode + '_recall'].items():
            print('R@%i: %f' % (k, np.mean(v)))
def eval_relation_recall(sg_entry,
                         roidb_entry,
                         result_dict,
                         mode,
                         iou_thresh):
    """Compute recall@K for one image and append it into result_dict.

    :param sg_entry: predicted graph (boxes, relations, scores, pred_classes)
    :param roidb_entry: ground-truth entry (boxes, gt_relations, gt_classes,
        max_overlaps marking which boxes are GT)
    :param result_dict: dict of {mode + '_recall': {K: [per-image recalls]}},
        mutated in place
    :param mode: 'pred_cls', 'sg_cls' or 'sg_det'
    :param iou_thresh: box-overlap threshold for a triplet match
    :return: (pred_triplets, pred_triplet_boxes) sorted best-first, for viz;
        (None, None) when the image has no GT relations
    """
    # gt
    gt_inds = np.where(roidb_entry['max_overlaps'] == 1)[0]
    gt_boxes = roidb_entry['boxes'][gt_inds].copy().astype(float)
    num_gt_boxes = gt_boxes.shape[0]
    gt_relations = roidb_entry['gt_relations'].copy()
    gt_classes = roidb_entry['gt_classes'].copy()

    num_gt_relations = gt_relations.shape[0]
    if num_gt_relations == 0:
        return (None, None)
    gt_class_scores = np.ones(num_gt_boxes)
    gt_predicate_scores = np.ones(num_gt_relations)
    # gt_relations columns: (subj_ind, obj_ind, predicate).
    gt_triplets, gt_triplet_boxes, _ = _triplet(gt_relations[:,2],
                                                gt_relations[:,:2],
                                                gt_classes,
                                                gt_boxes,
                                                gt_predicate_scores,
                                                gt_class_scores)

    # pred
    box_preds = sg_entry['boxes']
    num_boxes = box_preds.shape[0]
    relations = sg_entry['relations']
    classes = sg_entry['pred_classes'].copy()
    class_scores = sg_entry['obj_scores'].copy()
    num_relations = relations.shape[0]

    # Depending on the task, substitute GT boxes and/or classes for predictions.
    if mode =='pred_cls':
        # if predicate classification task
        # use ground truth bounding boxes
        assert(num_boxes == num_gt_boxes)
        classes = gt_classes
        class_scores = gt_class_scores
        boxes = gt_boxes
    elif mode =='sg_cls':
        assert(num_boxes == num_gt_boxes)
        # if scene graph classification task
        # use gt boxes, but predicted classes
        # classes = np.argmax(class_preds, 1)
        # class_scores = class_preds.max(axis=1)
        boxes = gt_boxes
    elif mode =='sg_det':
        # if scene graph detection task
        # use preicted boxes and predicted classes
        # classes = np.argmax(class_preds, 1)
        # class_scores = class_preds.max(axis=1)
        boxes = box_preds
    else:
        raise NotImplementedError('Incorrect Mode! %s' % mode)

    # (subject class, predicate, object class) per predicted relation.
    pred_triplets = np.column_stack((
        classes[relations[:, 0]],
        relations[:,2],
        classes[relations[:, 1]],
    ))
    pred_triplet_boxes = np.column_stack((
        boxes[relations[:, 0]],
        boxes[relations[:, 1]],
    ))
    # Ranking score = subj score * rel score * obj score.
    relation_scores = np.column_stack((
        class_scores[relations[:, 0]],
        sg_entry['rel_scores'],
        class_scores[relations[:, 1]],
    )).prod(1)
    sorted_inds = np.argsort(relation_scores)[::-1]

    # compue recall
    for k in result_dict[mode + '_recall']:
        this_k = min(k, num_relations)
        keep_inds = sorted_inds[:this_k]
        recall = _relation_recall(gt_triplets,
                                  pred_triplets[keep_inds,:],
                                  gt_triplet_boxes,
                                  pred_triplet_boxes[keep_inds,:],
                                  iou_thresh)

        result_dict[mode + '_recall'][k].append(recall)

    # for visualization
    return pred_triplets[sorted_inds, :], pred_triplet_boxes[sorted_inds, :]
def _triplet(predicates, relations, classes, boxes,
predicate_scores, class_scores):
# format predictions into triplets
assert(predicates.shape[0] == relations.shape[0])
num_relations = relations.shape[0]
triplets = np.zeros([num_relations, 3]).astype(np.int32)
triplet_boxes = np.zeros([num_relations, 8]).astype(np.int32)
triplet_scores = np.zeros([num_relations]).astype(np.float32)
for i in range(num_relations):
triplets[i, 1] = predicates[i]
sub_i, obj_i = relations[i,:2]
triplets[i, 0] = classes[sub_i]
triplets[i, 2] = classes[obj_i]
triplet_boxes[i, :4] = boxes[sub_i, :]
triplet_boxes[i, 4:] = boxes[obj_i, :]
# compute triplet score
score = class_scores[sub_i]
score *= class_scores[obj_i]
score *= predicate_scores[i]
triplet_scores[i] = score
return triplets, triplet_boxes, triplet_scores
def _relation_recall(gt_triplets, pred_triplets,
                     gt_boxes, pred_boxes, iou_thresh):
    """Fraction of GT triplets matched by at least one prediction with the
    same (subject, predicate, object) labels and both boxes overlapping the
    GT by >= iou_thresh.

    :param gt_triplets: [num_gt, 3] GT label triplets
    :param pred_triplets: [num_pred, 3] predicted label triplets
    :param gt_boxes: [num_gt, 8] (subject box, object box) per GT triplet
    :param pred_boxes: [num_pred, 8] (subject box, object box) per prediction
    :param iou_thresh: minimum IoU for both boxes
    :return: recall in [0, 1]
    """
    num_gt = gt_triplets.shape[0]
    num_matched = 0
    for gt, gt_box in zip(gt_triplets, gt_boxes):
        # Candidates whose three labels all equal this GT triplet's labels.
        keep = np.all(pred_triplets == gt, axis=1)
        if not keep.any():
            continue
        cand_boxes = pred_boxes[keep, :]
        sub_ok = iou(gt_box[:4], cand_boxes[:, :4]) >= iou_thresh
        obj_ok = iou(gt_box[4:], cand_boxes[:, 4:]) >= iou_thresh
        if np.any(sub_ok & obj_ok):
            num_matched += 1
    return float(num_matched) / float(num_gt)
def iou(gt_box, pred_boxes):
    """Pascal-VOC-style Intersection-over-Union between one box and N boxes.

    Boxes are [x1, y1, x2, y2]; widths/heights use the inclusive-pixel
    (+1) convention.
    :param gt_box: [4] single box
    :param pred_boxes: [N, 4] boxes
    :return: [N] array of IoU values (0 for disjoint boxes)
    """
    # Intersection rectangle; negative extents mean the boxes are disjoint.
    ix1 = np.maximum(gt_box[0], pred_boxes[:, 0])
    iy1 = np.maximum(gt_box[1], pred_boxes[:, 1])
    ix2 = np.minimum(gt_box[2], pred_boxes[:, 2])
    iy2 = np.minimum(gt_box[3], pred_boxes[:, 3])
    inter = np.maximum(ix2 - ix1 + 1., 0.) * np.maximum(iy2 - iy1 + 1., 0.)

    # union = area(gt) + area(pred) - intersection
    gt_area = (gt_box[2] - gt_box[0] + 1.) * (gt_box[3] - gt_box[1] + 1.)
    pred_areas = ((pred_boxes[:, 2] - pred_boxes[:, 0] + 1.) *
                  (pred_boxes[:, 3] - pred_boxes[:, 1] + 1.))
    return inter / (gt_area + pred_areas - inter)
| 7,743 | 35.018605 | 85 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/evaluation/sg_eval.py | """
Adapted from Danfei Xu. In particular, slow code was removed
"""
import numpy as np
from functools import reduce
from lib.pytorch_misc import intersect_2d, argsort_desc
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from config import MODES
np.set_printoptions(precision=3)
class BasicSceneGraphEvaluator:
    """Accumulates recall@{20, 50, 100} for one scene-graph evaluation mode."""
    def __init__(self, mode, multiple_preds=False):
        self.mode = mode
        self.multiple_preds = multiple_preds
        # One list of per-image recalls for each K.
        self.result_dict = {mode + '_recall': {20: [], 50: [], 100: []}}

    @classmethod
    def all_modes(cls, **kwargs):
        """One evaluator per mode listed in config.MODES."""
        return {m: cls(mode=m, **kwargs) for m in MODES}

    @classmethod
    def vrd_modes(cls, **kwargs):
        """Evaluators for the VRD-style settings (multiple predicates allowed)."""
        return {m: cls(mode=m, multiple_preds=True, **kwargs) for m in ('preddet', 'phrdet')}

    def evaluate_scene_graph_entry(self, gt_entry, pred_scores, viz_dict=None, iou_thresh=0.5):
        """Score one image's predictions and fold the result into result_dict."""
        return evaluate_from_dict(
            gt_entry, pred_scores, self.mode, self.result_dict,
            viz_dict=viz_dict, iou_thresh=iou_thresh,
            multiple_preds=self.multiple_preds)

    def save(self, fn):
        """Dump the accumulated recall lists to an .npy file."""
        np.save(fn, self.result_dict)

    def print_stats(self):
        """Print mean recall@K for every tracked K."""
        print('======================' + self.mode + '============================')
        for k, v in self.result_dict[self.mode + '_recall'].items():
            print('R@%i: %f' % (k, np.mean(v)))
def evaluate_from_dict(gt_entry, pred_entry, mode, result_dict, multiple_preds=False,
                       viz_dict=None, **kwargs):
    """
    Shortcut to doing evaluate_recall from dicts. Appends this image's
    R@{20,50,100} into result_dict[mode + '_recall'].
    :param gt_entry: Dictionary containing gt_relations, gt_boxes, gt_classes
    :param pred_entry: Dictionary containing pred_rel_inds, rel_scores and,
        depending on the mode, pred_boxes, pred_classes, obj_scores
    :param mode: one of 'predcls', 'sgcls', 'sgdet', 'phrdet', 'preddet'
    :param result_dict: accumulator {mode + '_recall': {K: [per-image recall]}}
    :param multiple_preds: if True, keep several predicate labels per object pair
    :param viz_dict: unused here (kept for API compatibility)
    :param kwargs: forwarded to evaluate_recall (e.g. iou_thresh)
    :return: (pred_to_gt, pred_5ples, rel_scores) from evaluate_recall, or
        (None, None, None) in 'preddet' mode
    """
    gt_rels = gt_entry['gt_relations']
    gt_boxes = gt_entry['gt_boxes'].astype(float)
    gt_classes = gt_entry['gt_classes']
    pred_rel_inds = pred_entry['pred_rel_inds']
    rel_scores = pred_entry['rel_scores']
    if mode == 'predcls':
        # Predicate classification: GT boxes and GT labels are given.
        pred_boxes = gt_boxes
        pred_classes = gt_classes
        obj_scores = np.ones(gt_classes.shape[0])
    elif mode == 'sgcls':
        # Scene-graph classification: GT boxes, predicted labels/scores.
        pred_boxes = gt_boxes
        pred_classes = pred_entry['pred_classes']
        obj_scores = pred_entry['obj_scores']
    elif mode == 'sgdet' or mode == 'phrdet':
        # Detection settings: boxes, labels and scores are all predicted.
        pred_boxes = pred_entry['pred_boxes'].astype(float)
        pred_classes = pred_entry['pred_classes']
        obj_scores = pred_entry['obj_scores']
    elif mode == 'preddet':
        # Only extract the indices that appear in GT
        prc = intersect_2d(pred_rel_inds, gt_rels[:, :2])
        if prc.size == 0:
            # No predicted pair coincides with any GT pair: recall is 0.
            for k in result_dict[mode + '_recall']:
                result_dict[mode + '_recall'][k].append(0.0)
            return None, None, None
        pred_inds_per_gt = prc.argmax(0)
        pred_rel_inds = pred_rel_inds[pred_inds_per_gt]
        rel_scores = rel_scores[pred_inds_per_gt]
        # Now sort the matching ones
        rel_scores_sorted = argsort_desc(rel_scores[:,1:])
        # +1 restores the true predicate index after slicing off background.
        rel_scores_sorted[:,1] += 1
        rel_scores_sorted = np.column_stack((pred_rel_inds[rel_scores_sorted[:,0]], rel_scores_sorted[:,1]))
        matches = intersect_2d(rel_scores_sorted, gt_rels)
        for k in result_dict[mode + '_recall']:
            rec_i = float(matches[:k].any(0).sum()) / float(gt_rels.shape[0])
            result_dict[mode + '_recall'][k].append(rec_i)
        return None, None, None
    else:
        raise ValueError('invalid mode')
    if multiple_preds:
        # Score every (pair, predicate) combination and keep the global top 100.
        obj_scores_per_rel = obj_scores[pred_rel_inds].prod(1)
        overall_scores = obj_scores_per_rel[:,None] * rel_scores[:,1:]
        score_inds = argsort_desc(overall_scores)[:100]
        pred_rels = np.column_stack((pred_rel_inds[score_inds[:,0]], score_inds[:,1]+1))
        predicate_scores = rel_scores[score_inds[:,0], score_inds[:,1]+1]
    else:
        # One predicate per pair: argmax over non-background classes
        # (+1 restores the original label index after slicing off class 0).
        pred_rels = np.column_stack((pred_rel_inds, 1+rel_scores[:,1:].argmax(1)))
        predicate_scores = rel_scores[:,1:].max(1)
    pred_to_gt, pred_5ples, rel_scores = evaluate_recall(
                gt_rels, gt_boxes, gt_classes,
                pred_rels, pred_boxes, pred_classes,
                predicate_scores, obj_scores, phrdet= mode=='phrdet',
                **kwargs)
    for k in result_dict[mode + '_recall']:
        # Union of all GT relations matched within the top-k predictions.
        match = reduce(np.union1d, pred_to_gt[:k])
        rec_i = float(len(match)) / float(gt_rels.shape[0])
        result_dict[mode + '_recall'][k].append(rec_i)
    return pred_to_gt, pred_5ples, rel_scores
# print(" ".join(["R@{:2d}: {:.3f}".format(k, v[-1]) for k, v in result_dict[mode + '_recall'].items()]))
# Deal with visualization later
# # Optionally, log things to a separate dictionary
# if viz_dict is not None:
# # Caution: pred scores has changed (we took off the 0 class)
# gt_rels_scores = pred_scores[
# gt_rels[:, 0],
# gt_rels[:, 1],
# gt_rels[:, 2] - 1,
# ]
# # gt_rels_scores_cls = gt_rels_scores * pred_class_scores[
# # gt_rels[:, 0]] * pred_class_scores[gt_rels[:, 1]]
#
# viz_dict[mode + '_pred_rels'] = pred_5ples.tolist()
# viz_dict[mode + '_pred_rels_scores'] = max_pred_scores.tolist()
# viz_dict[mode + '_pred_rels_scores_cls'] = max_rel_scores.tolist()
# viz_dict[mode + '_gt_rels_scores'] = gt_rels_scores.tolist()
# viz_dict[mode + '_gt_rels_scores_cls'] = gt_rels_scores_cls.tolist()
#
# # Serialize pred2gt matching as a list of lists, where each sublist is of the form
# # pred_ind, gt_ind1, gt_ind2, ....
# viz_dict[mode + '_pred2gt_rel'] = pred_to_gt
###########################
def evaluate_recall(gt_rels, gt_boxes, gt_classes,
                    pred_rels, pred_boxes, pred_classes, rel_scores=None, cls_scores=None,
                    iou_thresh=0.5, phrdet=False):
    """
    Evaluates the recall
    :param gt_rels: [#gt_rel, 3] array of GT relations
    :param gt_boxes: [#gt_box, 4] array of GT boxes
    :param gt_classes: [#gt_box] array of GT classes
    :param pred_rels: [#pred_rel, 3] array of pred rels. Assumed these are in sorted order
                      and refer to IDs in pred classes / pred boxes
                      (id0, id1, rel)
    :param pred_boxes:  [#pred_box, 4] array of pred boxes
    :param pred_classes: [#pred_box] array of predicted classes for these boxes
    :param rel_scores: optional [#pred_rel] predicate confidences
    :param cls_scores: optional [#pred_box] object confidences
    :param iou_thresh: box IoU threshold for a subject/object match
    :param phrdet: if True, match on the union of subject and object boxes
    :return: pred_to_gt: Matching from predicate to GT
             pred_5ples: the predicted (id0, id1, cls0, cls1, rel)
             rel_scores: [cls_0score, cls1_score, relscore]
    """
    if pred_rels.size == 0:
        return [[]], np.zeros((0,5)), np.zeros(0)
    num_gt_boxes = gt_boxes.shape[0]
    num_gt_relations = gt_rels.shape[0]
    assert num_gt_relations != 0
    gt_triplets, gt_triplet_boxes, _ = _triplet(gt_rels[:, 2],
                                                gt_rels[:, :2],
                                                gt_classes,
                                                gt_boxes)
    num_boxes = pred_boxes.shape[0]
    assert pred_rels[:,:2].max() < pred_classes.shape[0]
    # Exclude self rels
    # assert np.all(pred_rels[:,0] != pred_rels[:,1])
    assert np.all(pred_rels[:,2] > 0)
    pred_triplets, pred_triplet_boxes, relation_scores = \
        _triplet(pred_rels[:,2], pred_rels[:,:2], pred_classes, pred_boxes,
                 rel_scores, cls_scores)
    # Sanity check: recall@K truncates by rank, so the caller must pass
    # relations sorted by descending overall (sub * obj * pred) score.
    scores_overall = relation_scores.prod(1)
    if not np.all(scores_overall[1:] <= scores_overall[:-1] + 1e-5):
        print("Somehow the relations weren't sorted properly: \n{}".format(scores_overall))
        # raise ValueError("Somehow the relations werent sorted properly")
    # Compute recall. It's most efficient to match once and then do recall after
    pred_to_gt = _compute_pred_matches(
        gt_triplets,
        pred_triplets,
        gt_triplet_boxes,
        pred_triplet_boxes,
        iou_thresh,
        phrdet=phrdet,
    )
    # Contains some extra stuff for visualization. Not needed.
    pred_5ples = np.column_stack((
        pred_rels[:,:2],
        pred_triplets[:, [0, 2, 1]],
    ))
    return pred_to_gt, pred_5ples, relation_scores
def _triplet(predicates, relations, classes, boxes,
predicate_scores=None, class_scores=None):
"""
format predictions into triplets
:param predicates: A 1d numpy array of num_boxes*(num_boxes-1) predicates, corresponding to
each pair of possibilities
:param relations: A (num_boxes*(num_boxes-1), 2) array, where each row represents the boxes
in that relation
:param classes: A (num_boxes) array of the classes for each thing.
:param boxes: A (num_boxes,4) array of the bounding boxes for everything.
:param predicate_scores: A (num_boxes*(num_boxes-1)) array of the scores for each predicate
:param class_scores: A (num_boxes) array of the likelihood for each object.
:return: Triplets: (num_relations, 3) array of class, relation, class
Triplet boxes: (num_relation, 8) array of boxes for the parts
Triplet scores: num_relation array of the scores overall for the triplets
"""
assert (predicates.shape[0] == relations.shape[0])
sub_ob_classes = classes[relations[:, :2]]
triplets = np.column_stack((sub_ob_classes[:, 0], predicates, sub_ob_classes[:, 1]))
triplet_boxes = np.column_stack((boxes[relations[:, 0]], boxes[relations[:, 1]]))
triplet_scores = None
if predicate_scores is not None and class_scores is not None:
triplet_scores = np.column_stack((
class_scores[relations[:, 0]],
class_scores[relations[:, 1]],
predicate_scores,
))
return triplets, triplet_boxes, triplet_scores
def _compute_pred_matches(gt_triplets, pred_triplets,
                 gt_boxes, pred_boxes, iou_thresh, phrdet=False):
    """
    Given a set of predicted triplets, return the list of matching GT's for each of the
    given predictions
    :param gt_triplets: [#gt, 3] (sub_cls, predicate, obj_cls) per GT relation
    :param pred_triplets: [#pred, 3] same layout for predictions
    :param gt_boxes: [#gt, 8] concatenated subject+object boxes per GT relation
    :param pred_boxes: [#pred, 8] concatenated subject+object boxes per prediction
    :param iou_thresh: IoU threshold for a box match
    :param phrdet: if True, compare the unions of subject+object boxes instead
    :return: pred_to_gt: one list per prediction with the GT indices it matches
    """
    # This performs a matrix multiplication-esque thing between the two arrays
    # Instead of summing, we want the equality, so we reduce in that way
    # The rows correspond to GT triplets, columns to pred triplets
    keeps = intersect_2d(gt_triplets, pred_triplets)
    gt_has_match = keeps.any(1)
    pred_to_gt = [[] for x in range(pred_boxes.shape[0])]
    for gt_ind, gt_box, keep_inds in zip(np.where(gt_has_match)[0],
                                         gt_boxes[gt_has_match],
                                         keeps[gt_has_match],
                                         ):
        # Only predictions whose label triple matches this GT are candidates.
        boxes = pred_boxes[keep_inds]
        if phrdet:
            # Evaluate where the union box > 0.5
            gt_box_union = gt_box.reshape((2, 4))
            gt_box_union = np.concatenate((gt_box_union.min(0)[:2], gt_box_union.max(0)[2:]), 0)
            box_union = boxes.reshape((-1, 2, 4))
            box_union = np.concatenate((box_union.min(1)[:,:2], box_union.max(1)[:,2:]), 1)
            inds = bbox_overlaps(gt_box_union[None], box_union)[0] >= iou_thresh
        else:
            # Subject and object boxes must both clear the IoU threshold.
            sub_iou = bbox_overlaps(gt_box[None,:4], boxes[:, :4])[0]
            obj_iou = bbox_overlaps(gt_box[None,4:], boxes[:, 4:])[0]
            inds = (sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)
        # Map the surviving candidate positions back to prediction indices.
        for i in np.where(keep_inds)[0][inds]:
            pred_to_gt[i].append(int(gt_ind))
    return pred_to_gt
| 11,883 | 40.698246 | 111 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/evaluation/test_sg_eval.py | # Just some tests so you can be assured that sg_eval.py works the same as the (original) stanford evaluation
import numpy as np
from six.moves import xrange
from dataloaders.visual_genome import VG
from lib.evaluation.sg_eval import evaluate_from_dict
from tqdm import trange
from lib.fpn.box_utils import center_size, point_form
def eval_relation_recall(sg_entry,
                         roidb_entry,
                         result_dict,
                         mode,
                         iou_thresh):
    """Stanford-style relation recall evaluation for one image.

    Appends R@K for every K in result_dict[mode + '_recall'] and returns the
    score-sorted predicted triplets with their boxes (for visualization), or
    (None, None) when the image has no GT relations.
    """
    # gt: GT boxes are the roidb rows flagged with max_overlaps == 1.
    gt_inds = np.where(roidb_entry['max_overlaps'] == 1)[0]
    gt_boxes = roidb_entry['boxes'][gt_inds].copy().astype(float)
    num_gt_boxes = gt_boxes.shape[0]
    gt_relations = roidb_entry['gt_relations'].copy()
    gt_classes = roidb_entry['gt_classes'].copy()
    num_gt_relations = gt_relations.shape[0]
    if num_gt_relations == 0:
        return (None, None)
    gt_class_scores = np.ones(num_gt_boxes)
    gt_predicate_scores = np.ones(num_gt_relations)
    gt_triplets, gt_triplet_boxes, _ = _triplet(gt_relations[:,2],
                                                gt_relations[:,:2],
                                                gt_classes,
                                                gt_boxes,
                                                gt_predicate_scores,
                                                gt_class_scores)
    # pred
    box_preds = sg_entry['boxes']
    num_boxes = box_preds.shape[0]
    predicate_preds = sg_entry['relations']
    class_preds = sg_entry['scores']
    predicate_preds = predicate_preds.reshape(num_boxes, num_boxes, -1)
    # no bg: drop the background predicate column; +1 below restores labels.
    predicate_preds = predicate_preds[:, :, 1:]
    predicates = np.argmax(predicate_preds, 2).ravel() + 1
    predicate_scores = predicate_preds.max(axis=2).ravel()
    relations = []
    keep = []
    # Enumerate all ordered (i, j) pairs, skipping self-relations.
    for i in xrange(num_boxes):
        for j in xrange(num_boxes):
            if i != j:
                keep.append(num_boxes*i + j)
                relations.append([i, j])
    # take out self relations
    predicates = predicates[keep]
    predicate_scores = predicate_scores[keep]
    relations = np.array(relations)
    assert(relations.shape[0] == num_boxes * (num_boxes - 1))
    assert(predicates.shape[0] == relations.shape[0])
    num_relations = relations.shape[0]
    if mode =='predcls':
        # if predicate classification task
        # use ground truth bounding boxes
        assert(num_boxes == num_gt_boxes)
        classes = gt_classes
        class_scores = gt_class_scores
        boxes = gt_boxes
    elif mode =='sgcls':
        assert(num_boxes == num_gt_boxes)
        # if scene graph classification task
        # use gt boxes, but predicted classes
        classes = np.argmax(class_preds, 1)
        class_scores = class_preds.max(axis=1)
        boxes = gt_boxes
    elif mode =='sgdet':
        # if scene graph detection task
        # use predicted boxes and predicted classes
        classes = np.argmax(class_preds, 1)
        class_scores = class_preds.max(axis=1)
        boxes = []
        for i, c in enumerate(classes):
            boxes.append(box_preds[i]) # no bbox regression, c*4:(c+1)*4])
        boxes = np.vstack(boxes)
    else:
        raise NotImplementedError('Incorrect Mode! %s' % mode)
    pred_triplets, pred_triplet_boxes, relation_scores = \
        _triplet(predicates, relations, classes, boxes,
                 predicate_scores, class_scores)
    # Rank predictions by descending triplet score before truncating at K.
    sorted_inds = np.argsort(relation_scores)[::-1]
    # compute recall
    for k in result_dict[mode + '_recall']:
        this_k = min(k, num_relations)
        keep_inds = sorted_inds[:this_k]
        recall = _relation_recall(gt_triplets,
                                  pred_triplets[keep_inds,:],
                                  gt_triplet_boxes,
                                  pred_triplet_boxes[keep_inds,:],
                                  iou_thresh)
        result_dict[mode + '_recall'][k].append(recall)
    # for visualization
    return pred_triplets[sorted_inds, :], pred_triplet_boxes[sorted_inds, :]
def _triplet(predicates, relations, classes, boxes,
predicate_scores, class_scores):
# format predictions into triplets
assert(predicates.shape[0] == relations.shape[0])
num_relations = relations.shape[0]
triplets = np.zeros([num_relations, 3]).astype(np.int32)
triplet_boxes = np.zeros([num_relations, 8]).astype(np.int32)
triplet_scores = np.zeros([num_relations]).astype(np.float32)
for i in xrange(num_relations):
triplets[i, 1] = predicates[i]
sub_i, obj_i = relations[i,:2]
triplets[i, 0] = classes[sub_i]
triplets[i, 2] = classes[obj_i]
triplet_boxes[i, :4] = boxes[sub_i, :]
triplet_boxes[i, 4:] = boxes[obj_i, :]
# compute triplet score
score = class_scores[sub_i]
score *= class_scores[obj_i]
score *= predicate_scores[i]
triplet_scores[i] = score
return triplets, triplet_boxes, triplet_scores
def _relation_recall(gt_triplets, pred_triplets,
                     gt_boxes, pred_boxes, iou_thresh):
    """Recall of GT triplets: a GT counts as found when some prediction with
    the identical (sub, pred, obj) labels localizes both boxes at IoU >= thresh."""
    total = gt_triplets.shape[0]
    hits = 0
    for gt_trip, gt_box in zip(gt_triplets, gt_boxes):
        # Boolean mask over predictions carrying the exact same label triple.
        mask = np.array([bool(p[0] == gt_trip[0] and p[1] == gt_trip[1]
                              and p[2] == gt_trip[2]) for p in pred_triplets],
                        dtype=bool)
        if not mask.any():
            continue
        cand = pred_boxes[mask, :]
        ok_sub = np.where(iou(gt_box[:4], cand[:, :4]) >= iou_thresh)[0]
        ok_obj = np.where(iou(gt_box[4:], cand[:, 4:]) >= iou_thresh)[0]
        # The same candidate must satisfy both the subject and object IoU.
        if np.intersect1d(ok_sub, ok_obj).size > 0:
            hits += 1
    return float(hits) / float(total)
def iou(gt_box, pred_boxes):
    """IoU between one ground-truth box and an array of predicted boxes.

    Coordinates are inclusive [x1, y1, x2, y2], so widths/heights use "+ 1.".
    """
    # Intersection rectangle corners (broadcast across all predictions).
    lo_x = np.maximum(gt_box[0], pred_boxes[:, 0])
    lo_y = np.maximum(gt_box[1], pred_boxes[:, 1])
    hi_x = np.minimum(gt_box[2], pred_boxes[:, 2])
    hi_y = np.minimum(gt_box[3], pred_boxes[:, 3])
    width = np.maximum(hi_x - lo_x + 1., 0.)
    height = np.maximum(hi_y - lo_y + 1., 0.)
    inters = width * height
    # union = area(gt) + area(pred) - intersection
    area_gt = (gt_box[2] - gt_box[0] + 1.) * (gt_box[3] - gt_box[1] + 1.)
    area_pred = ((pred_boxes[:, 2] - pred_boxes[:, 0] + 1.) *
                 (pred_boxes[:, 3] - pred_boxes[:, 1] + 1.))
    return inters / (area_gt + area_pred - inters)
# Cross-check: run the fast evaluator (evaluate_from_dict) and the original
# Stanford evaluator (eval_relation_recall) on randomly jittered predictions
# and require identical per-image R@20.
train, val, test = VG.splits()
result_dict_mine = {'sgdet_recall': {20: [], 50: [], 100: []}}
result_dict_theirs = {'sgdet_recall': {20: [], 50: [], 100: []}}
for img_i in trange(len(val)):
    gt_entry = {
        'gt_classes': val.gt_classes[img_i].copy(),
        'gt_relations': val.relationships[img_i].copy(),
        'gt_boxes': val.gt_boxes[img_i].copy(),
    }
    # Use shuffled GT boxes
    gt_indices = np.arange(gt_entry['gt_boxes'].shape[0]) #np.random.choice(gt_entry['gt_boxes'].shape[0], 20)
    pred_boxes = gt_entry['gt_boxes'][gt_indices]
    # Jitter the boxes a bit (shift centers, rescale sizes) so matching
    # actually exercises the IoU thresholding.
    pred_boxes = center_size(pred_boxes)
    pred_boxes[:,:2] += np.random.rand(pred_boxes.shape[0], 2)*128
    pred_boxes[:,2:] *= (1+np.random.randn(pred_boxes.shape[0], 2).clip(-0.1, 0.1))
    pred_boxes = point_form(pred_boxes)
    obj_scores = np.random.rand(pred_boxes.shape[0])
    # All ordered off-diagonal (i, j) pairs are candidate relations.
    rels_to_use = np.column_stack(np.where(1 - np.diag(np.ones(pred_boxes.shape[0], dtype=np.int32))))
    rel_scores = np.random.rand(min(100, rels_to_use.shape[0]), 51)
    rel_scores = rel_scores / rel_scores.sum(1, keepdims=True)
    pred_rel_inds = rels_to_use[np.random.choice(rels_to_use.shape[0], rel_scores.shape[0],
                                                 replace=False)]
    # We must sort by P(o, o, r)
    rel_order = np.argsort(-rel_scores[:,1:].max(1) * obj_scores[pred_rel_inds[:,0]] * obj_scores[pred_rel_inds[:,1]])
    pred_entry = {
        'pred_boxes': pred_boxes,
        'pred_classes': gt_entry['gt_classes'][gt_indices], #1+np.random.choice(150, pred_boxes.shape[0], replace=True),
        'obj_scores': obj_scores,
        'pred_rel_inds': pred_rel_inds[rel_order],
        'rel_scores': rel_scores[rel_order],
    }
    # def check_whether_they_are_the_same(gt_entry, pred_entry):
    evaluate_from_dict(gt_entry, pred_entry, 'sgdet', result_dict_mine, multiple_preds=False,
                       viz_dict=None)
    #########################
    # Re-pack the same predictions into the dense format the original
    # Stanford evaluator expects (per-pair predicate scores, per-box class scores).
    predicate_scores_theirs = np.zeros((pred_boxes.shape[0], pred_boxes.shape[0], 51), dtype=np.float64)
    for (o1, o2), s in zip(pred_entry['pred_rel_inds'], pred_entry['rel_scores']):
        predicate_scores_theirs[o1, o2] = s
    obj_scores_theirs = np.zeros((obj_scores.shape[0], 151), dtype=np.float64)
    obj_scores_theirs[np.arange(obj_scores.shape[0]), pred_entry['pred_classes']] = obj_scores
    sg_entry_orig_format = {
        'boxes': pred_entry['pred_boxes'],
        # 'gt_classes': gt_entry['gt_classes'],
        # 'gt_relations': gt_entry['gt_relations'],
        'relations': predicate_scores_theirs,
        'scores': obj_scores_theirs
    }
    roidb_entry = {
        'max_overlaps': np.concatenate((np.ones(gt_entry['gt_boxes'].shape[0]), np.zeros(pred_entry['pred_boxes'].shape[0])), 0),
        'boxes': np.concatenate((gt_entry['gt_boxes'], pred_entry['pred_boxes']), 0),
        'gt_classes': gt_entry['gt_classes'],
        'gt_relations': gt_entry['gt_relations'],
    }
    eval_relation_recall(sg_entry_orig_format, roidb_entry, result_dict_theirs, 'sgdet', iou_thresh=0.5)
# The two evaluators must produce identical per-image R@20.
my_results = np.array(result_dict_mine['sgdet_recall'][20])
their_results = np.array(result_dict_theirs['sgdet_recall'][20])
assert np.all(my_results == their_results)
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/evaluation/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/evaluation/sg_eval_all_rel_cates.py | """
Adapted from Danfei Xu. In particular, slow code was removed
"""
import numpy as np
from functools import reduce
from lib.pytorch_misc import intersect_2d, argsort_desc
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from config import MODES
import sys
np.set_printoptions(precision=3)
class BasicSceneGraphEvaluator:
    """Recall@K evaluator that additionally breaks results down per predicate.

    result_dict[mode + '_recall'][K] maps every relation-category name
    (plus the aggregate 'all_rel_cates') to a list of per-image recalls.
    """
    def __init__(self, mode, multiple_preds=False):
        self.result_dict = {}
        self.mode = mode
        # Visual Genome predicate id -> name; id 0 aggregates over all
        # categories ('all_rel_cates').
        rel_cats = {
            0: 'all_rel_cates',
            1: "above",
            2: "across",
            3: "against",
            4: "along",
            5: "and",
            6: "at",
            7: "attached to",
            8: "behind",
            9: "belonging to",
            10: "between",
            11: "carrying",
            12: "covered in",
            13: "covering",
            14: "eating",
            15: "flying in",
            16: "for",
            17: "from",
            18: "growing on",
            19: "hanging from",
            20: "has",
            21: "holding",
            22: "in",
            23: "in front of",
            24: "laying on",
            25: "looking at",
            26: "lying on",
            27: "made of",
            28: "mounted on",
            29: "near",
            30: "of",
            31: "on",
            32: "on back of",
            33: "over",
            34: "painted on",
            35: "parked on",
            36: "part of",
            37: "playing",
            38: "riding",
            39: "says",
            40: "sitting on",
            41: "standing on",
            42: "to",
            43: "under",
            44: "using",
            45: "walking in",
            46: "walking on",
            47: "watching",
            48: "wearing",
            49: "wears",
            50: "with"
        }
        self.rel_cats = rel_cats
        # The initial values here are immediately replaced by the loop below;
        # every K ends up mapping each category name to an empty list.
        self.result_dict[self.mode + '_recall'] = {20: {}, 50: {}, 100: []}
        for key, value in self.result_dict[self.mode + '_recall'].items():
            self.result_dict[self.mode + '_recall'][key] = {}
            for rel_cat_id, rel_cat_name in rel_cats.items():
                self.result_dict[self.mode + '_recall'][key][rel_cat_name] = []
        self.multiple_preds = multiple_preds
    @classmethod
    def all_modes(cls, **kwargs):
        # One evaluator per mode listed in config.MODES.
        evaluators = {m: cls(mode=m, **kwargs) for m in MODES}
        return evaluators
    @classmethod
    def vrd_modes(cls, **kwargs):
        # VRD-style tasks: multiple predicate labels allowed per object pair.
        evaluators = {m: cls(mode=m, multiple_preds=True, **kwargs) for m in ('preddet', 'phrdet')}
        return evaluators
    def evaluate_scene_graph_entry(self, gt_entry, pred_scores, viz_dict=None, iou_thresh=0.5):
        # Score one image and record per-category recalls into result_dict.
        res = evaluate_from_dict(gt_entry, pred_scores, self.mode, self.result_dict,
                                 viz_dict=viz_dict, iou_thresh=iou_thresh, multiple_preds=self.multiple_preds, rel_cats=self.rel_cats)
        # self.print_stats()
        return res
    def save(self, fn):
        np.save(fn, self.result_dict)
    def print_stats(self):
        print('======================' + self.mode + '============================')
        for k, v in self.result_dict[self.mode + '_recall'].items():
            for rel_cat_id, rel_cat_name in self.rel_cats.items():
                print('R@%i: %f' % (k, np.mean(v[rel_cat_name])), rel_cat_name)
            print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
def evaluate_from_dict(gt_entry, pred_entry, mode, result_dict, multiple_preds=False,
                       viz_dict=None, rel_cats=None, **kwargs):
    """
    Shortcut to doing evaluate_recall from dicts, with recall broken down per
    predicate category. Appends this image's R@{20,50,100} for every category
    in rel_cats into result_dict[mode + '_recall'][K][cat_name].
    :param gt_entry: Dictionary containing gt_relations, gt_boxes, gt_classes
    :param pred_entry: Dictionary containing pred_rel_inds, rel_scores and,
        depending on the mode, pred_boxes, pred_classes, obj_scores
    :param mode: one of 'predcls', 'sgcls', 'sgdet', 'phrdet', 'preddet'
    :param result_dict: accumulator {mode + '_recall': {K: {cat_name: [...]}}}
    :param multiple_preds: if True, keep several predicate labels per object pair
    :param viz_dict: unused here (kept for API compatibility)
    :param rel_cats: {predicate_id: category_name}, id 0 = 'all_rel_cates'
    :param kwargs: forwarded to evaluate_recall (e.g. iou_thresh)
    :return: (pred_to_gt, pred_5ples, rel_scores), or (None, None, None) in
        'preddet' mode
    """
    gt_rels = gt_entry['gt_relations']
    gt_boxes = gt_entry['gt_boxes'].astype(float)
    gt_classes = gt_entry['gt_classes']
    # Per-category GT counts; slot 0 counts every relation (the aggregate).
    gt_rels_nums = [0 for x in range(len(rel_cats))]
    for rel in gt_rels:
        gt_rels_nums[rel[2]] += 1
        gt_rels_nums[0] += 1
    pred_rel_inds = pred_entry['pred_rel_inds']
    rel_scores = pred_entry['rel_scores']
    if mode == 'predcls':
        # Predicate classification: GT boxes and GT labels are given.
        pred_boxes = gt_boxes
        pred_classes = gt_classes
        obj_scores = np.ones(gt_classes.shape[0])
    elif mode == 'sgcls':
        # Scene-graph classification: GT boxes, predicted labels/scores.
        pred_boxes = gt_boxes
        pred_classes = pred_entry['pred_classes']
        obj_scores = pred_entry['obj_scores']
    elif mode == 'sgdet' or mode == 'phrdet':
        # Detection settings: boxes, labels and scores are all predicted.
        pred_boxes = pred_entry['pred_boxes'].astype(float)
        pred_classes = pred_entry['pred_classes']
        obj_scores = pred_entry['obj_scores']
    elif mode == 'preddet':
        # Only extract the indices that appear in GT
        prc = intersect_2d(pred_rel_inds, gt_rels[:, :2])
        if prc.size == 0:
            # NOTE(review): in this class result_dict[mode + '_recall'][k] is a
            # dict keyed by category name, which has no .append — this branch
            # looks inherited from the flat evaluator; confirm preddet is used.
            for k in result_dict[mode + '_recall']:
                result_dict[mode + '_recall'][k].append(0.0)
            return None, None, None
        pred_inds_per_gt = prc.argmax(0)
        pred_rel_inds = pred_rel_inds[pred_inds_per_gt]
        rel_scores = rel_scores[pred_inds_per_gt]
        # Now sort the matching ones
        rel_scores_sorted = argsort_desc(rel_scores[:,1:])
        rel_scores_sorted[:,1] += 1
        rel_scores_sorted = np.column_stack((pred_rel_inds[rel_scores_sorted[:,0]], rel_scores_sorted[:,1]))
        matches = intersect_2d(rel_scores_sorted, gt_rels)
        for k in result_dict[mode + '_recall']:
            rec_i = float(matches[:k].any(0).sum()) / float(gt_rels.shape[0])
            result_dict[mode + '_recall'][k].append(rec_i)
        return None, None, None
    else:
        raise ValueError('invalid mode')
    if multiple_preds:
        # Score every (pair, predicate) combination and keep the global top 100.
        obj_scores_per_rel = obj_scores[pred_rel_inds].prod(1)
        overall_scores = obj_scores_per_rel[:,None] * rel_scores[:,1:]
        score_inds = argsort_desc(overall_scores)[:100]
        pred_rels = np.column_stack((pred_rel_inds[score_inds[:,0]], score_inds[:,1]+1))
        predicate_scores = rel_scores[score_inds[:,0], score_inds[:,1]+1]
    else:
        # One predicate per pair: argmax over the non-background classes
        # (+1 restores the original label index after slicing off class 0).
        pred_rels = np.column_stack((pred_rel_inds, 1+rel_scores[:,1:].argmax(1)))
        predicate_scores = rel_scores[:,1:].max(1)
    pred_to_gt, pred_5ples, rel_scores = evaluate_recall(
                gt_rels, gt_boxes, gt_classes,
                pred_rels, pred_boxes, pred_classes,
                predicate_scores, obj_scores, phrdet= mode=='phrdet',rel_cats=rel_cats,
                **kwargs)
    for k in result_dict[mode + '_recall']:
        for rel_cat_id, rel_cat_name in rel_cats.items():
            # Per-category recall; sys.float_info.min avoids division by zero
            # for categories with no GT relations in this image.
            match = reduce(np.union1d, pred_to_gt[rel_cat_name][:k])
            rec_i = float(len(match)) / (float(gt_rels_nums[rel_cat_id]) + sys.float_info.min) #float(gt_rels.shape[0])
            result_dict[mode + '_recall'][k][rel_cat_name].append(rec_i)
    return pred_to_gt, pred_5ples, rel_scores
# print(" ".join(["R@{:2d}: {:.3f}".format(k, v[-1]) for k, v in result_dict[mode + '_recall'].items()]))
# Deal with visualization later
# # Optionally, log things to a separate dictionary
# if viz_dict is not None:
# # Caution: pred scores has changed (we took off the 0 class)
# gt_rels_scores = pred_scores[
# gt_rels[:, 0],
# gt_rels[:, 1],
# gt_rels[:, 2] - 1,
# ]
# # gt_rels_scores_cls = gt_rels_scores * pred_class_scores[
# # gt_rels[:, 0]] * pred_class_scores[gt_rels[:, 1]]
#
# viz_dict[mode + '_pred_rels'] = pred_5ples.tolist()
# viz_dict[mode + '_pred_rels_scores'] = max_pred_scores.tolist()
# viz_dict[mode + '_pred_rels_scores_cls'] = max_rel_scores.tolist()
# viz_dict[mode + '_gt_rels_scores'] = gt_rels_scores.tolist()
# viz_dict[mode + '_gt_rels_scores_cls'] = gt_rels_scores_cls.tolist()
#
# # Serialize pred2gt matching as a list of lists, where each sublist is of the form
# # pred_ind, gt_ind1, gt_ind2, ....
# viz_dict[mode + '_pred2gt_rel'] = pred_to_gt
###########################
def evaluate_recall(gt_rels, gt_boxes, gt_classes,
                    pred_rels, pred_boxes, pred_classes, rel_scores=None, cls_scores=None,
                    iou_thresh=0.5, phrdet=False, rel_cats=None):
    """
    Evaluates the recall
    :param gt_rels: [#gt_rel, 3] array of GT relations
    :param gt_boxes: [#gt_box, 4] array of GT boxes
    :param gt_classes: [#gt_box] array of GT classes
    :param pred_rels: [#pred_rel, 3] array of pred rels. Assumed these are in sorted order
                      and refer to IDs in pred classes / pred boxes
                      (id0, id1, rel)
    :param pred_boxes:  [#pred_box, 4] array of pred boxes
    :param pred_classes: [#pred_box] array of predicted classes for these boxes
    :param rel_cats: {predicate_id: category_name}; pred_to_gt is keyed by these names
    :return: pred_to_gt: Matching from predicate to GT, per relation category
             pred_5ples: the predicted (id0, id1, cls0, cls1, rel)
             rel_scores: [cls_0score, cls1_score, relscore]
    """
    if pred_rels.size == 0:
        # NOTE(review): this early return is a list, but callers of this
        # variant index pred_to_gt by category name (a dict) — confirm this
        # path is reachable in practice.
        return [[]], np.zeros((0,5)), np.zeros(0)
    num_gt_boxes = gt_boxes.shape[0]
    num_gt_relations = gt_rels.shape[0]
    assert num_gt_relations != 0
    gt_triplets, gt_triplet_boxes, _ = _triplet(gt_rels[:, 2],
                                                gt_rels[:, :2],
                                                gt_classes,
                                                gt_boxes)
    num_boxes = pred_boxes.shape[0]
    assert pred_rels[:,:2].max() < pred_classes.shape[0]
    # Exclude self rels
    # assert np.all(pred_rels[:,0] != pred_rels[:,1])
    assert np.all(pred_rels[:,2] > 0)
    pred_triplets, pred_triplet_boxes, relation_scores = \
        _triplet(pred_rels[:,2], pred_rels[:,:2], pred_classes, pred_boxes,
                 rel_scores, cls_scores)
    # Sanity check: recall@K truncates by rank, so the caller must pass
    # relations sorted by descending overall (sub * obj * pred) score.
    scores_overall = relation_scores.prod(1)
    if not np.all(scores_overall[1:] <= scores_overall[:-1] + 1e-5):
        print("Somehow the relations weren't sorted properly: \n{}".format(scores_overall))
        # raise ValueError("Somehow the relations werent sorted properly")
    # Compute recall. It's most efficient to match once and then do recall after
    pred_to_gt = _compute_pred_matches(
        gt_triplets,
        pred_triplets,
        gt_triplet_boxes,
        pred_triplet_boxes,
        iou_thresh,
        phrdet=phrdet,
        rel_cats=rel_cats,
    )
    # Contains some extra stuff for visualization. Not needed.
    pred_5ples = np.column_stack((
        pred_rels[:,:2],
        pred_triplets[:, [0, 2, 1]],
    ))
    return pred_to_gt, pred_5ples, relation_scores
def _triplet(predicates, relations, classes, boxes,
predicate_scores=None, class_scores=None):
"""
format predictions into triplets
:param predicates: A 1d numpy array of num_boxes*(num_boxes-1) predicates, corresponding to
each pair of possibilities
:param relations: A (num_boxes*(num_boxes-1), 2) array, where each row represents the boxes
in that relation
:param classes: A (num_boxes) array of the classes for each thing.
:param boxes: A (num_boxes,4) array of the bounding boxes for everything.
:param predicate_scores: A (num_boxes*(num_boxes-1)) array of the scores for each predicate
:param class_scores: A (num_boxes) array of the likelihood for each object.
:return: Triplets: (num_relations, 3) array of class, relation, class
Triplet boxes: (num_relation, 8) array of boxes for the parts
Triplet scores: num_relation array of the scores overall for the triplets
"""
assert (predicates.shape[0] == relations.shape[0])
sub_ob_classes = classes[relations[:, :2]]
triplets = np.column_stack((sub_ob_classes[:, 0], predicates, sub_ob_classes[:, 1]))
triplet_boxes = np.column_stack((boxes[relations[:, 0]], boxes[relations[:, 1]]))
triplet_scores = None
if predicate_scores is not None and class_scores is not None:
triplet_scores = np.column_stack((
class_scores[relations[:, 0]],
class_scores[relations[:, 1]],
predicate_scores,
))
return triplets, triplet_boxes, triplet_scores
def _compute_pred_matches(gt_triplets, pred_triplets,
                 gt_boxes, pred_boxes, iou_thresh, phrdet=False, rel_cats=None):
    """
    Given a set of predicted triplets, return the list of matching GT's for each of the
    given predictions, bucketed per relation category
    :param gt_triplets: [#gt, 3] (sub_cls, predicate, obj_cls) per GT relation
    :param pred_triplets: [#pred, 3] same layout for predictions
    :param gt_boxes: [#gt, 8] concatenated subject+object boxes per GT relation
    :param pred_boxes: [#pred, 8] concatenated subject+object boxes per prediction
    :param iou_thresh: IoU threshold for a box match
    :param phrdet: if True, compare the unions of subject+object boxes instead
    :param rel_cats: {predicate_id: category_name}, id 0 = 'all_rel_cates'
    :return: pred_to_gt: {category_name: per-prediction lists of matched GT indices}
    """
    # This performs a matrix multiplication-esque thing between the two arrays
    # Instead of summing, we want the equality, so we reduce in that way
    # The rows correspond to GT triplets, columns to pred triplets
    keeps = intersect_2d(gt_triplets, pred_triplets)
    gt_has_match = keeps.any(1)
    pred_to_gt = {}
    for rel_cat_id, rel_cat_name in rel_cats.items():
        pred_to_gt[rel_cat_name] = [[] for x in range(pred_boxes.shape[0])]
    for gt_ind, gt_box, keep_inds in zip(np.where(gt_has_match)[0],
                                         gt_boxes[gt_has_match],
                                         keeps[gt_has_match],
                                         ):
        # Only predictions whose label triple matches this GT are candidates.
        boxes = pred_boxes[keep_inds]
        if phrdet:
            # Evaluate where the union box > 0.5
            gt_box_union = gt_box.reshape((2, 4))
            gt_box_union = np.concatenate((gt_box_union.min(0)[:2], gt_box_union.max(0)[2:]), 0)
            box_union = boxes.reshape((-1, 2, 4))
            box_union = np.concatenate((box_union.min(1)[:,:2], box_union.max(1)[:,2:]), 1)
            inds = bbox_overlaps(gt_box_union[None], box_union)[0] >= iou_thresh
        else:
            # Subject and object boxes must both clear the IoU threshold.
            sub_iou = bbox_overlaps(gt_box[None,:4], boxes[:, :4])[0]
            obj_iou = bbox_overlaps(gt_box[None,4:], boxes[:, 4:])[0]
            inds = (sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)
        for i in np.where(keep_inds)[0][inds]:
            # Record the match both in the aggregate bucket and in the
            # bucket of this GT relation's predicate category.
            pred_to_gt['all_rel_cates'][i].append(int(gt_ind))
            pred_to_gt[rel_cats[gt_triplets[int(gt_ind), 1]]][i].append(int(gt_ind))
    return pred_to_gt
| 14,355 | 39.439437 | 135 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/decoder_rnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import PackedSequence
from typing import Optional, Tuple
from lib.fpn.box_utils import nms_overlaps
from lib.word_vectors import obj_edge_vectors
from .highway_lstm_cuda.alternating_highway_lstm import block_orthogonal
import numpy as np
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.autograd.Variable):
    """
    Build an inverted-dropout mask shaped like ``tensor_for_masking``.

    Each element survives with probability ``1 - dropout_probability``, and
    surviving elements are scaled by ``1 / (1 - dropout_probability)`` so the
    masked tensor keeps the same expected value as the original.
    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    tensor_for_masking : torch.Variable, required.
        Never modified; only used for its size and CUDA/CPU placement.
    Returns
    -------
    A torch.FloatTensor mask of zeros and ``1 / (1 - dropout_probability)``.
    """
    keep_probability = 1.0 - dropout_probability
    # Sample Bernoulli keep/drop decisions, then copy them into a clone of
    # the input so the mask lives on the same device as the tensor.
    bernoulli_draws = torch.rand(tensor_for_masking.size()) > dropout_probability
    binary_mask = tensor_for_masking.clone()
    binary_mask.data.copy_(bernoulli_draws)
    # Scale mask by 1/keep_prob to preserve output statistics.
    return binary_mask.float().div(keep_probability)
class DecoderRNN(torch.nn.Module):
    def __init__(self, classes, embed_dim, inputs_dim, hidden_dim, recurrent_dropout_probability=0.2,
                 use_highway=True, use_input_projection_bias=True):
        """
        Initializes the decoder RNN
        :param classes: list of object-class names; sizes the label embedding
            and the output layer
        :param embed_dim: Dimension of the class-label embeddings
        :param inputs_dim: Dimension of the per-timestep input features
        :param hidden_dim: Hidden dim of the decoder
        :param recurrent_dropout_probability: dropout rate for the recurrent state
        :param use_highway: if True, allocate extra gate projections for
            highway connections
        :param use_input_projection_bias: whether the input projection has a bias
        """
        super(DecoderRNN, self).__init__()
        self.classes = classes
        # Pre-trained word vectors for 'start' + every class name.
        # NOTE(review): vectors are wv_dim=100 but the embedding is sized by
        # embed_dim — presumably embed_dim == 100; confirm at call sites.
        embed_vecs = obj_edge_vectors(['start'] + self.classes, wv_dim=100)
        self.obj_embed = nn.Embedding(len(self.classes), embed_dim)
        self.obj_embed.weight.data = embed_vecs
        self.hidden_size = hidden_dim
        self.inputs_dim = inputs_dim
        # IoU threshold used when suppressing duplicate detections at decode time.
        self.nms_thresh = 0.3
        self.recurrent_dropout_probability=recurrent_dropout_probability
        self.use_highway=use_highway
        # We do the projections for all the gates all at once, so if we are
        # using highway layers, we need some extra projections, which is
        # why the sizes of the Linear layers change here depending on this flag.
        if use_highway:
            self.input_linearity = torch.nn.Linear(self.input_size, 6 * self.hidden_size,
                                                   bias=use_input_projection_bias)
            self.state_linearity = torch.nn.Linear(self.hidden_size, 5 * self.hidden_size,
                                                   bias=True)
        else:
            self.input_linearity = torch.nn.Linear(self.input_size, 4 * self.hidden_size,
                                                   bias=use_input_projection_bias)
            self.state_linearity = torch.nn.Linear(self.hidden_size, 4 * self.hidden_size,
                                                   bias=True)
        # Projects the hidden state to per-class logits.
        self.out = nn.Linear(self.hidden_size, len(self.classes))
        self.reset_parameters()
@property
def input_size(self):
return self.inputs_dim + self.obj_embed.weight.size(1)
def reset_parameters(self):
# Use sensible default initializations for parameters.
block_orthogonal(self.input_linearity.weight.data, [self.hidden_size, self.input_size])
block_orthogonal(self.state_linearity.weight.data, [self.hidden_size, self.hidden_size])
self.state_linearity.bias.data.fill_(0.0)
# Initialize forget gate biases to 1.0 as per An Empirical
# Exploration of Recurrent Network Architectures, (Jozefowicz, 2015).
self.state_linearity.bias.data[self.hidden_size:2 * self.hidden_size].fill_(1.0)
def lstm_equations(self, timestep_input, previous_state, previous_memory, dropout_mask=None):
"""
Does the hairy LSTM math
:param timestep_input:
:param previous_state:
:param previous_memory:
:param dropout_mask:
:return:
"""
# Do the projections for all the gates all at once.
projected_input = self.input_linearity(timestep_input)
projected_state = self.state_linearity(previous_state)
# Main LSTM equations using relevant chunks of the big linear
# projections of the hidden state and inputs.
input_gate = torch.sigmoid(projected_input[:, 0 * self.hidden_size:1 * self.hidden_size] +
projected_state[:, 0 * self.hidden_size:1 * self.hidden_size])
forget_gate = torch.sigmoid(projected_input[:, 1 * self.hidden_size:2 * self.hidden_size] +
projected_state[:, 1 * self.hidden_size:2 * self.hidden_size])
memory_init = torch.tanh(projected_input[:, 2 * self.hidden_size:3 * self.hidden_size] +
projected_state[:, 2 * self.hidden_size:3 * self.hidden_size])
output_gate = torch.sigmoid(projected_input[:, 3 * self.hidden_size:4 * self.hidden_size] +
projected_state[:, 3 * self.hidden_size:4 * self.hidden_size])
memory = input_gate * memory_init + forget_gate * previous_memory
timestep_output = output_gate * torch.tanh(memory)
if self.use_highway:
highway_gate = torch.sigmoid(projected_input[:, 4 * self.hidden_size:5 * self.hidden_size] +
projected_state[:, 4 * self.hidden_size:5 * self.hidden_size])
highway_input_projection = projected_input[:, 5 * self.hidden_size:6 * self.hidden_size]
timestep_output = highway_gate * timestep_output + (1 - highway_gate) * highway_input_projection
# Only do dropout if the dropout prob is > 0.0 and we are in training mode.
if dropout_mask is not None and self.training:
timestep_output = timestep_output * dropout_mask
return timestep_output, memory
def forward(self, # pylint: disable=arguments-differ
inputs: PackedSequence,
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
labels=None, boxes_for_nms=None):
"""
Parameters
----------
inputs : PackedSequence, required.
A tensor of shape (batch_size, num_timesteps, input_size)
to apply the LSTM over.
initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (1, batch_size, output_dimension).
Returns
-------
A PackedSequence containing a torch.FloatTensor of shape
(batch_size, num_timesteps, output_dimension) representing
the outputs of the LSTM per timestep and a tuple containing
the LSTM state, with shape (1, batch_size, hidden_size) to
match the Pytorch API.
"""
if not isinstance(inputs, PackedSequence):
raise ValueError('inputs must be PackedSequence but got %s' % (type(inputs)))
assert isinstance(inputs, PackedSequence)
sequence_tensor, batch_lengths = inputs
batch_size = batch_lengths[0]
# We're just doing an LSTM decoder here so ignore states, etc
if initial_state is None:
previous_memory = Variable(sequence_tensor.data.new()
.resize_(batch_size, self.hidden_size).fill_(0))
previous_state = Variable(sequence_tensor.data.new()
.resize_(batch_size, self.hidden_size).fill_(0))
else:
assert len(initial_state) == 2
previous_state = initial_state[0].squeeze(0)
previous_memory = initial_state[1].squeeze(0)
previous_embed = self.obj_embed.weight[0, None].expand(batch_size, 100)
if self.recurrent_dropout_probability > 0.0:
dropout_mask = get_dropout_mask(self.recurrent_dropout_probability, previous_memory)
else:
dropout_mask = None
# Only accumulating label predictions here, discarding everything else
out_dists = []
out_commitments = []
end_ind = 0
for i, l_batch in enumerate(batch_lengths):
start_ind = end_ind
end_ind = end_ind + l_batch
if previous_memory.size(0) != l_batch:
previous_memory = previous_memory[:l_batch]
previous_state = previous_state[:l_batch]
previous_embed = previous_embed[:l_batch]
if dropout_mask is not None:
dropout_mask = dropout_mask[:l_batch]
timestep_input = torch.cat((sequence_tensor[start_ind:end_ind], previous_embed), 1)
previous_state, previous_memory = self.lstm_equations(timestep_input, previous_state,
previous_memory, dropout_mask=dropout_mask)
pred_dist = self.out(previous_state)
out_dists.append(pred_dist)
if self.training:
labels_to_embed = labels[start_ind:end_ind].clone()
# Whenever labels are 0 set input to be our max prediction
nonzero_pred = pred_dist[:, 1:].max(1)[1] + 1
is_bg = (labels_to_embed.data == 0).nonzero()
if is_bg.dim() > 0:
labels_to_embed[is_bg.squeeze(1)] = nonzero_pred[is_bg.squeeze(1)]
out_commitments.append(labels_to_embed)
previous_embed = self.obj_embed(labels_to_embed+1)
else:
assert l_batch == 1
out_dist_sample = F.softmax(pred_dist, dim=1)
# if boxes_for_nms is not None:
# out_dist_sample[domains_allowed[i] == 0] = 0.0
# Greedily take the max here amongst non-bgs
best_ind = out_dist_sample[:, 1:].max(1)[1] + 1
# if boxes_for_nms is not None and i < boxes_for_nms.size(0):
# best_int = int(best_ind.data[0])
# domains_allowed[i:, best_int] *= (1 - is_overlap[i, i:, best_int])
out_commitments.append(best_ind)
previous_embed = self.obj_embed(best_ind+1)
# Do NMS here as a post-processing step
if boxes_for_nms is not None and not self.training:
is_overlap = nms_overlaps(boxes_for_nms.data).view(
boxes_for_nms.size(0), boxes_for_nms.size(0), boxes_for_nms.size(1)
).cpu().numpy() >= self.nms_thresh
# is_overlap[np.arange(boxes_for_nms.size(0)), np.arange(boxes_for_nms.size(0))] = False
out_dists_sampled = F.softmax(torch.cat(out_dists,0), 1).data.cpu().numpy()
out_dists_sampled[:,0] = 0
out_commitments = out_commitments[0].data.new(len(out_commitments)).fill_(0)
for i in range(out_commitments.size(0)):
box_ind, cls_ind = np.unravel_index(out_dists_sampled.argmax(), out_dists_sampled.shape)
out_commitments[int(box_ind)] = int(cls_ind)
out_dists_sampled[is_overlap[box_ind,:,cls_ind], cls_ind] = 0.0
out_dists_sampled[box_ind] = -1.0 # This way we won't re-sample
out_commitments = Variable(out_commitments)
else:
out_commitments = torch.cat(out_commitments, 0)
return torch.cat(out_dists, 0), out_commitments
| 12,192 | 47.384921 | 109 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/highway_lstm_cuda/alternating_highway_lstm.py | from typing import Tuple
from overrides import overrides
import torch
from torch.autograd import Function, Variable
from torch.nn import Parameter
from torch.nn.utils.rnn import PackedSequence, pad_packed_sequence, pack_padded_sequence
import itertools
from ._ext import highway_lstm_layer
def block_orthogonal(tensor, split_sizes, gain=1.0):
    """
    An initializer which allows initializing model parameters in "blocks". This is helpful
    in the case of recurrent cores which use multiple gates applied to linear projections,
    which can be computed efficiently if they are concatenated together. However, they are
    separate parameters which should be initialized independently.
    Parameters
    ----------
    tensor : ``torch.Tensor``, required.
        A 2D tensor (or Variable wrapping one) to initialize in place.
    split_sizes : List[int], required.
        A list of length ``tensor.ndim()`` specifying the size of the
        blocks along that particular dimension. E.g. ``[10, 20]`` would
        result in the tensor being split into chunks of size 10 along the
        first dimension and 20 along the second.
    gain : float, optional (default = 1.0)
        The gain (scaling) applied to the orthogonal initialization.
    Returns
    -------
    The input ``tensor``, initialized in place (always returned, for chaining).
    """
    # Unwrap a Variable ONCE instead of recursing. Under newer PyTorch,
    # isinstance(Tensor, Variable) is True and `.data` is itself a Tensor, so
    # the old recursive unwrap (block_orthogonal(tensor.data, ...)) would
    # recurse forever.
    data = tensor.data if isinstance(tensor, Variable) else tensor
    sizes = list(data.size())
    if any(a % b != 0 for a, b in zip(sizes, split_sizes)):
        raise ValueError("tensor dimensions must be divisible by their respective "
                         "split_sizes. Found size: {} and split_sizes: {}".format(sizes, split_sizes))
    # Block start offsets along each dimension.
    indexes = [list(range(0, max_size, split))
               for max_size, split in zip(sizes, split_sizes)]
    # Iterate over all possible blocks within the tensor.
    for block_start_indices in itertools.product(*indexes):
        # tensor[start: start + step, ...] slice for each dimension.
        block_slice = tuple([slice(start_index, start_index + step)
                             for start_index, step in zip(block_start_indices, split_sizes)])
        # Only 2D tensors are supported (weight matrices).
        assert len(block_slice) == 2
        block_sizes = [x.stop - x.start for x in block_slice]
        # Initialize a max-size square orthogonally, then take the sub-block,
        # so non-square blocks still get orthonormal rows/columns.
        tensor_copy = data.new(max(block_sizes), max(block_sizes))
        # orthogonal_ is the non-deprecated spelling of torch.nn.init.orthogonal.
        torch.nn.init.orthogonal_(tensor_copy, gain=gain)
        data[block_slice] = tensor_copy[0:block_sizes[0], 0:block_sizes[1]]
    return tensor
class _AlternatingHighwayLSTMFunction(Function):
    """
    Autograd Function wrapping the custom highway-LSTM CUDA kernel.
    Both the forward and backward passes are computed entirely by the compiled
    ``highway_lstm_layer`` extension; this class only allocates scratch
    buffers and marshals arguments to and from the kernel.
    """
    def __init__(self, input_size: int, hidden_size: int, num_layers: int, train: bool) -> None:
        super(_AlternatingHighwayLSTMFunction, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.train = train
    @overrides
    def forward(self, # pylint: disable=arguments-differ
                inputs: torch.Tensor,
                weight: torch.Tensor,
                bias: torch.Tensor,
                state_accumulator: torch.Tensor,
                memory_accumulator: torch.Tensor,
                dropout_mask: torch.Tensor,
                lengths: torch.Tensor,
                gates: torch.Tensor) -> Tuple[torch.Tensor, None]:
        sequence_length, batch_size, input_size = inputs.size()
        # Scratch buffers for the per-timestep input (6h) and state (5h)
        # gate projections used inside the kernel.
        tmp_i = inputs.new(batch_size, 6 * self.hidden_size)
        tmp_h = inputs.new(batch_size, 5 * self.hidden_size)
        is_training = 1 if self.train else 0
        highway_lstm_layer.highway_lstm_forward_cuda(input_size, # type: ignore # pylint: disable=no-member
                                                     self.hidden_size,
                                                     batch_size,
                                                     self.num_layers,
                                                     sequence_length,
                                                     inputs,
                                                     lengths,
                                                     state_accumulator,
                                                     memory_accumulator,
                                                     tmp_i,
                                                     tmp_h,
                                                     weight,
                                                     bias,
                                                     dropout_mask,
                                                     gates,
                                                     is_training)
        # Stash everything the backward kernel needs to recompute gradients.
        self.save_for_backward(inputs, lengths, weight, bias, state_accumulator,
                               memory_accumulator, dropout_mask, gates)
        # The state_accumulator has shape: (num_layers, sequence_length + 1, batch_size, hidden_size)
        # so for the output, we want the last layer and all but the first timestep, which was the
        # initial state.
        output = state_accumulator[-1, 1:, :, :]
        return output, state_accumulator[:, 1:, :, :]
    @overrides
    def backward(self, grad_output, grad_hy): # pylint: disable=arguments-differ
        (inputs, lengths, weight, bias, state_accumulator, # pylint: disable=unpacking-non-sequence
         memory_accumulator, dropout_mask, gates) = self.saved_tensors
        inputs = inputs.contiguous()
        sequence_length, batch_size, input_size = inputs.size()
        parameters_need_grad = 1 if self.needs_input_grad[1] else 0 # pylint: disable=unsubscriptable-object
        # Zero-initialized gradient buffers; the kernel accumulates into them.
        grad_input = inputs.new().resize_as_(inputs).zero_()
        grad_state_accumulator = inputs.new().resize_as_(state_accumulator).zero_()
        grad_memory_accumulator = inputs.new().resize_as_(memory_accumulator).zero_()
        grad_weight = inputs.new()
        grad_bias = inputs.new()
        # No gradients flow to the dropout mask, lengths or gates buffers.
        grad_dropout = None
        grad_lengths = None
        grad_gates = None
        if parameters_need_grad:
            grad_weight.resize_as_(weight).zero_()
            grad_bias.resize_as_(bias).zero_()
        tmp_i_gates_grad = inputs.new().resize_(batch_size, 6 * self.hidden_size).zero_()
        tmp_h_gates_grad = inputs.new().resize_(batch_size, 5 * self.hidden_size).zero_()
        is_training = 1 if self.train else 0
        highway_lstm_layer.highway_lstm_backward_cuda(input_size, # pylint: disable=no-member
                                                      self.hidden_size,
                                                      batch_size,
                                                      self.num_layers,
                                                      sequence_length,
                                                      grad_output,
                                                      lengths,
                                                      grad_state_accumulator,
                                                      grad_memory_accumulator,
                                                      inputs,
                                                      state_accumulator,
                                                      memory_accumulator,
                                                      weight,
                                                      gates,
                                                      dropout_mask,
                                                      tmp_h_gates_grad,
                                                      tmp_i_gates_grad,
                                                      grad_hy,
                                                      grad_input,
                                                      grad_weight,
                                                      grad_bias,
                                                      is_training,
                                                      parameters_need_grad)
        # One gradient (or None) per forward() argument, in the same order.
        return (grad_input, grad_weight, grad_bias, grad_state_accumulator,
                grad_memory_accumulator, grad_dropout, grad_lengths, grad_gates)
class AlternatingHighwayLSTM(torch.nn.Module):
    """
    A stacked LSTM with LSTM layers which alternate between going forwards over
    the sequence and going backwards, with highway connections between each of
    the alternating layers. This implementation is based on the description in
    `Deep Semantic Role Labelling - What works and what's next
    <https://homes.cs.washington.edu/~luheng/files/acl2017_hllz.pdf>`_ .
    Parameters
    ----------
    input_size : int, required
        The dimension of the inputs to the LSTM.
    hidden_size : int, required
        The dimension of the outputs of the LSTM.
    num_layers : int, required
        The number of stacked LSTMs to use.
    recurrent_dropout_probability: float, optional (default = 0.0)
        The dropout probability to be used in a dropout scheme as stated in
        `A Theoretically Grounded Application of Dropout in Recurrent Neural Networks
        <https://arxiv.org/abs/1512.05287>`_ .
    Returns
    -------
    output : PackedSequence
        The outputs of the interleaved LSTMs per timestep. A tensor of shape
        (batch_size, max_timesteps, hidden_size) where for a given batch
        element, all outputs past the sequence length for that batch are
        zero tensors.
    """
    def __init__(self,
                 input_size: int,
                 hidden_size: int,
                 num_layers: int = 1,
                 recurrent_dropout_probability: float = 0) -> None:
        super(AlternatingHighwayLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.recurrent_dropout_probability = recurrent_dropout_probability
        self.training = True
        # Input dimensions consider the fact that we do
        # all of the LSTM projections (and highway parts)
        # in a single matrix multiplication.
        input_projection_size = 6 * hidden_size
        state_projection_size = 5 * hidden_size
        bias_size = 5 * hidden_size
        # Here we are creating a single weight and bias with the
        # parameters for all layers unfolded into it. This is necessary
        # because unpacking and re-packing the weights inside the
        # kernel would be slow, as it would happen every time it is called.
        total_weight_size = 0
        total_bias_size = 0
        for layer in range(num_layers):
            # Only the first layer consumes the raw inputs; subsequent
            # layers consume the previous layer's hidden states.
            layer_input_size = input_size if layer == 0 else hidden_size
            input_weights = input_projection_size * layer_input_size
            state_weights = state_projection_size * hidden_size
            total_weight_size += input_weights + state_weights
            total_bias_size += bias_size
        self.weight = Parameter(torch.FloatTensor(total_weight_size))
        self.bias = Parameter(torch.FloatTensor(total_bias_size))
        self.reset_parameters()
    def reset_parameters(self) -> None:
        # Initialize each layer's slices of the flat weight block-orthogonally,
        # biases to zero, forget-gate biases to one.
        self.bias.data.zero_()
        weight_index = 0
        bias_index = 0
        for i in range(self.num_layers):
            input_size = self.input_size if i == 0 else self.hidden_size
            # Create a tensor of the right size and initialize it.
            init_tensor = self.weight.data.new(input_size, self.hidden_size * 6).zero_()
            block_orthogonal(init_tensor, [input_size, self.hidden_size])
            # Copy it into the flat weight.
            self.weight.data[weight_index: weight_index + init_tensor.nelement()] \
                .view_as(init_tensor).copy_(init_tensor)
            weight_index += init_tensor.nelement()
            # Same for the recurrent connection weight.
            init_tensor = self.weight.data.new(self.hidden_size, self.hidden_size * 5).zero_()
            block_orthogonal(init_tensor, [self.hidden_size, self.hidden_size])
            self.weight.data[weight_index: weight_index + init_tensor.nelement()] \
                .view_as(init_tensor).copy_(init_tensor)
            weight_index += init_tensor.nelement()
            # Set the forget bias to 1.
            self.bias.data[bias_index + self.hidden_size:bias_index + 2 * self.hidden_size].fill_(1)
            bias_index += 5 * self.hidden_size
    def forward(self, inputs, initial_state=None) -> Tuple[PackedSequence, torch.Tensor]:
        """
        Parameters
        ----------
        inputs : ``PackedSequence``, required.
            A batch first ``PackedSequence`` to run the stacked LSTM over.
        initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None)
            Currently, this is ignored.
        Returns
        -------
        output_sequence : ``PackedSequence``
            The encoded sequence of shape (batch_size, sequence_length, hidden_size)
        final_states: ``torch.Tensor``
            Always ``None`` in this implementation (kept for API compatibility).
        """
        inputs, lengths = pad_packed_sequence(inputs, batch_first=False)
        sequence_length, batch_size, _ = inputs.size()
        # Accumulators hold state/memory for every layer and timestep; index 0
        # along the time axis is the (zero) initial state.
        accumulator_shape = [self.num_layers, sequence_length + 1, batch_size, self.hidden_size]
        state_accumulator = Variable(inputs.data.new(*accumulator_shape).zero_(), requires_grad=False)
        memory_accumulator = Variable(inputs.data.new(*accumulator_shape).zero_(), requires_grad=False)
        # Variational dropout: one mask per layer/batch element, reused at every
        # timestep. All ones (no-op) at eval time.
        dropout_weights = inputs.data.new().resize_(self.num_layers, batch_size, self.hidden_size).fill_(1.0)
        if self.training:
            # Normalize by 1 - dropout_prob to preserve the output statistics of the layer.
            dropout_weights.bernoulli_(1 - self.recurrent_dropout_probability) \
                .div_((1 - self.recurrent_dropout_probability))
        dropout_weights = Variable(dropout_weights, requires_grad=False)
        # Raw gate activations saved by the kernel for the backward pass.
        gates = Variable(inputs.data.new().resize_(self.num_layers,
                                                   sequence_length,
                                                   batch_size, 6 * self.hidden_size))
        lengths_variable = Variable(torch.IntTensor(lengths))
        implementation = _AlternatingHighwayLSTMFunction(self.input_size,
                                                         self.hidden_size,
                                                         num_layers=self.num_layers,
                                                         train=self.training)
        output, _ = implementation(inputs, self.weight, self.bias, state_accumulator,
                                   memory_accumulator, dropout_weights, lengths_variable, gates)
        # Re-pack so downstream consumers see the original packed format.
        output = pack_padded_sequence(output, lengths, batch_first=False)
        return output, None
| 15,176 | 48.924342 | 109 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/highway_lstm_cuda/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/highway_lstm_cuda/build.py | # pylint: disable=invalid-name
import os
import torch
from torch.utils.ffi import create_extension
if not torch.cuda.is_available():
raise Exception('HighwayLSTM can only be compiled with CUDA')
sources = ['src/highway_lstm_cuda.c']
headers = ['src/highway_lstm_cuda.h']
defines = [('WITH_CUDA', None)]
with_cuda = True
this_file = os.path.dirname(os.path.realpath(__file__))
extra_objects = ['src/highway_lstm_kernel.cu.o']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
ffi = create_extension(
'_ext.highway_lstm_layer',
headers=headers,
sources=sources,
define_macros=defines,
relative_to=__file__,
with_cuda=with_cuda,
extra_objects=extra_objects
)
if __name__ == '__main__':
ffi.build()
| 798 | 25.633333 | 75 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/highway_lstm_cuda/_ext/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/highway_lstm_cuda/_ext/highway_lstm_layer/__init__.py |
from torch.utils.ffi import _wrap_function
from ._highway_lstm_layer import lib as _lib, ffi as _ffi
__all__ = []
def _import_symbols(locals):
    """Expose every symbol of the compiled extension in *locals* and __all__."""
    for symbol in dir(_lib):
        wrapped = _wrap_function(getattr(_lib, symbol), _ffi)
        locals[symbol] = wrapped
        __all__.append(symbol)
_import_symbols(locals())
| 317 | 23.461538 | 57 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/box_utils.py | import torch
import numpy as np
from torch.nn import functional as F
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps as bbox_overlaps_np
from lib.fpn.box_intersections_cpu.bbox import bbox_intersections as bbox_intersections_np
def bbox_loss(prior_boxes, deltas, gt_boxes, eps=1e-4, scale_before=1):
    """
    Computes the loss for predicting the GT boxes from prior boxes
    :param prior_boxes: [num_boxes, 4] (x1, y1, x2, y2)
    :param deltas: [num_boxes, 4] (tx, ty, th, tw)
    :param gt_boxes: [num_boxes, 4] (x1, y1, x2, y2)
    :param eps: small constant guarding against division by zero when num_boxes is 0
    :param scale_before: unused; kept for interface compatibility
    :return: scalar smooth-L1 loss, summed over targets and normalized by the box count
    """
    prior_centers = center_size(prior_boxes) #(cx, cy, w, h)
    gt_centers = center_size(gt_boxes) #(cx, cy, w, h)
    # Standard Faster R-CNN regression targets: center offsets are normalized
    # by the prior's size, sizes are regressed in log space.
    center_targets = (gt_centers[:, :2] - prior_centers[:, :2]) / prior_centers[:, 2:]
    size_targets = torch.log(gt_centers[:, 2:]) - torch.log(prior_centers[:, 2:])
    all_targets = torch.cat((center_targets, size_targets), 1)
    # reduction='sum' replaces the deprecated size_average=False (same behavior).
    loss = F.smooth_l1_loss(deltas, all_targets, reduction='sum')/(eps + prior_centers.size(0))
    return loss
def bbox_preds(boxes, deltas):
    """
    Converts "deltas" (predicted by the network) along with prior boxes
    into (x1, y1, x2, y2) representation.
    :param boxes: Prior boxes, represented as (x1, y1, x2, y2)
    :param deltas: Offsets (tx, ty, tw, th)
    :return: Transformed boxes in (x1, y1, x2, y2) form
    """
    # Nothing to transform for an empty set of priors.
    if boxes.size(0) == 0:
        return boxes
    centers = center_size(boxes)
    # Shift centers proportionally to the prior size; scale sizes in log space.
    shifted_xy = centers[:, :2] + centers[:, 2:] * deltas[:, :2]
    scaled_wh = centers[:, 2:] * torch.exp(deltas[:, 2:])
    return point_form(torch.cat((shifted_xy, scaled_wh), 1))
def center_size(boxes):
    """Convert (x1, y1, x2, y2) corner boxes to (cx, cy, w, h) center-size form.
    Args:
        boxes: (tensor or ndarray) boxes in point form, shape [N, 4]
    Return:
        boxes in (cx, cy, w, h) form, same container type as the input.
    """
    # +1 because the corner coordinates are inclusive pixel indices.
    wh = boxes[:, 2:] - boxes[:, :2] + 1.0
    centers = boxes[:, :2] + 0.5 * wh
    if isinstance(boxes, np.ndarray):
        return np.column_stack((centers, wh))
    return torch.cat((centers, wh), 1)
def point_form(boxes):
    """Convert (cx, cy, w, h) center-size boxes to (xmin, ymin, xmax, ymax) form.
    Args:
        boxes: (tensor or ndarray) boxes in center-size form, shape [N, 4]
    Return:
        boxes in corner form, same container type as the input.
    """
    mins = boxes[:, :2] - 0.5 * boxes[:, 2:]
    # (w - 2) mirrors the inclusive-pixel convention used by center_size.
    maxs = boxes[:, :2] + 0.5 * (boxes[:, 2:] - 2.0)
    if isinstance(boxes, np.ndarray):
        return np.column_stack((mins, maxs))
    return torch.cat((mins, maxs), 1)
###########################################################################
### Torch Utils, creds to Max de Groot
###########################################################################
def bbox_intersections(box_a, box_b):
    """Pairwise intersection areas between two sets of (x1, y1, x2, y2) boxes.
    Args:
        box_a: (tensor or ndarray) bounding boxes, Shape: [A, 4].
        box_b: (same type as box_a) bounding boxes, Shape: [B, 4].
    Return:
        intersection areas, Shape: [A, B].
    """
    if isinstance(box_a, np.ndarray):
        # Numpy inputs are delegated to the cython implementation.
        assert isinstance(box_b, np.ndarray)
        return bbox_intersections_np(box_a, box_b)
    A, B = box_a.size(0), box_b.size(0)
    # Broadcast both sets to [A, B, 2] and take the overlapping extent.
    upper = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
                      box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
    lower = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
                      box_b[:, :2].unsqueeze(0).expand(A, B, 2))
    # +1 for inclusive coordinates; clamp removes non-overlapping pairs.
    side = torch.clamp(upper - lower + 1.0, min=0)
    return side[:, :, 0] * side[:, :, 1]
def bbox_overlaps(box_a, box_b):
    """Pairwise Jaccard (IoU) overlap between two sets of (x1, y1, x2, y2) boxes.
    E.g.:
        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
    Args:
        box_a: (tensor or ndarray) Ground truth bounding boxes, Shape: [num_objects, 4]
        box_b: (same type) Prior boxes from priorbox layers, Shape: [num_priors, 4]
    Return:
        jaccard overlap, Shape: [box_a.size(0), box_b.size(0)]
    """
    if isinstance(box_a, np.ndarray):
        # Numpy inputs go through the cython implementation.
        assert isinstance(box_b, np.ndarray)
        return bbox_overlaps_np(box_a, box_b)
    inter = bbox_intersections(box_a, box_b)
    # Areas use the inclusive-coordinate (+1) convention, matching the
    # intersection computation.
    area_a = (box_a[:, 2] - box_a[:, 0] + 1.0) * (box_a[:, 3] - box_a[:, 1] + 1.0)
    area_b = (box_b[:, 2] - box_b[:, 0] + 1.0) * (box_b[:, 3] - box_b[:, 1] + 1.0)
    union = area_a.unsqueeze(1).expand_as(inter) + area_b.unsqueeze(0).expand_as(inter) - inter
    return inter / union  # [A,B]
def nms_overlaps(boxes):
    """Per-class pairwise IoU for NMS.
    Args:
        boxes: [N, num_classes, 4] tensor of per-class (x1, y1, x2, y2) boxes.
    Return:
        [N, N, num_classes] tensor of IoUs between every pair of boxes,
        computed independently per class channel.
    """
    assert boxes.dim() == 3
    N = boxes.size(0)
    nc = boxes.size(1)
    # Pairwise overlapping extent, broadcast to [N, N, nc, 2].
    upper = torch.min(boxes[:, None, :, 2:].expand(N, N, nc, 2),
                      boxes[None, :, :, 2:].expand(N, N, nc, 2))
    lower = torch.max(boxes[:, None, :, :2].expand(N, N, nc, 2),
                      boxes[None, :, :, :2].expand(N, N, nc, 2))
    side = torch.clamp(upper - lower + 1.0, min=0)
    inters = side[:, :, :, 0] * side[:, :, :, 1]  # [N, N, nc]
    # Per-box per-class areas with the inclusive (+1) convention.
    flat = boxes.view(-1, 4)
    areas = ((flat[:, 2] - flat[:, 0] + 1.0) *
             (flat[:, 3] - flat[:, 1] + 1.0)).view(N, nc)
    union = areas[None] + areas[:, None] - inters
    return inters / union
| 5,965 | 37.24359 | 98 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/generate_anchors.py | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
from config import IM_SCALE
import numpy as np
def generate_anchors(base_size=16, feat_stride=16, anchor_scales=(8,16,32), anchor_ratios=(0.5,1,2)):
    """Tile the base anchors over every feat_stride-spaced grid cell of an
    IM_SCALE-sized image.
    Returns an (h, w, A, 4) array of (x1, y1, x2, y2) anchors.
    """
    base = generate_base_anchors(base_size=base_size,
                                 ratios=np.array(anchor_ratios),
                                 scales=np.array(anchor_scales))
    # Pixel offsets of every feature-map cell along one axis.
    cell_offsets = np.arange(0, IM_SCALE // feat_stride) * feat_stride
    xs, ys = np.meshgrid(cell_offsets, cell_offsets)
    shifts = np.stack([xs, ys, xs, ys], -1)  # (h, w, 4)
    # Broadcast-add every base anchor to every cell shift.
    return shifts[:, :, None] + base[None, None]  # (h, w, A, 4)
def generate_base_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2 ** np.arange(3, 6)):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.
    """
    base_anchor = np.array([1, 1, base_size, base_size]) - 1
    # First enumerate aspect ratios of the reference window, then scale each.
    per_ratio = _ratio_enum(base_anchor, ratios)
    scaled = [_scale_enum(per_ratio[i, :], scales) for i in range(per_ratio.shape[0])]
    return np.vstack(scaled)
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
    """
    Enumerate a set of anchors for each aspect ratio wrt an anchor,
    keeping the reference area (approximately) constant.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    area = w * h
    # NOTE: unlike the original Faster R-CNN code, widths are NOT rounded.
    ws = np.sqrt(area / ratios)
    hs = ws * ratios
    return _mkanchors(ws, hs, x_ctr, y_ctr)
def _scale_enum(anchor, scales):
    """
    Enumerate a set of anchors for each scale wrt an anchor,
    keeping the center fixed.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)
| 2,824 | 29.053191 | 101 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/anchor_targets.py | """
Generates anchor targets to train the detector. Does this during the collate step in training
as it's much cheaper to do this on a separate thread.
Heavily adapted from faster_rcnn/rpn_msr/anchor_target_layer.py.
"""
import numpy as np
import numpy.random as npr
from config import IM_SCALE, RPN_NEGATIVE_OVERLAP, RPN_POSITIVE_OVERLAP, \
RPN_BATCHSIZE, RPN_FG_FRACTION, ANCHOR_SIZE, ANCHOR_SCALES, ANCHOR_RATIOS
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from lib.fpn.generate_anchors import generate_anchors
def anchor_target_layer(gt_boxes, im_size,
                        allowed_border=0):
    """
    Assign anchors to ground-truth targets. Produces anchor classification
    labels and bounding-box regression targets.
    for each (H, W) location i
       generate 3 anchor boxes centered on cell i
       filter out-of-image anchors
       measure GT overlap
    :param gt_boxes: [x1, y1, x2, y2] boxes. These are assumed to be at the same scale as
        the image (IM_SCALE)
    :param im_size: Size of the image (h, w). This is assumed to be scaled to IM_SCALE
    :param allowed_border: how far (in pixels) an anchor may extend past the image
        border and still be kept
    :return: (anchors, anchor_inds, bbox_targets, labels) for the sampled anchors:
        kept anchor boxes, their (h, w, A) grid indices, the matched GT box per
        anchor, and 0/1 background/foreground labels
    """
    if max(im_size) != IM_SCALE:
        raise ValueError("im size is {}".format(im_size))
    h, w = im_size
    # Get the indices of the anchors in the feature map.
    # h, w, A, 4
    ans_np = generate_anchors(base_size=ANCHOR_SIZE,
                              feat_stride=16,
                              anchor_scales=ANCHOR_SCALES,
                              anchor_ratios=ANCHOR_RATIOS,
                              )
    ans_np_flat = ans_np.reshape((-1, 4))
    # Keep only anchors fully inside the image (within allowed_border slack).
    inds_inside = np.where(
        (ans_np_flat[:, 0] >= -allowed_border) &
        (ans_np_flat[:, 1] >= -allowed_border) &
        (ans_np_flat[:, 2] < w + allowed_border) &  # width
        (ans_np_flat[:, 3] < h + allowed_border)  # height
    )[0]
    good_ans_flat = ans_np_flat[inds_inside]
    if good_ans_flat.size == 0:
        raise ValueError("There were no good anchors for an image of size {} with boxes {}".format(im_size, gt_boxes))
    # overlaps between the anchors and the gt boxes [num_anchors, num_gtboxes]
    overlaps = bbox_overlaps(good_ans_flat, gt_boxes)
    # Best-matching GT box for each anchor, and the IoU of that match.
    anchor_to_gtbox = overlaps.argmax(axis=1)
    max_overlaps = overlaps[np.arange(anchor_to_gtbox.shape[0]), anchor_to_gtbox]
    # Best-matching anchor for each GT box (so every GT gets at least one anchor).
    gtbox_to_anchor = overlaps.argmax(axis=0)
    gt_max_overlaps = overlaps[gtbox_to_anchor, np.arange(overlaps.shape[1])]
    gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
    # Good anchors are those that match SOMEWHERE within a decent tolerance
    # label: 1 is positive, 0 is negative, -1 is dont care.
    # assign bg labels first so that positive labels can clobber them
    labels = (-1) * np.ones(overlaps.shape[0], dtype=np.int64)
    labels[max_overlaps < RPN_NEGATIVE_OVERLAP] = 0
    labels[gt_argmax_overlaps] = 1
    labels[max_overlaps >= RPN_POSITIVE_OVERLAP] = 1
    # subsample positive labels if we have too many
    num_fg = int(RPN_FG_FRACTION * RPN_BATCHSIZE)
    fg_inds = np.where(labels == 1)[0]
    if len(fg_inds) > num_fg:
        labels[npr.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)] = -1
    # subsample negative labels if we have too many
    num_bg = RPN_BATCHSIZE - np.sum(labels == 1)
    bg_inds = np.where(labels == 0)[0]
    if len(bg_inds) > num_bg:
        labels[npr.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)] = -1
    # Get the labels at the original size
    # Scatter sampled labels back onto the full (unfiltered) anchor grid.
    labels_unmap = (-1) * np.ones(ans_np_flat.shape[0], dtype=np.int64)
    labels_unmap[inds_inside] = labels
    # h, w, A
    labels_unmap_res = labels_unmap.reshape(ans_np.shape[:-1])
    anchor_inds = np.column_stack(np.where(labels_unmap_res >= 0))
    # These ought to be in the same order
    anchor_inds_flat = np.where(labels >= 0)[0]
    anchors = good_ans_flat[anchor_inds_flat]
    bbox_targets = gt_boxes[anchor_to_gtbox[anchor_inds_flat]]
    labels = labels[anchor_inds_flat]
    assert np.all(labels >= 0)
    return anchors, anchor_inds, bbox_targets, labels
| 4,047 | 40.306122 | 118 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/proposal_assignments/proposal_assignments_rel.py | # --------------------------------------------------------
# Goal: assign ROIs to targets
# --------------------------------------------------------
import numpy as np
import numpy.random as npr
from config import BG_THRESH_HI, BG_THRESH_LO, FG_FRACTION_REL, ROIS_PER_IMG_REL, REL_FG_FRACTION, \
RELS_PER_IMG
from lib.fpn.box_utils import bbox_overlaps
from lib.pytorch_misc import to_variable, nonintersecting_2d_inds
from collections import defaultdict
import torch
@to_variable
def proposal_assignments_rel(rpn_rois, gt_boxes, gt_classes, gt_rels, image_offset, fg_thresh=0.5):
    """
    Assign object detection proposals to ground-truth targets and sample
    relationship (subject, object, predicate) triples for training.

    :param rpn_rois: [num_rois, 5] of [img_ind, x1, y1, x2, y2]
    :param gt_boxes: [num_boxes, 4] of [x1, y1, x2, y2]
    :param gt_classes: [num_boxes, 2] of [img_ind, class]
    :param gt_rels: [num_gt_rels, 4] of [img_ind, box_0, box_1, rel type]
    :param image_offset: offset subtracted from GT image indices so they are
        0-based within this minibatch
    :param fg_thresh: overlap threshold for a ROI to be considered foreground
    :return:
        rois: [num_rois, 5]
        labels: [num_rois] array of labels
        bbox_targets: [num_rois, 4] array of targets for the labels
        rel_labels: [num_rels, 4] (img ind, box0 ind, box1 ind, rel type)
    """
    fg_rois_per_image = int(np.round(ROIS_PER_IMG_REL * FG_FRACTION_REL))
    fg_rels_per_image = int(np.round(REL_FG_FRACTION * RELS_PER_IMG))

    pred_inds_np = rpn_rois[:, 0].cpu().numpy().astype(np.int64)
    pred_boxes_np = rpn_rois[:, 1:].cpu().numpy()
    gt_boxes_np = gt_boxes.cpu().numpy()
    gt_classes_np = gt_classes.cpu().numpy()
    gt_rels_np = gt_rels.cpu().numpy()

    # Make the GT image indices 0-based for this minibatch.
    gt_classes_np[:, 0] -= image_offset
    gt_rels_np[:, 0] -= image_offset

    num_im = gt_classes_np[:, 0].max()+1

    rois = []
    obj_labels = []
    rel_labels = []
    bbox_targets = []

    num_box_seen = 0

    for im_ind in range(num_im):
        pred_ind = np.where(pred_inds_np == im_ind)[0]

        gt_ind = np.where(gt_classes_np[:, 0] == im_ind)[0]
        gt_boxes_i = gt_boxes_np[gt_ind]
        gt_classes_i = gt_classes_np[gt_ind, 1]
        gt_rels_i = gt_rels_np[gt_rels_np[:, 0] == im_ind, 1:]

        # Append the GT boxes to the proposals so that every GT box can be
        # matched (guarantees at least one foreground candidate per GT).
        pred_boxes_i = np.concatenate((pred_boxes_np[pred_ind], gt_boxes_i), 0)

        ious = bbox_overlaps(pred_boxes_i, gt_boxes_i)
        obj_inds_i, obj_labels_i, obj_assignments_i = _sel_inds(ious, gt_classes_i,
                                                                fg_thresh, fg_rois_per_image, ROIS_PER_IMG_REL)

        all_rels_i = _sel_rels(ious[obj_inds_i], pred_boxes_i[obj_inds_i], obj_labels_i,
                               gt_classes_i, gt_rels_i,
                               fg_thresh=fg_thresh, fg_rels_per_image=fg_rels_per_image)
        # Shift the per-image box indices so they index the minibatch ROI list.
        all_rels_i[:,0:2] += num_box_seen

        rois.append(np.column_stack((
            im_ind * np.ones(obj_inds_i.shape[0], dtype=np.float32),
            pred_boxes_i[obj_inds_i],
        )))
        obj_labels.append(obj_labels_i)
        rel_labels.append(np.column_stack((
            im_ind*np.ones(all_rels_i.shape[0], dtype=np.int64),
            all_rels_i,
        )))
        bbox_targets.append(gt_boxes_i[obj_assignments_i])

        # obj_inds_i is 1-D, so .size is the number of kept boxes.
        num_box_seen += obj_inds_i.size

    # BUGFIX: `async=True` is a SyntaxError on Python >= 3.7 (`async` is a
    # reserved keyword); PyTorch renamed the argument to `non_blocking`.
    rois = torch.FloatTensor(np.concatenate(rois, 0)).cuda(rpn_rois.get_device(), non_blocking=True)
    labels = torch.LongTensor(np.concatenate(obj_labels, 0)).cuda(rpn_rois.get_device(), non_blocking=True)
    bbox_targets = torch.FloatTensor(np.concatenate(bbox_targets, 0)).cuda(rpn_rois.get_device(),
                                                                           non_blocking=True)
    rel_labels = torch.LongTensor(np.concatenate(rel_labels, 0)).cuda(rpn_rois.get_device(),
                                                                      non_blocking=True)

    return rois, labels, bbox_targets, rel_labels
def _sel_rels(ious, pred_boxes, pred_labels, gt_classes, gt_rels, fg_thresh=0.5, fg_rels_per_image=128, num_sample_per_gt=1, filter_non_overlap=True):
    """
    Selects the relations needed for training.

    For each GT relation, sample up to `num_sample_per_gt` candidate box pairs
    that match the GT subject/object (class-consistent and IoU >= fg_thresh),
    then pad with background (predicate 0) pairs.

    :param ious: [num_pred', num_gt] IoU between candidate and GT boxes
    :param pred_boxes: [num_pred', 4] candidate boxes
    :param pred_labels: [num_pred'] candidate class labels (0 = background)
    :param gt_classes: [num_gt] GT class labels
    :param gt_rels: [num_gtrel, 3] of (gt_subj_ind, gt_obj_ind, predicate)
    :param fg_thresh: IoU threshold for a candidate to match a GT box
    :param fg_rels_per_image: cap on sampled foreground relations
    :param num_sample_per_gt: candidate pairs sampled per GT relation
    :param filter_non_overlap: if True, background pairs must overlap
    :return: new rels, [num_predrel, 3] where each is (pred_ind1, pred_ind2, predicate),
        sorted by (pred_ind1, pred_ind2)
    """
    # A candidate "matches" a GT box when the overlap clears the threshold
    # AND the assigned class label agrees with the GT class.
    is_match = (ious >= fg_thresh) & (pred_labels[:, None] == gt_classes[None, :])

    pbi_iou = bbox_overlaps(pred_boxes, pred_boxes)

    # Limit ourselves to only IOUs that overlap, but are not the exact same box
    # since we duplicated stuff earlier.
    if filter_non_overlap:
        rel_possibilities = (pbi_iou < 1) & (pbi_iou > 0)
        rels_intersect = rel_possibilities
    else:
        # All ordered pairs except self-pairs (ones matrix minus the diagonal).
        rel_possibilities = np.ones((pred_labels.shape[0], pred_labels.shape[0]),
                                    dtype=np.int64) - np.eye(pred_labels.shape[0], dtype=np.int64)
        rels_intersect = (pbi_iou < 1) & (pbi_iou > 0)

    # ONLY select relations between ground truth because otherwise we get useless data
    # (zero out every pair involving a background-labeled candidate).
    rel_possibilities[pred_labels == 0] = 0
    rel_possibilities[:,pred_labels == 0] = 0

    # For each GT relationship, sample exactly 1 relationship.
    fg_rels = []
    p_size = []
    for i, (from_gtind, to_gtind, rel_id) in enumerate(gt_rels):
        fg_rels_i = []
        fg_scores_i = []

        # Enumerate every (subject-candidate, object-candidate) pair matching
        # this GT relation; score each pair by the product of its IoUs.
        for from_ind in np.where(is_match[:,from_gtind])[0]:
            for to_ind in np.where(is_match[:,to_gtind])[0]:
                if from_ind != to_ind:
                    fg_rels_i.append((from_ind, to_ind, rel_id))
                    fg_scores_i.append((ious[from_ind, from_gtind]*ious[to_ind, to_gtind]))
                    # Pairs claimed by a GT relation can't also be background.
                    rel_possibilities[from_ind, to_ind] = 0
        if len(fg_rels_i) == 0:
            continue
        # Sample pairs with probability proportional to their IoU-product score.
        p = np.array(fg_scores_i)
        p = p/p.sum()
        p_size.append(p.shape[0])
        num_to_add = min(p.shape[0], num_sample_per_gt)
        for rel_to_add in npr.choice(p.shape[0], p=p, size=num_to_add, replace=False):
            fg_rels.append(fg_rels_i[rel_to_add])

    # Remaining allowed pairs become background relations (predicate 0).
    bg_rels = np.column_stack(np.where(rel_possibilities))
    bg_rels = np.column_stack((bg_rels, np.zeros(bg_rels.shape[0], dtype=np.int64)))

    fg_rels = np.array(fg_rels, dtype=np.int64)
    if fg_rels.size > 0 and fg_rels.shape[0] > fg_rels_per_image:
        fg_rels = fg_rels[npr.choice(fg_rels.shape[0], size=fg_rels_per_image, replace=False)]

        # print("{} scores for {} GT. max={} min={} BG rels {}".format(
        #     fg_rels_scores.shape[0], gt_rels.shape[0], fg_rels_scores.max(), fg_rels_scores.min(),
        #     bg_rels.shape))
    elif fg_rels.size == 0:
        fg_rels = np.zeros((0,3), dtype=np.int64)

    num_bg_rel = min(RELS_PER_IMG - fg_rels.shape[0], bg_rels.shape[0])
    if bg_rels.size > 0:
        # Sample 4x as many intersecting relationships as non-intersecting.
        bg_rels_intersect = rels_intersect[bg_rels[:,0], bg_rels[:,1]]
        p = bg_rels_intersect.astype(np.float32)
        p[bg_rels_intersect == 0] = 0.2
        p[bg_rels_intersect == 1] = 0.8
        p /= p.sum()
        bg_rels = bg_rels[np.random.choice(bg_rels.shape[0], p=p, size=num_bg_rel, replace=False)]
    else:
        bg_rels = np.zeros((0,3), dtype=np.int64)

    #print("GTR {} -> AR {} vs {}".format(gt_rels.shape, fg_rels.shape, bg_rels.shape))
    all_rels = np.concatenate((fg_rels, bg_rels), 0)

    # Sort by 2nd ind and then 1st ind
    all_rels = all_rels[np.lexsort((all_rels[:, 1], all_rels[:, 0]))]
    return all_rels
def _sel_inds(ious, gt_classes_i, fg_thresh=0.5, fg_rois_per_image=128, rois_per_image=256, n_sample_per=1):
    """
    Sample foreground and background box indices for one image.

    For each GT box, sample up to `n_sample_per` candidate boxes that clear
    the IoU threshold (weighted uniformly among matches), then fill the quota
    with background boxes whose best overlap lies in [BG_THRESH_LO, BG_THRESH_HI).

    :param ious: [num_pred, num_gt] IoU between candidates and GT boxes
    :param gt_classes_i: [num_gt] GT class labels for this image
    :return: (obj_inds, obj_labels_i, obj_assignments_i) where obj_inds are the
        kept candidate indices (foreground first), obj_labels_i their class
        labels (0 for background), and obj_assignments_i the GT index each kept
        box was matched to (0 for background rows — only bbox-target filler).
    """
    #gt_assignment = ious.argmax(1)
    #max_overlaps = ious[np.arange(ious.shape[0]), gt_assignment]
    #fg_inds = np.where(max_overlaps >= fg_thresh)[0]

    fg_ious = ious.T >= fg_thresh #[num_gt, num_pred]
    #is_bg = ~fg_ious.any(0)

    # Sample K inds per GT image.
    fg_inds = []
    # Each appended entry is (candidate_index, gt_index).
    for i, (ious_i, cls_i) in enumerate(zip(fg_ious, gt_classes_i)):
        # ious_i is boolean, so .sum() counts candidates matching GT i.
        n_sample_this_roi = min(n_sample_per, ious_i.sum())
        if n_sample_this_roi > 0:
            # Uniform over matching candidates (boolean mask normalized).
            p = ious_i.astype(np.float64) / ious_i.sum()
            for ind in npr.choice(ious_i.shape[0], p=p, size=n_sample_this_roi, replace=False):
                fg_inds.append((ind, i))

    fg_inds = np.array(fg_inds, dtype=np.int64)
    if fg_inds.size == 0:
        # Keep the (0, 2) shape so the column slices below stay valid.
        fg_inds = np.zeros((0, 2), dtype=np.int64)
    elif fg_inds.shape[0] > fg_rois_per_image:
        #print("sample FG")
        fg_inds = fg_inds[npr.choice(fg_inds.shape[0], size=fg_rois_per_image, replace=False)]

    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    max_overlaps = ious.max(1)
    bg_inds = np.where((max_overlaps < BG_THRESH_HI) & (max_overlaps >= BG_THRESH_LO))[0]

    # Compute number of background RoIs to take from this image (guarding
    # against there being fewer than desired)
    bg_rois_per_this_image = min(rois_per_image-fg_inds.shape[0], bg_inds.size)
    # Sample background regions without replacement
    if bg_inds.size > 0:
        bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)

    # Fix for format issues: foreground rows first, then background rows.
    obj_inds = np.concatenate((fg_inds[:,0], bg_inds), 0)
    obj_assignments_i = np.concatenate((fg_inds[:,1], np.zeros(bg_inds.shape[0], dtype=np.int64)))
    obj_labels_i = gt_classes_i[obj_assignments_i]
    # Background rows get label 0 regardless of the filler GT assignment.
    obj_labels_i[fg_inds.shape[0]:] = 0

    #print("{} FG and {} BG".format(fg_inds.shape[0], bg_inds.shape[0]))
    return obj_inds, obj_labels_i, obj_assignments_i
| 9,678 | 41.451754 | 150 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/proposal_assignments/proposal_assignments_gtbox.py | from lib.pytorch_misc import enumerate_by_image, gather_nd, random_choose
from lib.fpn.box_utils import bbox_preds, center_size, bbox_overlaps
import torch
from lib.pytorch_misc import diagonal_inds, to_variable
from config import RELS_PER_IMG, REL_FG_FRACTION
@to_variable
def proposal_assignments_gtbox(rois, gt_boxes, gt_classes, gt_rels, image_offset, fg_thresh=0.5):
    """
    Assign relation labels when the boxes ARE the ground-truth boxes
    (predcls/sgcls setting): keep the GT relations as foreground and sample
    background relations from all other same-image box pairs.

    :param rois: [num_rois, 5] of [img_ind, x1, y1, x2, y2]
    :param gt_boxes: [num_boxes, 4] array of x0, y0, x1, y1. Only its size is used here
    :param gt_classes: [num_boxes, 2] array of [img_ind, class]
        Note, the img_inds here start at image_offset
    :param gt_rels: [num_rels, 4] array of [img_ind, box_0, box_1, rel type]
        Note, the img_inds here start at image_offset
    :param fg_thresh: unused in this GT-box variant (kept for API parity)
    :return:
        rois: [num_rois, 5] (unchanged)
        labels: [num_rois] array of GT object labels
        rel_labels: [num_rels, 4] (img ind, box0 ind, box1 ind, rel type)
    """
    im_inds = rois[:,0].long()

    # Assumes rois are grouped and sorted by image index — TODO confirm caller.
    num_im = im_inds[-1] + 1

    # Offset the image indices in fg_rels to refer to absolute indices (not just within img i)
    fg_rels = gt_rels.clone()
    fg_rels[:,0] -= image_offset

    # offset[i] = index of the first box belonging to image i.
    offset = {}
    for i, s, e in enumerate_by_image(im_inds):
        offset[i] = s
    for i, s, e in enumerate_by_image(fg_rels[:, 0]):
        fg_rels[s:e, 1:3] += offset[i]

    # Try ALL things, not just intersections.
    # is_cand[a, b] = 1 iff boxes a and b are in the same image and a != b.
    is_cand = (im_inds[:, None] == im_inds[None])
    is_cand.view(-1)[diagonal_inds(is_cand)] = 0

    # # Compute salience
    # gt_inds = fg_rels[:, 1:3].contiguous().view(-1)
    # labels_arange = labels.data.new(labels.size(0))
    # torch.arange(0, labels.size(0), out=labels_arange)
    # salience_labels = ((gt_inds[:, None] == labels_arange[None]).long().sum(0) > 0).long()
    # labels = torch.stack((labels, salience_labels), 1)

    # Add in some BG labels

    # NOW WE HAVE TO EXCLUDE THE FGs.
    # TODO: check if this causes an error if many duplicate GTs havent been filtered out
    # Flat-index trick: row*num_boxes + col addresses cell (row, col) of is_cand.
    is_cand.view(-1)[fg_rels[:,1]*im_inds.size(0) + fg_rels[:,2]] = 0
    is_bgcand = is_cand.nonzero()
    # TODO: make this sample on a per image case
    # If too many then sample
    num_fg = min(fg_rels.size(0), int(RELS_PER_IMG * REL_FG_FRACTION * num_im))
    if num_fg < fg_rels.size(0):
        fg_rels = random_choose(fg_rels, num_fg)

    # If too many then sample
    num_bg = min(is_bgcand.size(0) if is_bgcand.dim() > 0 else 0,
                 int(RELS_PER_IMG * num_im) - num_fg)
    if num_bg > 0:
        # Columns: img_ind, box0, box1, predicate (always 0 for background —
        # the `< -10` comparison is just a way to build a zero column).
        bg_rels = torch.cat((
            im_inds[is_bgcand[:, 0]][:, None],
            is_bgcand,
            (is_bgcand[:, 0, None] < -10).long(),
        ), 1)

        if num_bg < is_bgcand.size(0):
            bg_rels = random_choose(bg_rels, num_bg)
        rel_labels = torch.cat((fg_rels, bg_rels), 0)
    else:
        rel_labels = fg_rels

    # last sort by rel (lexicographic on img_ind, box0, box1 via one key).
    _, perm = torch.sort(rel_labels[:, 0]*(gt_boxes.size(0)**2) +
                         rel_labels[:,1]*gt_boxes.size(0) + rel_labels[:,2])

    rel_labels = rel_labels[perm].contiguous()

    labels = gt_classes[:,1].contiguous()
    return rois, labels, rel_labels
| 3,434 | 38.034091 | 97 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/proposal_assignments/rel_assignments.py | # --------------------------------------------------------
# Goal: assign ROIs to targets
# --------------------------------------------------------
import numpy as np
import numpy.random as npr
from config import BG_THRESH_HI, BG_THRESH_LO, REL_FG_FRACTION, RELS_PER_IMG_REFINE
from lib.fpn.box_utils import bbox_overlaps
from lib.pytorch_misc import to_variable, nonintersecting_2d_inds
from collections import defaultdict
import torch
@to_variable
def rel_assignments(im_inds, rpn_rois, roi_gtlabels, gt_boxes, gt_classes, gt_rels, image_offset,
                    fg_thresh=0.5, num_sample_per_gt=4, filter_non_overlap=True):
    """
    Assign relationship (subject, object, predicate) training labels to the
    already-classified detection proposals.

    :param im_inds: [num_rois] image index of each proposal
    :param rpn_rois: [num_rois, 4] proposal boxes (x1, y1, x2, y2)
    :param roi_gtlabels: [num_rois] GT class label assigned to each proposal (0 = bg)
    :param gt_boxes: [num_boxes, 4] of [x1, y1, x2, y2]
    :param gt_classes: [num_boxes, 2] of [img_ind, class]
    :param gt_rels: [num_gt_rels, 4] of [img_ind, box_0, box_1, rel type]
    :param image_offset: offset subtracted from GT image indices so they are 0-based
    :param fg_thresh: overlap threshold for a proposal to match a GT box
    :param num_sample_per_gt: candidate pairs sampled per GT relation
    :param filter_non_overlap: if True, background pairs must overlap
    :return: rel_labels: [num_rels, 4] (img ind, box0 ind, box1 ind, rel type)
    """
    fg_rels_per_image = int(np.round(REL_FG_FRACTION * 64))

    pred_inds_np = im_inds.cpu().numpy()
    pred_boxes_np = rpn_rois.cpu().numpy()
    pred_boxlabels_np = roi_gtlabels.cpu().numpy()
    gt_boxes_np = gt_boxes.cpu().numpy()
    gt_classes_np = gt_classes.cpu().numpy()
    gt_rels_np = gt_rels.cpu().numpy()

    # Make the GT image indices 0-based for this minibatch.
    gt_classes_np[:, 0] -= image_offset
    gt_rels_np[:, 0] -= image_offset

    num_im = gt_classes_np[:, 0].max()+1

    # print("Pred inds {} pred boxes {} pred box labels {} gt classes {} gt rels {}".format(
    #     pred_inds_np, pred_boxes_np, pred_boxlabels_np, gt_classes_np, gt_rels_np
    # ))

    rel_labels = []
    num_box_seen = 0
    for im_ind in range(num_im):
        pred_ind = np.where(pred_inds_np == im_ind)[0]

        gt_ind = np.where(gt_classes_np[:, 0] == im_ind)[0]
        gt_boxes_i = gt_boxes_np[gt_ind]
        gt_classes_i = gt_classes_np[gt_ind, 1]
        gt_rels_i = gt_rels_np[gt_rels_np[:, 0] == im_ind, 1:]

        # [num_pred, num_gt]
        pred_boxes_i = pred_boxes_np[pred_ind]
        pred_boxlabels_i = pred_boxlabels_np[pred_ind]

        ious = bbox_overlaps(pred_boxes_i, gt_boxes_i)
        # A proposal "matches" a GT box when the class agrees and IoU clears
        # the threshold.
        is_match = (pred_boxlabels_i[:,None] == gt_classes_i[None]) & (ious >= fg_thresh)

        # FOR BG. Limit ourselves to only IOUs that overlap, but are not the exact same box
        pbi_iou = bbox_overlaps(pred_boxes_i, pred_boxes_i)
        if filter_non_overlap:
            rel_possibilities = (pbi_iou < 1) & (pbi_iou > 0)
            rels_intersect = rel_possibilities
        else:
            # All ordered pairs except self-pairs.
            rel_possibilities = np.ones((pred_boxes_i.shape[0], pred_boxes_i.shape[0]),
                                        dtype=np.int64) - np.eye(pred_boxes_i.shape[0],
                                                                 dtype=np.int64)
            rels_intersect = (pbi_iou < 1) & (pbi_iou > 0)

        # ONLY select relations between ground truth because otherwise we get useless data
        rel_possibilities[pred_boxlabels_i == 0] = 0
        rel_possibilities[:, pred_boxlabels_i == 0] = 0

        # Sample the GT relationships.
        fg_rels = []
        p_size = []
        for i, (from_gtind, to_gtind, rel_id) in enumerate(gt_rels_i):
            fg_rels_i = []
            fg_scores_i = []

            # Enumerate every matching (subject, object) candidate pair and
            # score it by the product of its IoUs with the GT boxes.
            for from_ind in np.where(is_match[:, from_gtind])[0]:
                for to_ind in np.where(is_match[:, to_gtind])[0]:
                    if from_ind != to_ind:
                        fg_rels_i.append((from_ind, to_ind, rel_id))
                        fg_scores_i.append((ious[from_ind, from_gtind] * ious[to_ind, to_gtind]))
                        # Pairs claimed as foreground can't also be background.
                        rel_possibilities[from_ind, to_ind] = 0
            if len(fg_rels_i) == 0:
                continue
            p = np.array(fg_scores_i)
            p = p / p.sum()
            p_size.append(p.shape[0])
            num_to_add = min(p.shape[0], num_sample_per_gt)
            for rel_to_add in npr.choice(p.shape[0], p=p, size=num_to_add, replace=False):
                fg_rels.append(fg_rels_i[rel_to_add])

        fg_rels = np.array(fg_rels, dtype=np.int64)
        if fg_rels.size > 0 and fg_rels.shape[0] > fg_rels_per_image:
            fg_rels = fg_rels[npr.choice(fg_rels.shape[0], size=fg_rels_per_image, replace=False)]
        elif fg_rels.size == 0:
            fg_rels = np.zeros((0, 3), dtype=np.int64)

        bg_rels = np.column_stack(np.where(rel_possibilities))
        bg_rels = np.column_stack((bg_rels, np.zeros(bg_rels.shape[0], dtype=np.int64)))

        num_bg_rel = min(64 - fg_rels.shape[0], bg_rels.shape[0])
        if bg_rels.size > 0:
            # Sample 4x as many intersecting relationships as non-intersecting.
            # bg_rels_intersect = rels_intersect[bg_rels[:, 0], bg_rels[:, 1]]
            # p = bg_rels_intersect.astype(np.float32)
            # p[bg_rels_intersect == 0] = 0.2
            # p[bg_rels_intersect == 1] = 0.8
            # p /= p.sum()
            bg_rels = bg_rels[
                np.random.choice(bg_rels.shape[0],
                                 #p=p,
                                 size=num_bg_rel, replace=False)]
        else:
            bg_rels = np.zeros((0, 3), dtype=np.int64)

        if fg_rels.size == 0 and bg_rels.size == 0:
            # Just put something here
            bg_rels = np.array([[0, 0, 0]], dtype=np.int64)

        # print("GTR {} -> AR {} vs {}".format(gt_rels.shape, fg_rels.shape, bg_rels.shape))
        all_rels_i = np.concatenate((fg_rels, bg_rels), 0)
        # Shift per-image box indices to minibatch-level indices.
        all_rels_i[:,0:2] += num_box_seen

        all_rels_i = all_rels_i[np.lexsort((all_rels_i[:,1], all_rels_i[:,0]))]

        rel_labels.append(np.column_stack((
            im_ind*np.ones(all_rels_i.shape[0], dtype=np.int64),
            all_rels_i,
        )))

        num_box_seen += pred_boxes_i.shape[0]

    # BUGFIX: `async=True` is a SyntaxError on Python >= 3.7 (`async` is a
    # reserved keyword); PyTorch renamed the argument to `non_blocking`.
    rel_labels = torch.LongTensor(np.concatenate(rel_labels, 0)).cuda(rpn_rois.get_device(),
                                                                      non_blocking=True)
    return rel_labels
| 6,381 | 42.712329 | 98 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/proposal_assignments/proposal_assignments_det.py |
import numpy as np
import numpy.random as npr
from config import BG_THRESH_HI, BG_THRESH_LO, FG_FRACTION, ROIS_PER_IMG
from lib.fpn.box_utils import bbox_overlaps
from lib.pytorch_misc import to_variable
import torch
#############################################################
# The following is only for object detection
@to_variable
def proposal_assignments_det(rpn_rois, gt_boxes, gt_classes, image_offset, fg_thresh=0.5):
    """
    Assign object detection proposals to ground-truth targets. Produces proposal
    classification labels and bounding-box regression targets.

    :param rpn_rois: [img_ind, x1, y1, x2, y2]
    :param gt_boxes: [num_boxes, 4] array of x0, y0, x1, y1
    :param gt_classes: [num_boxes, 2] array of [img_ind, class]
    :param image_offset: offset subtracted from GT image indices so they are 0-based
    :param fg_thresh: overlap threshold for a ROI to be considered foreground
    :return:
        rois: [num_rois, 5]
        labels: [num_rois] array of labels
        bbox_targets [num_rois, 4] array of targets for the labels.
    """
    fg_rois_per_image = int(np.round(ROIS_PER_IMG * FG_FRACTION))

    gt_img_inds = gt_classes[:, 0] - image_offset

    # Pool the proposals and the GT boxes together so every GT box is a
    # candidate, then sort everything by image index.
    all_boxes = torch.cat([rpn_rois[:, 1:], gt_boxes], 0)

    ims_per_box = torch.cat([rpn_rois[:, 0].long(), gt_img_inds], 0)

    im_sorted, idx = torch.sort(ims_per_box, 0)
    all_boxes = all_boxes[idx]

    # Assume that the GT boxes are already sorted in terms of image id
    num_images = int(im_sorted[-1]) + 1

    labels = []
    rois = []
    bbox_targets = []
    for im_ind in range(num_images):
        # GT rows for this image (skip images without annotations).
        g_inds = (gt_img_inds == im_ind).nonzero()

        if g_inds.dim() == 0:
            continue

        g_inds = g_inds.squeeze(1)
        g_start = g_inds[0]
        g_end = g_inds[-1] + 1

        # Candidate rows (proposals + GT) for this image in the sorted order.
        t_inds = (im_sorted == im_ind).nonzero().squeeze(1)
        t_start = t_inds[0]
        t_end = t_inds[-1] + 1

        # Max overlaps: for each predicted box, get the max ROI
        # Get the indices into the GT boxes too (must offset by the box start)
        ious = bbox_overlaps(all_boxes[t_start:t_end], gt_boxes[g_start:g_end])
        max_overlaps, gt_assignment = ious.max(1)
        max_overlaps = max_overlaps.cpu().numpy()
        # print("Best overlap is {}".format(max_overlaps.max()))
        # print("\ngt assignment is {} while g_start is {} \n ---".format(gt_assignment, g_start))
        gt_assignment += g_start

        keep_inds_np, num_fg = _sel_inds(max_overlaps, fg_thresh, fg_rois_per_image,
                                         ROIS_PER_IMG)

        if keep_inds_np.size == 0:
            continue

        keep_inds = torch.LongTensor(keep_inds_np).cuda(rpn_rois.get_device())

        labels_ = gt_classes[:, 1][gt_assignment[keep_inds]]
        bbox_target_ = gt_boxes[gt_assignment[keep_inds]]

        # Clamp labels_ for the background RoIs to 0
        if num_fg < labels_.size(0):
            labels_[num_fg:] = 0

        rois_ = torch.cat((
            im_sorted[t_start:t_end, None][keep_inds].float(),
            all_boxes[t_start:t_end][keep_inds],
        ), 1)

        labels.append(labels_)
        rois.append(rois_)
        bbox_targets.append(bbox_target_)

    rois = torch.cat(rois, 0)
    labels = torch.cat(labels, 0)
    bbox_targets = torch.cat(bbox_targets, 0)
    return rois, labels, bbox_targets
def _sel_inds(max_overlaps, fg_thresh=0.5, fg_rois_per_image=128, rois_per_image=256):
    """Sample foreground and background RoI indices for one image.

    Foreground = RoIs whose best GT overlap is >= fg_thresh (capped at
    fg_rois_per_image); background = RoIs whose best overlap falls in
    [BG_THRESH_LO, BG_THRESH_HI), filling up to rois_per_image in total.

    Returns (indices, num_foreground) with foreground indices first.
    """
    # Select foreground RoIs as those with >= FG_THRESH overlap.
    foreground = np.where(max_overlaps >= fg_thresh)[0]

    # Guard against images with fewer foreground RoIs than the quota.
    n_fg = min(fg_rois_per_image, foreground.shape[0])
    if foreground.size > 0:
        # Sample foreground regions without replacement.
        foreground = npr.choice(foreground, size=n_fg, replace=False)

    # Background RoIs sit inside the [BG_THRESH_LO, BG_THRESH_HI) overlap band.
    background = np.where((max_overlaps < BG_THRESH_HI) & (max_overlaps >= BG_THRESH_LO))[0]

    # Take however many background RoIs remain in the budget, guarding
    # against there being fewer available than desired.
    n_bg = min(rois_per_image - n_fg, background.size)
    if background.size > 0:
        # Sample background regions without replacement.
        background = npr.choice(background, size=n_bg, replace=False)

    return np.append(foreground, background), n_fg
| 4,477 | 36.949153 | 98 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/proposal_assignments/proposal_assignments_postnms.py | # --------------------------------------------------------
# Goal: assign ROIs to targets
# --------------------------------------------------------
import numpy as np
import numpy.random as npr
from .proposal_assignments_rel import _sel_rels
from lib.fpn.box_utils import bbox_overlaps
from lib.pytorch_misc import to_variable
import torch
@to_variable
def proposal_assignments_postnms(
        rois, gt_boxes, gt_classes, gt_rels, nms_inds, image_offset, fg_thresh=0.5,
        max_objs=100, max_rels=100, rand_val=0.01):
    """
    Assign object detection proposals to ground-truth targets AFTER NMS.
    Produces proposal classification labels and sampled relation labels.

    :param rois: [num_rois, 5] of [img_ind, x1, y1, x2, y2]
    :param gt_boxes: [num_boxes, 4] of [x1, y1, x2, y2]
    :param gt_classes: [num_boxes, 2] of [img_ind, class]
    :param gt_rels: [num_gt_rels, 4] of [img_ind, box_0, box_1, rel type]
    :param nms_inds: indices of the rois that survived NMS
    :param image_offset: offset subtracted from GT image indices so they are 0-based
    :param fg_thresh: overlap threshold for a ROI to be considered foreground
    :param max_objs: cap on boxes kept per image
    :param max_rels: unused here (kept for API compatibility)
    :param rand_val: magnitude of the random jitter added to IoUs to break
        ties between duplicated GT boxes
    :return:
        rois: [num_rois, 5]
        labels: [num_rois] array of labels
        rel_labels: [num_rels, 4] (img ind, box0 ind, box1 ind, rel type)
    """
    pred_inds_np = rois[:, 0].cpu().numpy().astype(np.int64)
    pred_boxes_np = rois[:, 1:].cpu().numpy()
    nms_inds_np = nms_inds.cpu().numpy()
    sup_inds_np = np.setdiff1d(np.arange(pred_boxes_np.shape[0]), nms_inds_np)

    # split into chosen (kept by NMS) and suppressed
    chosen_inds_np = pred_inds_np[nms_inds_np]
    chosen_boxes_np = pred_boxes_np[nms_inds_np]

    suppre_inds_np = pred_inds_np[sup_inds_np]
    suppre_boxes_np = pred_boxes_np[sup_inds_np]

    gt_boxes_np = gt_boxes.cpu().numpy()
    gt_classes_np = gt_classes.cpu().numpy()
    gt_rels_np = gt_rels.cpu().numpy()

    gt_classes_np[:, 0] -= image_offset
    gt_rels_np[:, 0] -= image_offset

    num_im = gt_classes_np[:, 0].max()+1

    rois = []
    obj_labels = []
    rel_labels = []
    num_box_seen = 0

    for im_ind in range(num_im):
        chosen_ind = np.where(chosen_inds_np == im_ind)[0]
        suppre_ind = np.where(suppre_inds_np == im_ind)[0]

        gt_ind = np.where(gt_classes_np[:, 0] == im_ind)[0]
        gt_boxes_i = gt_boxes_np[gt_ind]
        gt_classes_i = gt_classes_np[gt_ind, 1]
        gt_rels_i = gt_rels_np[gt_rels_np[:, 0] == im_ind, 1:]

        # Get IOUs between chosen and GT boxes and if needed we'll add more in
        chosen_boxes_i = chosen_boxes_np[chosen_ind]
        suppre_boxes_i = suppre_boxes_np[suppre_ind]

        n_chosen = chosen_boxes_i.shape[0]
        n_suppre = suppre_boxes_i.shape[0]
        n_gt_box = gt_boxes_i.shape[0]

        # add a teensy bit of random noise because some GT boxes might be duplicated, etc.
        pred_boxes_i = np.concatenate((chosen_boxes_i, suppre_boxes_i, gt_boxes_i), 0)
        ious = bbox_overlaps(pred_boxes_i, gt_boxes_i) + rand_val*(
            np.random.rand(pred_boxes_i.shape[0], gt_boxes_i.shape[0])-0.5)

        # Let's say that a box can only be assigned ONCE for now because we've already done
        # the NMS and stuff.
        is_hit = ious > fg_thresh

        obj_assignments_i = is_hit.argmax(1)
        obj_assignments_i[~is_hit.any(1)] = -1

        # Keep only the first box assigned to each GT; the rest become -1.
        vals, first_occurance_ind = np.unique(obj_assignments_i, return_index=True)
        obj_assignments_i[np.setdiff1d(
            np.arange(obj_assignments_i.shape[0]), first_occurance_ind)] = -1

        # Suppressed/GT boxes that still claim a GT get added back in.
        extra_to_add = np.where(obj_assignments_i[n_chosen:] != -1)[0] + n_chosen

        # Add them in somewhere at random
        # NOTE(review): if max_objs < n_chosen, the np.random.choice below
        # would raise — presumably callers guarantee n_chosen <= max_objs.
        num_inds_to_have = min(max_objs, n_chosen + extra_to_add.shape[0])

        boxes_i = np.zeros((num_inds_to_have, 4), dtype=np.float32)
        labels_i = np.zeros(num_inds_to_have, dtype=np.int64)

        inds_from_nms = np.sort(np.random.choice(num_inds_to_have, size=n_chosen, replace=False))
        inds_from_elsewhere = np.setdiff1d(np.arange(num_inds_to_have), inds_from_nms)

        boxes_i[inds_from_nms] = chosen_boxes_i
        labels_i[inds_from_nms] = gt_classes_i[obj_assignments_i[:n_chosen]]

        boxes_i[inds_from_elsewhere] = pred_boxes_i[extra_to_add]
        labels_i[inds_from_elsewhere] = gt_classes_i[obj_assignments_i[extra_to_add]]

        # Now, we do the relationships. same as for rle
        all_rels_i = _sel_rels(bbox_overlaps(boxes_i, gt_boxes_i),
                               boxes_i,
                               labels_i,
                               gt_classes_i,
                               gt_rels_i,
                               fg_thresh=fg_thresh,
                               fg_rels_per_image=100)
        all_rels_i[:,0:2] += num_box_seen

        rois.append(np.column_stack((
            im_ind * np.ones(boxes_i.shape[0], dtype=np.float32),
            boxes_i,
        )))
        obj_labels.append(labels_i)
        rel_labels.append(np.column_stack((
            im_ind*np.ones(all_rels_i.shape[0], dtype=np.int64),
            all_rels_i,
        )))

        # BUGFIX: was `boxes_i.size`, which for an (N, 4) array is 4*N and
        # corrupted the cross-image box index offsets; we need the row count
        # (matches num_box_seen accounting in proposal_assignments_rel).
        num_box_seen += boxes_i.shape[0]

    # BUGFIX: `async=True` is a SyntaxError on Python >= 3.7 (`async` is a
    # reserved keyword); PyTorch renamed the argument to `non_blocking`.
    rois = torch.FloatTensor(np.concatenate(rois, 0)).cuda(gt_boxes.get_device(), non_blocking=True)
    labels = torch.LongTensor(np.concatenate(obj_labels, 0)).cuda(gt_boxes.get_device(), non_blocking=True)
    rel_labels = torch.LongTensor(np.concatenate(rel_labels, 0)).cuda(gt_boxes.get_device(),
                                                                      non_blocking=True)
    return rois, labels, rel_labels
| 5,420 | 39.455224 | 100 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/box_intersections_cpu/setup.py | from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(name="bbox_cython", ext_modules=cythonize('bbox.pyx'), include_dirs=[numpy.get_include()]) | 178 | 34.8 | 96 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/build.py | import os
import torch
from torch.utils.ffi import create_extension

# Build script for the RoIAlign CUDA extension (legacy torch.utils.ffi API).
# Might have to export PATH=/usr/local/cuda-8.0/bin${PATH:+:${PATH}}

# CPU sources are disabled; this extension is CUDA-only.
# sources = ['src/roi_align.c']
# headers = ['src/roi_align.h']
sources = []
headers = []
defines = []
with_cuda = False

# Only include the CUDA wrapper sources when a GPU build is possible.
if torch.cuda.is_available():
    print('Including CUDA code.')
    sources += ['src/roi_align_cuda.c']
    headers += ['src/roi_align_cuda.h']
    defines += [('WITH_CUDA', None)]
    with_cuda = True

this_file = os.path.dirname(os.path.realpath(__file__))
print(this_file)
# Pre-compiled CUDA object (built separately with nvcc) linked into the ext.
extra_objects = ['src/cuda/roi_align.cu.o']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]

ffi = create_extension(
    '_ext.roi_align',
    headers=headers,
    sources=sources,
    define_macros=defines,
    relative_to=__file__,
    with_cuda=with_cuda,
    extra_objects=extra_objects
)

if __name__ == '__main__':
    ffi.build()
| 901 | 23.378378 | 75 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/functions/roi_align.py | """
performs ROI aligning
"""
import torch
from torch.autograd import Function
from .._ext import roi_align
class RoIAlignFunction(Function):
    """RoI Align autograd function backed by the compiled CUDA extension.

    NOTE(review): this uses the legacy stateful autograd.Function API
    (state on self, non-static forward/backward), which modern PyTorch
    no longer supports — keep with the old torch version this repo targets.
    """

    def __init__(self, aligned_height, aligned_width, spatial_scale):
        # Output grid size and the feature-map / input-image scale factor.
        self.aligned_width = int(aligned_width)
        self.aligned_height = int(aligned_height)
        self.spatial_scale = float(spatial_scale)
        # Cached input feature size, filled in by forward() for backward().
        self.feature_size = None

    def forward(self, features, rois):
        """Crop+resample each RoI from `features` to the aligned grid.

        features: [batch, channels, H, W]; rois: [num_rois, 5] with columns
        (img_ind, x1, y1, x2, y2) in input-image coordinates. CUDA only.
        """
        self.save_for_backward(rois)
        rois_normalized = rois.clone()

        self.feature_size = features.size()
        batch_size, num_channels, data_height, data_width = self.feature_size

        # Normalize roi coordinates to [0, 1] relative to the input image
        # extent implied by the feature-map size and spatial scale.
        height = (data_height -1) / self.spatial_scale
        width = (data_width - 1) / self.spatial_scale

        rois_normalized[:,1] /= width
        rois_normalized[:,2] /= height
        rois_normalized[:,3] /= width
        rois_normalized[:,4] /= height

        num_rois = rois.size(0)

        output = features.new(num_rois, num_channels, self.aligned_height,
                              self.aligned_width).zero_()
        if features.is_cuda:
            # The C wrapper returns 1 on success.
            res = roi_align.roi_align_forward_cuda(self.aligned_height,
                                                   self.aligned_width,
                                                   self.spatial_scale, features,
                                                   rois_normalized, output)
            assert res == 1
        else:
            # No CPU kernel is compiled for this extension.
            raise ValueError

        return output

    def backward(self, grad_output):
        """Propagate gradients back to the feature map (rois get no grad)."""
        assert(self.feature_size is not None and grad_output.is_cuda)

        rois = self.saved_tensors[0]
        rois_normalized = rois.clone()

        batch_size, num_channels, data_height, data_width = self.feature_size

        # Same coordinate normalization as in forward().
        height = (data_height -1) / self.spatial_scale
        width = (data_width - 1) / self.spatial_scale

        rois_normalized[:,1] /= width
        rois_normalized[:,2] /= height
        rois_normalized[:,3] /= width
        rois_normalized[:,4] /= height

        grad_input = rois_normalized.new(batch_size, num_channels, data_height,
                                         data_width).zero_()
        res = roi_align.roi_align_backward_cuda(self.aligned_height,
                                                self.aligned_width,
                                                self.spatial_scale, grad_output,
                                                rois_normalized, grad_input)
        assert res == 1
        # None: the rois input receives no gradient.
        return grad_input, None
| 2,455 | 31.746667 | 79 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/functions/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/modules/roi_align.py | from torch.nn.modules.module import Module
from torch.nn.functional import avg_pool2d, max_pool2d
from ..functions.roi_align import RoIAlignFunction
class RoIAlign(Module):
    """nn.Module wrapper around RoIAlignFunction: crops each RoI from the
    feature map and resamples it to a fixed aligned_height x aligned_width grid."""

    def __init__(self, aligned_height, aligned_width, spatial_scale):
        super(RoIAlign, self).__init__()
        # Output grid size and feature-map-to-image scale factor.
        self.aligned_height = int(aligned_height)
        self.aligned_width = int(aligned_width)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        align_op = RoIAlignFunction(self.aligned_height, self.aligned_width,
                                    self.spatial_scale)
        return align_op(features, rois)
class RoIAlignAvg(Module):
    """RoI Align to a grid one cell larger than requested, followed by a
    2x2 average pool with stride 1 (yields the requested output size)."""

    def __init__(self, aligned_height, aligned_width, spatial_scale):
        super(RoIAlignAvg, self).__init__()
        self.aligned_height = int(aligned_height)
        self.aligned_width = int(aligned_width)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        aligned = RoIAlignFunction(self.aligned_height + 1, self.aligned_width + 1,
                                   self.spatial_scale)(features, rois)
        return avg_pool2d(aligned, kernel_size=2, stride=1)
class RoIAlignMax(Module):
    """RoI Align to a grid one cell larger than requested, followed by a
    2x2 max pool with stride 1 (yields the requested output size)."""

    def __init__(self, aligned_height, aligned_width, spatial_scale):
        super(RoIAlignMax, self).__init__()
        self.aligned_height = int(aligned_height)
        self.aligned_width = int(aligned_width)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        aligned = RoIAlignFunction(self.aligned_height + 1, self.aligned_width + 1,
                                   self.spatial_scale)(features, rois)
        return max_pool2d(aligned, kernel_size=2, stride=1)
| 1,672 | 37.906977 | 74 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/modules/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/_ext/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/_ext/roi_align/__init__.py |
from torch.utils.ffi import _wrap_function
from ._roi_align import lib as _lib, ffi as _ffi
__all__ = []
def _import_symbols(locals):
    """Wrap every symbol exported by the compiled FFI library and publish it
    into `locals` (the caller's module namespace), recording each name in
    the module's __all__."""
    for name in dir(_lib):
        wrapped = _wrap_function(getattr(_lib, name), _ffi)
        locals[name] = wrapped
        __all__.append(name)
_import_symbols(locals())
| 308 | 22.769231 | 49 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/nms/build.py | import os
import torch
from torch.utils.ffi import create_extension

# Build script for the NMS CUDA extension (legacy torch.utils.ffi API).
# Might have to export PATH=/usr/local/cuda-8.0/bin${PATH:+:${PATH}}

sources = []
headers = []
defines = []
with_cuda = False

# Only include the CUDA wrapper sources when a GPU build is possible.
if torch.cuda.is_available():
    print('Including CUDA code.')
    sources += ['src/nms_cuda.c']
    headers += ['src/nms_cuda.h']
    defines += [('WITH_CUDA', None)]
    with_cuda = True

this_file = os.path.dirname(os.path.realpath(__file__))
print(this_file)
# Pre-compiled CUDA object (built separately with nvcc) linked into the ext.
extra_objects = ['src/cuda/nms.cu.o']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]

ffi = create_extension(
    '_ext.nms',
    headers=headers,
    sources=sources,
    define_macros=defines,
    relative_to=__file__,
    with_cuda=with_cuda,
    extra_objects=extra_objects
)

if __name__ == '__main__':
    ffi.build()
| 814 | 21.638889 | 75 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/nms/functions/nms.py | # Le code for doing NMS
import torch
import numpy as np
from .._ext import nms
def apply_nms(scores, boxes, pre_nms_topn=12000, post_nms_topn=2000, boxes_per_im=None,
              nms_thresh=0.7):
    """
    Run per-image NMS over a batch of scored boxes.

    Non-differentiable: everything here is assumed to be a tensor, not a
    variable. When `boxes_per_im` is None, all boxes are treated as one
    image and only the kept indices are returned; otherwise returns
    (kept indices, number kept per image).
    """
    single_batch = boxes_per_im is None
    if single_batch:
        boxes_per_im = [boxes.size(0)]

    kept_chunks = []
    counts = []
    start = 0
    for n_boxes in boxes_per_im:
        end = start + int(n_boxes)
        # Per-image indices, shifted back into batch-level indices.
        kept = _nms_single_im(scores[start:end], boxes[start:end],
                              pre_nms_topn, post_nms_topn, nms_thresh)
        kept_chunks.append(kept + start)
        counts.append(kept.size(0))
        start = end

    all_kept = torch.cat(kept_chunks, 0)
    return all_kept if single_batch else (all_kept, counts)
def _nms_single_im(scores, boxes, pre_nms_topn=12000, post_nms_topn=2000, nms_thresh=0.7):
    """Run the CUDA NMS kernel on one image's boxes; return kept indices (highest score first).

    The kernel writes kept positions (into the score-sorted order) into `keep`;
    those are mapped back to original box indices via `idx`.
    """
    keep = torch.IntTensor(scores.size(0))
    # Sort boxes by score so NMS suppresses against higher-scoring boxes first.
    vs, idx = torch.sort(scores, dim=0, descending=True)
    # Cap the candidate set before NMS to bound kernel cost.
    if idx.size(0) > pre_nms_topn:
        idx = idx[:pre_nms_topn]
    boxes_sorted = boxes[idx].contiguous()
    num_out = nms.nms_apply(keep, boxes_sorted, nms_thresh)
    num_out = min(num_out, post_nms_topn)
    keep = keep[:num_out].long()
    # Translate positions in sorted order back to indices into the input tensors.
    keep = idx[keep.cuda(scores.get_device())]
    return keep
| 1,312 | 27.543478 | 98 | py |
MRE-ISE | MRE-ISE-main/processor/create_bow.py | import numpy as np
import os
from sklearn.cluster import KMeans
from PIL import Image
import cv2
import pickle
from transformers import CLIPModel, CLIPProcessor
import torch
import json
from tqdm import tqdm
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords as stop_words
from scipy.cluster.vq import kmeans, vq
def create_tbow(data, textual_bow_size, target_file, stopwords_language="english"):
    """Build and persist the textual bag-of-words vocabulary.

    :param data: samples; each sample's 'token' field is a list of words.
    :param textual_bow_size: maximum vocabulary size for the CountVectorizer.
    :param target_file: pickle file receiving [vocab, fitted vectorizer].
    :param stopwords_language: NLTK stopword list to exclude (default English).
    :return: the fitted vocabulary as a list of terms.
    """
    stopwords = set(stop_words.words(stopwords_language))
    vectorizer = CountVectorizer(max_features=textual_bow_size, stop_words=stopwords)
    text_for_bow = [' '.join(d['token']) for d in data]
    vectorizer.fit(text_for_bow)
    # get_feature_names() was removed in scikit-learn 1.2 -- prefer the new API
    # when it exists, keeping compatibility with older installs.
    if hasattr(vectorizer, 'get_feature_names_out'):
        vocab = list(vectorizer.get_feature_names_out())
    else:
        vocab = vectorizer.get_feature_names()
    with open(target_file, 'wb') as f:
        pickle.dump([vocab, vectorizer], f)
    return vocab
def create_vbow(data, visual_bow_size, mode, original_img_dir, target_file,
                clip_version="openai/clip-vit-base-patch32"):
    """
    create Visual words
    :param data: input data.
    :param visual_bow_size: the vocabulary size of visual bow.
    :param mode: 'train' / 'val' / 'test'
    :param original_img_dir: root directory holding the per-split image folders.
    :param target_file: the final target file to storage visual words.
    :param clip_version: the vision of pre-trained clip model.
    :return: ndarray of cluster centres, shape (visual_bow_size, feature_dim).
    """
    print('prepare vision features extractor ...')
    vision_model = CLIPModel.from_pretrained(clip_version)
    for param in vision_model.parameters():
        param.requires_grad = False
    vision_model.eval()
    processor = CLIPProcessor.from_pretrained(clip_version)
    # Keep model and inputs on one device; the original moved inputs to CUDA
    # unconditionally, which crashed on CPU-only hosts.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    vision_model.to(device)
    print('extract the visual words')
    with torch.no_grad():
        des_features = []
        for d in tqdm(data, total=len(data)):
            imgid = d['img_id']
            img_path = os.path.join(original_img_dir, mode, imgid)
            bbox = d['VSG']['bbox']
            # Read the image once per sample instead of once per bounding box.
            crop_img = cv2.imread(img_path)
            for b in bbox:
                try:
                    # bbox is [x1, y1, x2, y2]; numpy slicing is rows (y) first.
                    crop_region = crop_img[b[1]:b[3], b[0]:b[2]]
                except TypeError as e:
                    # cv2.imread returns None for unreadable files; dump context.
                    print(e)
                    print(bbox)
                    print(b)
                    print(imgid)
                    exit(0)
                im = Image.fromarray(crop_region, mode="RGB")
                images = processor(images=im, return_tensors="pt").to(device)
                image_features = vision_model.get_image_features(**images).squeeze()
                # BUGFIX: CUDA tensors must be moved to CPU before .numpy().
                des_features.append(image_features.cpu().numpy())
    kmeans = KMeans(n_clusters=visual_bow_size, random_state=0, n_init=10)
    img_cluster = kmeans.fit(np.array(des_features))
    visual_word = img_cluster.cluster_centers_  # ndarray of shape (n_clusters, n_features)
    with open(target_file, 'wb') as f:
        pickle.dump([visual_word, kmeans], f)
    return visual_word
def extract_visual_words(vision_model, processor, data, visual_bow_size, original_img_dir, target_file):
    """
    create Visual words (cluster CLIP region features into a visual vocabulary).
    :param vision_model: frozen CLIP model used as the feature extractor.
    :param processor: matching CLIPProcessor.
    :param data: input data.
    :param visual_bow_size: the vocabulary size of visual bow.
    :param original_img_dir: directory containing the images referenced by 'img_id'.
    :param target_file: the final target file to storage visual words.
    :return: (fitted KMeans estimator, vocab list, id->token dict, centre matrix)
    """
    print('extract the visual words')
    # Feed inputs to whatever device the model already lives on.
    device = next(vision_model.parameters()).device
    with torch.no_grad():
        des_features = []
        for d in tqdm(data, total=len(data)):
            imgid = d['img_id']
            img_path = os.path.join(original_img_dir, imgid)
            bbox = d['VSG']['bbox']
            # Read once per sample instead of once per bounding box.
            crop_img = cv2.imread(img_path)
            for b in bbox:
                try:
                    crop_region = crop_img[b[1]:b[3], b[0]:b[2]]
                except TypeError as e:
                    # cv2.imread returns None for unreadable files; dump context.
                    print(e)
                    print(bbox)
                    print(b)
                    print(imgid)
                    exit(0)
                im = Image.fromarray(crop_region, mode="RGB")
                images = processor(images=im, return_tensors="pt").to(device)
                image_features = vision_model.get_image_features(**images).squeeze()
                # BUGFIX: CUDA tensors must be moved to CPU before .numpy().
                des_features.append(image_features.cpu().numpy())
    kmeans = KMeans(n_clusters=visual_bow_size, random_state=0, n_init=10)
    img_cluster = kmeans.fit(np.array(des_features))
    visual_word = img_cluster.cluster_centers_  # ndarray of shape (n_clusters, n_features)
    # BUGFIX: ndarray.shape is a tuple and must be indexed; the original
    # `visual_word.shape(0)` raised TypeError before anything was saved.
    vocab = ['vword_' + str(idx) for idx in range(visual_word.shape[0])]
    id2token = dict(enumerate(vocab))
    with open(target_file, 'wb') as f:
        pickle.dump([img_cluster, vocab, id2token, visual_word], f)
    return img_cluster, vocab, id2token, visual_word
def extract_vbow_features(file_path, visual_word_path, visual_bow_size, original_img_dir,
                          clip_version="openai/clip-vit-base-patch32"):
    """Compute per-sample visual bag-of-words histograms over CLIP region features.

    Loads (or builds) the visual vocabulary, extracts a CLIP feature per bounding
    box, quantizes each feature to its nearest visual word, and counts hits.

    :return: (features array of shape (n_samples, n_visual_words), id->token dict)
    """
    print('prepare vision features extractor ...')
    vision_model = CLIPModel.from_pretrained(clip_version)
    for param in vision_model.parameters():
        param.requires_grad = False
    vision_model.eval()
    processor = CLIPProcessor.from_pretrained(clip_version)
    # Keep model and inputs on one device; the original moved inputs to CUDA
    # unconditionally, which crashed on CPU-only hosts.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    vision_model.to(device)
    print('extract visual bow features .....')
    with open(file_path, 'r') as f:
        data = json.load(f)
    if os.path.exists(visual_word_path):
        with open(visual_word_path, 'rb') as f:
            img_cluster, vocab, id2token, visual_word = pickle.load(f)
    else:
        img_cluster, vocab, id2token, visual_word = extract_visual_words(vision_model, processor,
                                                                         data, visual_bow_size, original_img_dir,
                                                                         visual_word_path)
    des_list = []
    with torch.no_grad():  # inference only; avoid building autograd graphs
        for d in tqdm(data, total=len(data)):
            imgid = d['img_id']
            # NOTE(review): the 'train' split is hard-coded even though this helper
            # looks split-agnostic -- confirm against callers before changing.
            img_path = os.path.join(original_img_dir, 'train', imgid)
            bbox = d['VSG']['bbox']
            features_list = []
            # Read once per sample instead of once per bounding box.
            crop_img = cv2.imread(img_path)
            for b in bbox:
                try:
                    crop_region = crop_img[b[1]:b[3], b[0]:b[2]]
                except TypeError as e:
                    # cv2.imread returns None for unreadable files; dump context.
                    print(e)
                    print(bbox)
                    print(b)
                    print(imgid)
                    exit(0)
                im = Image.fromarray(crop_region, mode="RGB")
                images = processor(images=im, return_tensors="pt").to(device)
                image_features = vision_model.get_image_features(**images).squeeze()
                # BUGFIX: CUDA tensors must be moved to CPU before .numpy().
                features_list.append(image_features.cpu().numpy())
            des_list.append((d['img_id'], d['VSG']['bbox'], np.array(features_list)))
    vbow_features = np.zeros((len(des_list), len(visual_word)), "float32")
    for i in tqdm(range(len(des_list)), total=len(des_list)):
        # Vector-quantize each region feature to its nearest cluster centre.
        words, distance = vq(des_list[i][2], visual_word)
        assert len(words) == len(distance) == len(des_list[i][1])
        for w in words:
            vbow_features[i][w] += 1
    return vbow_features, id2token
def extract_text_bow_vocab(train_file_path, target_file, textual_bow_size, stopwords_language="english"):
    """Fit a CountVectorizer vocabulary on the training tokens and pickle it.

    :param train_file_path: JSON file of samples with a 'token' list each.
    :param target_file: pickle receiving [vectorizer, vocab, id2token].
    :param textual_bow_size: maximum vocabulary size.
    :param stopwords_language: NLTK stopword list to exclude (default English).
    :return: (fitted vectorizer, vocab list, id->token dict)
    """
    with open(train_file_path, 'rb') as f:
        data = json.load(f)
    text_for_bow = [' '.join(d['token']) for d in tqdm(data, total=len(data))]
    stopwords = set(stop_words.words(stopwords_language))
    vectorizer = CountVectorizer(max_features=textual_bow_size, stop_words=stopwords)
    vectorizer.fit(text_for_bow)
    # get_feature_names() was removed in scikit-learn 1.2 -- prefer the new API
    # when it exists, keeping compatibility with older installs.
    if hasattr(vectorizer, 'get_feature_names_out'):
        vocab = list(vectorizer.get_feature_names_out())
    else:
        vocab = vectorizer.get_feature_names()
    id2token = {k: v for k, v in zip(range(0, len(vocab)), vocab)}
    with open(target_file, 'wb') as f:
        pickle.dump([vectorizer, vocab, id2token], f)
    return vectorizer, vocab, id2token
def extract_tbow_features(file_path, textual_word_path, textual_bow_size):
    """Vectorize every sample's tokens with a cached (or freshly fitted) CountVectorizer.

    :return: (sparse document-term matrix, id->token dict)
    """
    print('extract textual bow features .....')
    # Reuse a previously pickled vocabulary when available; otherwise fit one.
    if os.path.exists(textual_word_path):
        with open(textual_word_path, 'rb') as f:
            vectorizer, vocab, id2token = pickle.load(f)
    else:
        vectorizer, vocab, id2token = extract_text_bow_vocab(file_path, textual_word_path, textual_bow_size)

    with open(file_path, 'rb') as f:
        samples = json.load(f)
    docs = [' '.join(sample['token']) for sample in tqdm(samples, total=len(samples))]
    return vectorizer.transform(docs), id2token
if __name__ == '__main__':
    # Build both bag-of-words vocabularies from the training split.
    FILE_DIR = '../data/vsg_tsg/'
    with open(os.path.join(FILE_DIR, 'ours_train.json'), 'r') as f:
        data = json.load(f)
    print('create textual bow')
    target_tbow = 'tbow.pt'
    textual_bow_size = 2000
    create_tbow(data, textual_bow_size, os.path.join(FILE_DIR, target_tbow))
    print('create visual bow')
    # NOTE(review): `target_tbow` is reused for the visual vocabulary file name;
    # it works but a separate `target_vbow` variable would be clearer.
    target_tbow = 'vbow.pt'
    IMG_DIR = '../data/img_org/'
    visual_bow_size = 2000
    create_vbow(data, visual_bow_size, 'train', IMG_DIR, os.path.join(FILE_DIR, target_tbow))
| 9,510 | 38.962185 | 113 | py |
MRE-ISE | MRE-ISE-main/processor/dataset.py | import pickle
import random
import os
import numpy as np
import torch
import json
import ast
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer
from torchvision import transforms
from transformers import CLIPTokenizer
from torch_geometric.utils import to_dense_adj, dense_to_sparse, add_self_loops
import logging
from processor.create_bow import extract_tbow_features, extract_vbow_features
logger = logging.getLogger(__name__)
def printf(param, name):
    """Debug helper: print a label followed by the value (note the reversed order)."""
    print(name, param)
class Vocabulary():
    """Bidirectional token<->id mapping with reserved UNK (0) and PAD (1) entries."""

    def __init__(self):
        self.UNK = 'UNK'
        self.PAD = 'PAD'
        # token -> id and id -> token, kept in sync by build_vocab().
        self.vocab = {self.UNK: 0, self.PAD: 1}
        self.rev_vocab = {0: self.UNK, 1: self.PAD}

    def build_vocab(self, data):
        """Register every token of every sequence in *data*, in first-seen order."""
        for m in data:
            for n in m:
                if n not in self.vocab:
                    idx = len(self.vocab)
                    self.vocab[n] = idx
                    self.rev_vocab[idx] = n

    def id2token(self, idx):
        """Return the token for *idx*, or UNK when the id is unknown."""
        # FIX: use an explicit default instead of the previous truthiness test,
        # which double-looked-up the dict and would misbehave for falsy values.
        return self.rev_vocab.get(idx, self.UNK)

    def token2id(self, token):
        """Return the id for *token*, or the UNK id when the token is unknown."""
        idx = self.vocab.get(token)
        return idx if idx is not None else self.vocab[self.UNK]
def construct_adjacent_matrix(relation, seq_len):
    """Build a seq_len x seq_len float adjacency with self-loops plus head edges.

    Row i always gets a self-loop; when relation[i] != 0 an edge to the 1-based
    head index relation[i] (column relation[i]-1) is added.

    :param relation: per-token head indices (0 means "no head").
    :param seq_len: size of the square matrix; may exceed len(relation) (padding).
    :return: float tensor of shape (seq_len, seq_len).
    """
    # torch.zeros replaces the original nested list comprehension: same result,
    # one allocation instead of seq_len**2 Python objects.
    matrix = torch.zeros(seq_len, seq_len, dtype=torch.float)
    for i, r in enumerate(relation):
        matrix[i][i] = 1
        if r != 0:
            matrix[i][r - 1] = 1
    return matrix
class MMREProcessor(object):
    """Loads MMRE samples (tokens, relation, entity pair, image id, scene graphs)
    and derives visual/textual bag-of-words features for each split."""

    def __init__(self, data_path, re_path, img_path, vit_name,
                 visual_bow_size=2000, textual_bow_size=2000,
                 clip_processor=None):
        # data_path / img_path are dicts keyed by split name (plus 'vbow'/'tbow'
        # cache paths in data_path); re_path is the relation->id JSON file.
        self.data_path = data_path
        self.re_path = re_path
        self.img_path = img_path
        self.visual_bow_size = visual_bow_size
        self.textual_bow_size = textual_bow_size
        self.vit_name = vit_name
        self.tokenizer = CLIPTokenizer.from_pretrained(vit_name, do_lower_case=True)
        self.clip_processor = clip_processor

    def load_from_json(self, mode="train"):
        """Load one split and attach precomputed visual/textual BoW features.

        :param mode: split name used to index self.data_path.
        :return: dict of parallel lists plus BoW feature matrices and id->token maps.
        """
        load_file = self.data_path[mode]
        logger.info("Loading data from {}".format(load_file))
        words, relations, heads, tails, imgids, dataid, VSG, TSG = [], [], [], [], [], [], [], []
        with open(os.path.join(load_file)) as f:
            lines = json.load(f)
            for i, line in enumerate(lines):
                words.append(line['token'])
                relations.append(line['relation'])
                heads.append(line['h'])  # {name, pos}
                tails.append(line['t'])
                imgids.append(line['img_id'])
                VSG.append(line['VSG'])
                TSG.append(line['TSG'])
                dataid.append(i)
        assert len(words) == len(relations) == len(heads) == len(tails) == (len(imgids)) == len(VSG) == len(TSG)
        # NOTE(review): image features are always taken from the 'train' image
        # directory regardless of `mode` -- confirm this is intentional.
        # file_path, visual_word_path, visual_bow_size, original_img_dir, clip_version = "openai/clip-vit-base-patch32"
        vbow_features, vbow_id2token = extract_vbow_features(self.data_path[mode], self.data_path['vbow'],
                                                             visual_bow_size=self.visual_bow_size,
                                                             original_img_dir=self.img_path['train'],
                                                             clip_version=self.vit_name)
        # file_path, textual_word_path, textual_bow_size
        tbow_features, tbow_id2token = extract_tbow_features(self.data_path[mode], self.data_path['tbow'],
                                                             textual_bow_size=self.textual_bow_size)
        return {'words': words, 'relations': relations, 'heads': heads, 'tails': tails, 'imgids': imgids,
                'VSG': VSG, 'TSG': TSG, 'dataid': dataid,
                'vbow_features': vbow_features, 'vbow_id2token': vbow_id2token,
                'tbow_features': tbow_features, 'tbow_id2token': tbow_id2token}

    def get_relation_dict(self):
        """Return the relation-name -> label-id mapping (single-line JSON file)."""
        with open(self.re_path, 'r', encoding="utf-8") as f:
            line = f.readlines()[0]
        re_dict = json.loads(line)
        return re_dict

    def get_rel2id(self, train_path):
        """Group training sample indices by relation label.

        :param train_path: file with one Python-dict-literal sample per line.
        :return: dict mapping relation name -> list of line indices.
        """
        with open(self.re_path, 'r', encoding="utf-8") as f:
            line = f.readlines()[0]
        re_dict = json.loads(line)
        re2id = {key: [] for key in re_dict.keys()}
        with open(train_path, "r", encoding="utf-8") as f:
            lines = f.readlines()
            for i, line in enumerate(lines):
                line = ast.literal_eval(line)  # str to dict
                assert line['relation'] in re2id
                re2id[line['relation']].append(i)
        return re2id
class NewMMREDatasetForIB(Dataset):
    """Torch Dataset producing tokenized text, adjacency/edge masks, BoW features
    and (optionally) CLIP-processed images for the information-bottleneck model."""

    def __init__(self, processor, transform, img_path=None, max_seq=40,
                 mode="train", max_obj_num=40) -> None:
        self.processor = processor
        self.transform = transform
        self.max_seq = max_seq          # padded token length
        self.img_path = img_path[mode] if img_path is not None else img_path
        self.mode = mode
        self.data_dict = self.processor.load_from_json(mode)
        self.re_dict = self.processor.get_relation_dict()
        self.tokenizer = self.processor.tokenizer
        self.clip_processor = self.processor.clip_processor
        self.max_obj_num = max_obj_num  # padded number of visual objects
        self.text_bow_size = len(self.data_dict['tbow_id2token'])
        self.visual_bow_size = len(self.data_dict['vbow_id2token'])
        # self.tfms = transforms.Compose([transforms.Resize(model.image_size), transforms.ToTensor(),
        #                                 transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), ])

    def __len__(self):
        return len(self.data_dict['words'])

    def __getitem__(self, idx):
        """Assemble one training example; see return tuples at the bottom."""
        word_list, relation, head_d, tail_d, imgid = self.data_dict['words'][idx], self.data_dict['relations'][idx], \
                                                     self.data_dict['heads'][idx], self.data_dict['tails'][idx], \
                                                     self.data_dict['imgids'][idx]
        item_id = self.data_dict['dataid'][idx]
        # [CLS] ... <s> head </s> ... <o> tail <o/> .. [SEP]
        head_pos, tail_pos = head_d['pos'], tail_d['pos']
        head_tail_pos = torch.tensor(head_d['pos'] + tail_d['pos'])
        # Word-piece tokenize each word, then flatten and add special tokens.
        tokens = [self.tokenizer.tokenize(word) for word in word_list]
        pieces = [piece for pieces in tokens for piece in pieces]
        _bert_inputs = self.tokenizer.convert_tokens_to_ids(pieces)
        _bert_inputs = [self.tokenizer.cls_token_id] + _bert_inputs + [self.tokenizer.sep_token_id]
        # NOTE(review): np.int / np.bool were removed in NumPy 1.20+/1.24 -- this
        # code requires an older NumPy; switch to int / bool on upgrade.
        input_ids = np.zeros(self.max_seq, np.int)
        input_ids[:len(_bert_inputs)] = _bert_inputs
        input_ids = torch.tensor(input_ids)
        attention_mask = torch.zeros(self.max_seq, dtype=torch.long)
        attention_mask[:len(_bert_inputs)] = 1
        token_type_ids = torch.zeros(self.max_seq, dtype=torch.long)
        length = len(word_list)
        # pieces2word[i, j] is True when word-piece j (incl. CLS offset) belongs to word i.
        _pieces2word = np.zeros((length, len(_bert_inputs)), dtype=np.bool)
        start = 0
        for i, pieces in enumerate(tokens):
            if len(pieces) == 0:
                continue
            pieces = list(range(start, start + len(pieces)))
            # +1 / +2 shift accounts for the prepended [CLS] token.
            _pieces2word[i, pieces[0] + 1:pieces[-1] + 2] = 1
            start += len(pieces)
        # max_pie = np.max([len(x) for x in tokens])
        pieces2word = np.zeros((self.max_seq, self.max_seq), dtype=np.bool)
        pieces2word[:_pieces2word.shape[0], :_pieces2word.shape[1]] = _pieces2word
        pieces2word = torch.tensor(pieces2word)
        re_label = self.re_dict[relation]  # label to id
        # NOTE(review): 'dep_head' is never populated by MMREProcessor.load_from_json
        # in this file -- confirm where this key is produced.
        dep_head = [k if i - 1 < 0 else i - 1 for k, i in enumerate(self.data_dict['dep_head'][idx])]
        dep_tail = [i for i in range(0, len(dep_head))]
        edge_index = torch.tensor([dep_head, dep_tail], dtype=torch.long)
        edge_index = add_self_loops(edge_index)[0]
        adj_matrix = to_dense_adj(edge_index, max_num_nodes=self.max_seq).squeeze()
        # Joint text+object mask over a (max_seq + max_obj_num)-node graph.
        edge_mask = torch.zeros(self.max_seq + self.max_obj_num, self.max_seq + self.max_obj_num)
        edge_mask[:length, :length] = 1
        # NOTE(review): this row slice starts past the last row, so it selects
        # nothing -- presumably a bug; verify intended object->text connectivity.
        edge_mask[self.max_seq + self.max_obj_num:, :length] = 1
        edge_mask[self.max_seq:self.max_seq + self.max_obj_num] = 1
        edge_mask[self.max_seq:self.max_seq + self.max_obj_num, self.max_seq:self.max_seq + self.max_obj_num] = 1
        # text_bow features
        tbow_features = self.data_dict['tbow_features'][idx]
        # visual_bow features
        vbow_features = self.data_dict['vbow_features'][idx]
        # image process
        if self.img_path is not None:
            try:
                img_path = os.path.join(self.img_path, imgid)
                image = Image.open(img_path).convert('RGB')
                # image = self.transform(image)
                image = self.clip_processor(images=image, return_tensors='pt')['pixel_values'].squeeze()
            except:
                # Fall back to a placeholder image when the file is missing/corrupt.
                img_path = os.path.join(self.img_path, 'inf.png')
                image = Image.open(img_path).convert('RGB')
                image = self.clip_processor(images=image, return_tensors='pt')['pixel_values'].squeeze()
            # NOTE(review): self.aux_img_path / aux_processor / aux_size are never
            # set in __init__ -- this branch looks like a legacy path and would
            # raise AttributeError if reached.
            if self.aux_img_path is not None:
                # detected object img
                aux_imgs = []
                aux_img_paths = []
                imgid = imgid.split(".")[0]
                if item_id in self.data_dict['aux_imgs']:
                    aux_img_paths = self.data_dict['aux_imgs'][item_id]
                    aux_img_paths = [os.path.join(self.aux_img_path, path) for path in aux_img_paths]
                # select 3 img
                for i in range(min(3, len(aux_img_paths))):
                    aux_img = Image.open(aux_img_paths[i]).convert('RGB')
                    aux_img = self.aux_processor(images=aux_img, return_tensors='pt')['pixel_values'].squeeze()
                    aux_imgs.append(aux_img)
                # padding
                aux_mask = torch.tensor([1 for _ in range(len(aux_imgs))] + [0 for _ in range(3 - len(aux_imgs))])
                for i in range(3 - len(aux_imgs)):
                    aux_imgs.append(torch.zeros((3, self.aux_size, self.aux_size)))
                aux_imgs = torch.stack(aux_imgs, dim=0)
                assert len(aux_imgs) == 3
                return input_ids, pieces2word, attention_mask, token_type_ids, adj_matrix, head_tail_pos, torch.tensor(
                    re_label), image, aux_imgs, aux_mask, edge_mask, vbow_features, tbow_features
            return input_ids, pieces2word, attention_mask, token_type_ids, adj_matrix, head_tail_pos, torch.tensor(
                re_label), image, edge_mask, vbow_features, tbow_features
        return input_ids, pieces2word, attention_mask, token_type_ids, adj_matrix, head_tail_pos, torch.tensor(
            re_label), edge_mask, vbow_features, tbow_features
def extend_tensor(tensor, extended_shape, fill=0):
    """Copy *tensor* into the top-left corner of a larger tensor padded with *fill*.

    :param tensor: source tensor of any rank.
    :param extended_shape: target shape; each dim must be >= the source dim.
    :param fill: padding value for the uncovered region (default 0).
    :return: tensor of shape *extended_shape*, same dtype/device as the source.
    """
    # torch.full replaces the original zeros(...).fill_(fill) two-step, and the
    # generic slice tuple fixes the original's silent no-copy for rank > 4.
    extended_tensor = torch.full(extended_shape, fill,
                                 dtype=tensor.dtype, device=tensor.device)
    extended_tensor[tuple(slice(0, s) for s in tensor.shape)] = tensor
    return extended_tensor
def padded_stack(tensors, padding=0):
    """Stack tensors of unequal shape, padding each up to the per-dimension maximum.

    :param tensors: non-empty list of tensors sharing the same rank.
    :param padding: fill value for the padded regions (default 0).
    :return: tensor of shape (len(tensors), *max_shape).
    """
    rank = len(tensors[0].shape)
    # Target shape is the elementwise maximum over all inputs.
    target_shape = [max(t.shape[d] for t in tensors) for d in range(rank)]
    padded = [extend_tensor(t, target_shape, fill=padding) for t in tensors]
    return torch.stack(padded)
# def collate_fn_padding(batch):
# data_types = len(batch[0])
# bsz = len(batch)
#
# for i in range(data_types):
# samples = [x for b in range(bsz) for x in batch[b][i]]
# if not batch[0][i].shape:
# padded_batch[key] = torch.stack(samples)
# else:
# padded_batch[key] = padded_stack([s[key] for s in batch])
#
# return padded_batch
#
# padded_batch = dict()
# keys = batch[0].keys()
| 12,476 | 42.024138 | 120 | py |
MRE-ISE | MRE-ISE-main/cores/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/cores/lamo/decoding_network.py | import torch
from torch import nn
from torch.nn import functional as F
from cores.lamo.inference_network import CombinedInferenceNetwork, ContextualInferenceNetwork
class DecoderNetwork(nn.Module):
    """AVITM-style decoder with a shared topic posterior that reconstructs both a
    textual and a visual bag-of-words distribution."""

    def __init__(self, text_input_size, visual_input_size, bert_size, infnet, n_components=10, model_type='prodLDA',
                 hidden_sizes=(100,100), activation='softplus', dropout=0.2,
                 learn_priors=True, label_size=0):
        """
        Initialize InferenceNetwork.

        Args
            text_input_size : int, dimension of text input
            visual_input_size : int, dimension of visual input
            bert_size : int, dimension of the contextual embedding input
            infnet : string, 'zeroshot' or 'combined' inference network variant
            n_components : int, number of topic components, (default 10)
            model_type : string, 'prodLDA' or 'LDA' (default 'prodLDA')
            hidden_sizes : tuple, length = n_layers, (default (100, 100))
            activation : string, 'softplus', 'relu', (default 'softplus')
            dropout : float, dropout probability on theta (default 0.2)
            learn_priors : bool, make priors learnable parameter
            label_size : int, number of labels for the optional supervised head (0 disables)
        """
        super(DecoderNetwork, self).__init__()
        assert isinstance(text_input_size, int), "text input_size must by type int."
        assert isinstance(visual_input_size, int), "visual input_size must by type int."
        assert isinstance(n_components, int) and n_components > 0, \
            "n_components must be type int > 0."
        assert model_type in ['prodLDA', 'LDA'], \
            "model type must be 'prodLDA' or 'LDA'"
        assert isinstance(hidden_sizes, tuple), \
            "hidden_sizes must be type tuple."
        assert activation in ['softplus', 'relu'], \
            "activation must be 'softplus' or 'relu'."
        assert dropout >= 0, "dropout must be >= 0."

        self.text_input_size = text_input_size
        self.visual_input_size = visual_input_size
        self.n_components = n_components
        self.model_type = model_type
        self.hidden_sizes = hidden_sizes
        self.activation = activation
        self.dropout = dropout
        self.learn_priors = learn_priors
        # Filled on each forward pass with the current topic-word matrices.
        self.topic_text_word_matrix = None
        self.topic_visual_word_matrix = None

        # Choose the amortized inference network variant.
        if infnet == "zeroshot":
            self.inf_net = ContextualInferenceNetwork(
                text_input_size, visual_input_size, bert_size, n_components, hidden_sizes, activation, label_size=label_size)
        elif infnet == "combined":
            self.inf_net = CombinedInferenceNetwork(
                text_input_size, visual_input_size, bert_size, n_components, hidden_sizes, activation, label_size=label_size)
        else:
            raise Exception('Missing infnet parameter, options are zeroshot and combined')

        # Optional supervised head mapping theta -> label logits.
        if label_size != 0:
            self.label_classification = nn.Linear(n_components, label_size)

        # init prior parameters
        # \mu_1k = log \alpha_k + 1/K \sum_i log \alpha_i;
        # \alpha = 1 \forall \alpha
        topic_prior_mean = 0.0
        self.prior_mean = torch.tensor(
            [topic_prior_mean] * n_components)
        if torch.cuda.is_available():
            self.prior_mean = self.prior_mean.cuda()
        if self.learn_priors:
            self.prior_mean = nn.Parameter(self.prior_mean)

        # \Sigma_1kk = 1 / \alpha_k (1 - 2/K) + 1/K^2 \sum_i 1 / \alpha_k;
        # \alpha = 1 \forall \alpha
        topic_prior_variance = 1. - (1. / self.n_components)
        self.prior_variance = torch.tensor(
            [topic_prior_variance] * n_components)
        if torch.cuda.is_available():
            self.prior_variance = self.prior_variance.cuda()
        if self.learn_priors:
            self.prior_variance = nn.Parameter(self.prior_variance)

        # beta: topic -> textual word weights.
        self.beta = torch.Tensor(n_components, text_input_size)
        if torch.cuda.is_available():
            self.beta = self.beta.cuda()
        self.beta = nn.Parameter(self.beta)
        nn.init.xavier_uniform_(self.beta)
        self.beta_batchnorm = nn.BatchNorm1d(text_input_size, affine=False)
        # dropout on theta
        self.drop_theta = nn.Dropout(p=self.dropout)

        # alpha: topic -> visual word weights.
        self.alpha = torch.Tensor(n_components, visual_input_size)
        if torch.cuda.is_available():
            self.alpha = self.alpha.cuda()
        self.alpha = nn.Parameter(self.alpha)
        nn.init.xavier_uniform_(self.alpha)
        self.alpha_batchnorm = nn.BatchNorm1d(visual_input_size, affine=False)
        # dropout on theta
        self.drop_alpha = nn.Dropout(p=self.dropout)

    @staticmethod
    def reparameterize(mu, logvar):
        """Reparameterize the theta distribution."""
        std = torch.exp(0.5*logvar)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)

    def forward(self, x, x_bert, labels=None):
        """Forward pass.

        Returns prior/posterior statistics, the reconstructed text and visual
        word distributions, and label logits (None when labels is None).
        """
        # batch_size x n_components
        posterior_mu, posterior_log_sigma = self.inf_net(x, x_bert, labels)
        posterior_sigma = torch.exp(posterior_log_sigma)

        # generate samples from theta
        theta = F.softmax(
            self.reparameterize(posterior_mu, posterior_log_sigma), dim=1)
        theta = self.drop_theta(theta)

        # prodLDA vs LDA
        if self.model_type == 'prodLDA':
            # in: batch_size x input_size x n_components
            text_word_dist = F.softmax(
                self.beta_batchnorm(torch.matmul(theta, self.beta)), dim=1)
            # word_dist: batch_size x input_size
            self.topic_text_word_matrix = self.beta
            # in: batch_size x input_size x n_components
            visual_word_dist = F.softmax(
                self.alpha_batchnorm(torch.matmul(theta, self.alpha)), dim=1)
            # visual_word_dist: batch_size x input_size
            self.topic_visual_word_matrix = self.alpha
        elif self.model_type == 'LDA':
            # simplex constrain on Beta
            beta = F.softmax(self.beta_batchnorm(self.beta), dim=1)
            self.topic_text_word_matrix = beta
            text_word_dist = torch.matmul(theta, beta)
            # word_dist: batch_size x input_size
            # simplex constrain on Beta
            alpha = F.softmax(self.alpha_batchnorm(self.alpha), dim=1)
            self.topic_visual_word_matrix = alpha
            visual_word_dist = torch.matmul(theta, alpha)
            # word_dist: batch_size x input_size
        else:
            raise NotImplementedError("Model Type Not Implemented")

        # classify labels
        estimated_labels = None
        if labels is not None:
            estimated_labels = self.label_classification(theta)

        return self.prior_mean, self.prior_variance, \
            posterior_mu, posterior_sigma, posterior_log_sigma, text_word_dist, visual_word_dist, estimated_labels

    def get_theta(self, x, x_bert, labels=None):
        """Sample a document-topic distribution without tracking gradients."""
        with torch.no_grad():
            # batch_size x n_components
            posterior_mu, posterior_log_sigma = self.inf_net(x, x_bert, labels)
            #posterior_sigma = torch.exp(posterior_log_sigma)
            # generate samples from theta
            theta = F.softmax(
                self.reparameterize(posterior_mu, posterior_log_sigma), dim=1)
            return theta
| 7,129 | 39.977011 | 125 | py |
MRE-ISE | MRE-ISE-main/cores/lamo/ctm.py | import datetime
import multiprocessing as mp
import os
import warnings
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import torch
import wordcloud
from scipy.special import softmax
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from tqdm import tqdm
from cores.lamo.early_stopping import EarlyStopping
from cores.lamo.decoding_network import DecoderNetwork
class CTM:
"""Class to train the contextualized topic model. This is the more general class that we are keeping to
avoid braking code, users should use the two subclasses ZeroShotTM and CombinedTm to do topic modeling.
:param text_bow_size: int, dimension of input
:param contextual_size: int, dimension of input that comes from BERT embeddings
:param inference_type: string, you can choose between the contextual model and the combined model
:param n_components: int, number of topic components, (default 10)
:param model_type: string, 'prodLDA' or 'LDA' (default 'prodLDA')
:param hidden_sizes: tuple, length = n_layers, (default (100, 100))
:param activation: string, 'softplus', 'relu', (default 'softplus')
:param dropout: float, dropout to use (default 0.2)
:param learn_priors: bool, make priors a learnable parameter (default True)
:param batch_size: int, size of batch to use for training (default 64)
:param lr: float, learning rate to use for training (default 2e-3)
:param momentum: float, momentum to use for training (default 0.99)
:param solver: string, optimizer 'adam' or 'sgd' (default 'adam')
:param num_epochs: int, number of epochs to train for, (default 100)
:param reduce_on_plateau: bool, reduce learning rate by 10x on plateau of 10 epochs (default False)
:param num_data_loader_workers: int, number of data loader workers (default cpu_count). set it to 0 if you are using Windows
:param label_size: int, number of total labels (default: 0)
:param loss_weights: dict, it contains the name of the weight parameter (key) and the weight (value) for each loss.
It supports only the weight parameter beta for now. If None, then the weights are set to 1 (default: None).
"""
def __init__(self, text_bow_size, visual_bow_size, contextual_size, inference_type="combined", n_components=10,
model_type='prodLDA', hidden_sizes=(100, 100), activation='softplus', dropout=0.2,
learn_priors=True, batch_size=64, lr=2e-3, momentum=0.99, solver='adam', num_epochs=100,
reduce_on_plateau=False, num_data_loader_workers=mp.cpu_count(), label_size=0, loss_weights=None):
self.device = (
torch.device("cuda")
if torch.cuda.is_available()
else torch.device("cpu")
)
if self.__class__.__name__ == "CTM":
raise Exception("You cannot call this class. Use ZeroShotTM or CombinedTM")
assert isinstance(text_bow_size, int) and text_bow_size > 0, \
"input_size must by type int > 0."
assert isinstance(visual_bow_size, int) and visual_bow_size > 0, \
"input_size must by type int > 0."
assert isinstance(n_components, int) and text_bow_size > 0, \
"n_components must by type int > 0."
assert model_type in ['LDA', 'prodLDA'], \
"model must be 'LDA' or 'prodLDA'."
assert isinstance(hidden_sizes, tuple), \
"hidden_sizes must be type tuple."
assert activation in ['softplus', 'relu'], \
"activation must be 'softplus' or 'relu'."
assert dropout >= 0, "dropout must be >= 0."
assert isinstance(learn_priors, bool), "learn_priors must be boolean."
assert isinstance(batch_size, int) and batch_size > 0, \
"batch_size must be int > 0."
assert lr > 0, "lr must be > 0."
assert isinstance(momentum, float) and 0 < momentum <= 1, \
"momentum must be 0 < float <= 1."
assert solver in ['adam', 'sgd'], "solver must be 'adam' or 'sgd'."
assert isinstance(reduce_on_plateau, bool), \
"reduce_on_plateau must be type bool."
assert isinstance(num_data_loader_workers, int) and num_data_loader_workers >= 0, \
"num_data_loader_workers must by type int >= 0. set 0 if you are using windows"
self.text_bow_size = text_bow_size
self.visual_bow_size = visual_bow_size
self.n_components = n_components
self.model_type = model_type
self.hidden_sizes = hidden_sizes
self.activation = activation
self.dropout = dropout
self.learn_priors = learn_priors
self.batch_size = batch_size
self.lr = lr
self.contextual_size = contextual_size
self.momentum = momentum
self.solver = solver
self.num_epochs = num_epochs
self.reduce_on_plateau = reduce_on_plateau
self.num_data_loader_workers = num_data_loader_workers
self.training_doc_topic_distributions = None
if loss_weights:
self.weights = loss_weights
else:
self.weights = {"beta": 1}
self.model = DecoderNetwork(
text_bow_size, visual_bow_size, self.contextual_size, inference_type, n_components, model_type, hidden_sizes, activation,
dropout, learn_priors, label_size=label_size)
self.early_stopping = None
# init optimizer
if self.solver == 'adam':
self.optimizer = optim.Adam(
self.model.parameters(), lr=lr, betas=(self.momentum, 0.99))
elif self.solver == 'sgd':
self.optimizer = optim.SGD(
self.model.parameters(), lr=lr, momentum=self.momentum)
# init lr scheduler
if self.reduce_on_plateau:
self.scheduler = ReduceLROnPlateau(self.optimizer, patience=10)
# performance attributes
self.best_loss_train = float('inf')
# training attributes
self.model_dir = None
self.nn_epoch = None
# validation attributes
self.validation_data = None
# learned topics
self.best_T_components = None
self.best_V_components = None
# Use cuda if available
if torch.cuda.is_available():
self.USE_CUDA = True
else:
self.USE_CUDA = False
self.model = self.model.to(self.device)
def _loss(self, text_inputs, visual_inputs, text_word_dists, visual_word_dists, prior_mean, prior_variance,
posterior_mean, posterior_variance, posterior_log_variance):
# KL term
# var division term
var_division = torch.sum(posterior_variance / prior_variance, dim=1)
# diff means term
diff_means = prior_mean - posterior_mean
diff_term = torch.sum(
(diff_means * diff_means) / prior_variance, dim=1)
# logvar det division term
logvar_det_division = \
prior_variance.log().sum() - posterior_log_variance.sum(dim=1)
# combine terms
KL = 0.5 * (
var_division + diff_term - self.n_components + logvar_det_division)
# Reconstruction term
T_RL = -torch.sum(text_inputs * torch.log(text_word_dists + 1e-10), dim=1)
V_RL = -torch.sum(visual_inputs * torch.log(visual_word_dists + 1e-10), dim=1)
#loss = self.weights["beta"]*KL + RL
return KL, T_RL, V_RL
    def _train_epoch(self, loader):
        """Train epoch.

        Iterates the loader once, optimizing KL + text/visual reconstruction
        (+ optional label cross-entropy). Returns (samples processed, mean loss).
        """
        self.model.train()
        train_loss = 0
        # NOTE(review): starting at 1 (not 0) slightly skews the mean loss;
        # presumably a div-by-zero guard for an empty loader -- confirm intent.
        samples_processed = 1

        for batch_samples in loader:
            # batch_size x vocab_size
            X_T_bow = batch_samples['X_T_bow']
            X_T_bow = X_T_bow.reshape(X_T_bow.shape[0], -1)
            X_V_bow = batch_samples['X_V_bow']
            X_V_bow = X_V_bow.reshape(X_V_bow.shape[0], -1)
            X_contextual = batch_samples['X_contextual']

            # Optional one-hot label supervision.
            if "labels" in batch_samples.keys():
                labels = batch_samples["labels"]
                labels = labels.reshape(labels.shape[0], -1)
                labels = labels.to(self.device)
            else:
                labels = None

            if self.USE_CUDA:
                X_T_bow = X_T_bow.cuda()
                X_V_bow = X_V_bow.cuda()
                X_contextual = X_contextual.cuda()

            # forward pass
            self.model.zero_grad()
            prior_mean, prior_variance, posterior_mean, posterior_variance,\
                posterior_log_variance, text_word_dists, visual_word_dists, estimated_labels = self.model(X_T_bow, X_contextual, labels)

            # backward pass
            kl_loss, t_rl_loss, v_rl_loss = self._loss(
                X_T_bow, X_V_bow, text_word_dists, visual_word_dists, prior_mean, prior_variance,
                posterior_mean, posterior_variance, posterior_log_variance)
            # Weighted KL plus both reconstruction terms, summed over the batch.
            loss = self.weights["beta"]*kl_loss + t_rl_loss + v_rl_loss
            loss = loss.sum()

            if labels is not None:
                target_labels = torch.argmax(labels, 1)
                label_loss = torch.nn.CrossEntropyLoss()(estimated_labels, target_labels)
                loss += label_loss

            loss.backward()
            self.optimizer.step()

            # compute train loss
            samples_processed += X_T_bow.size()[0]
            train_loss += loss.item()

        train_loss /= samples_processed

        return samples_processed, train_loss
    def fit(self, train_dataset, validation_dataset=None, save_dir=None, verbose=False, patience=5, delta=0,
            n_samples=20):
        """
        Train the CTM model.
        :param train_dataset: PyTorch Dataset class for training data.
        :param validation_dataset: PyTorch Dataset class for validation data. If not None, the training stops if validation loss doesn't improve after a given patience
        :param save_dir: directory to save checkpoint models to.
        :param verbose: verbose
        :param patience: How long to wait after last time validation loss improved. Default: 5
        :param delta: Minimum change in the monitored quantity to qualify as an improvement. Default: 0
        :param n_samples: int, number of samples of the document topic distribution (default: 20)
        """
        # Print settings to output file
        if verbose:
            print("Settings: \n\
                   N Components: {}\n\
                   Topic Prior Mean: {}\n\
                   Topic Prior Variance: {}\n\
                   Model Type: {}\n\
                   Hidden Sizes: {}\n\
                   Activation: {}\n\
                   Dropout: {}\n\
                   Learn Priors: {}\n\
                   Learning Rate: {}\n\
                   Momentum: {}\n\
                   Reduce On Plateau: {}\n\
                   Save Dir: {}".format(
                self.n_components, 0.0,
                1. - (1. / self.n_components), self.model_type,
                self.hidden_sizes, self.activation, self.dropout, self.learn_priors,
                self.lr, self.momentum, self.reduce_on_plateau, save_dir))
        self.model_dir = save_dir
        # index -> token maps for both vocabularies; consumed later by
        # get_topics() / get_topic_lists()
        self.idx_2_T_token = train_dataset.idx_2_T_token
        self.idx_2_V_token = train_dataset.idx_2_V_token
        train_data = train_dataset
        self.validation_data = validation_dataset
        if self.validation_data is not None:
            # early stopping is only active when a validation set is provided
            self.early_stopping = EarlyStopping(patience=patience, verbose=verbose, path=save_dir, delta=delta)
        train_loader = DataLoader(
            train_data, batch_size=self.batch_size, shuffle=True,
            num_workers=self.num_data_loader_workers, drop_last=True)
        # init training variables
        train_loss = 0
        samples_processed = 0
        # train loop
        # NOTE(review): tqdm(self.num_epochs) passes the epoch count as tqdm's
        # *iterable* positional argument; tqdm(total=self.num_epochs) was
        # probably intended -- the bar works but shows no total. Confirm.
        pbar = tqdm(self.num_epochs, position=0, leave=True)
        for epoch in range(self.num_epochs):
            self.nn_epoch = epoch
            # train epoch
            s = datetime.datetime.now()
            sp, train_loss = self._train_epoch(train_loader)
            samples_processed += sp
            e = datetime.datetime.now()
            pbar.update(1)
            if self.validation_data is not None:
                validation_loader = DataLoader(self.validation_data, batch_size=self.batch_size, shuffle=True,
                                               num_workers=self.num_data_loader_workers, drop_last=True)
                # train epoch
                s = datetime.datetime.now()
                val_samples_processed, val_loss = self._validation(validation_loader)
                e = datetime.datetime.now()
                # report
                if verbose:
                    print("Epoch: [{}/{}]\tSamples: [{}/{}]\tValidation Loss: {}\tTime: {}".format(
                        epoch + 1, self.num_epochs, val_samples_processed,
                        len(self.validation_data) * self.num_epochs, val_loss, e - s))
                pbar.set_description("Epoch: [{}/{}]\t Seen Samples: [{}/{}]\tTrain Loss: {}\tValid Loss: {}\tTime: {}".format(
                    epoch + 1, self.num_epochs, samples_processed,
                    len(train_data) * self.num_epochs, train_loss, val_loss, e - s))
                # EarlyStopping tracks the best validation loss and snapshots
                # the best topic-word matrix inside its __call__
                self.early_stopping(val_loss, self)
                if self.early_stopping.early_stop:
                    print("Early stopping")
                    break
            else:
                # save last epoch
                # without validation, the "best" topic-word matrices are just
                # those of the most recent epoch
                self.best_T_components = self.model.beta
                self.best_V_components = self.model.alpha
                if save_dir is not None:
                    self.save(save_dir)
                pbar.set_description("Epoch: [{}/{}]\t Seen Samples: [{}/{}]\tTrain Loss: {}\tTime: {}".format(
                    epoch + 1, self.num_epochs, samples_processed,
                    len(train_data) * self.num_epochs, train_loss, e - s))
        pbar.close()
        # cache the training-set topic distribution for later inspection
        self.training_doc_topic_distributions = self.get_doc_topic_distribution(train_dataset, n_samples)
    def _validation(self, loader):
        """Validation epoch.

        Mirrors _train_epoch but performs no backward pass / optimizer step.
        :param loader: DataLoader over the validation set.
        :returns: (samples_processed, mean validation loss per sample)
        """
        self.model.eval()
        # NOTE(review): the forward pass below is not wrapped in
        # torch.no_grad(), so autograd graphs are still built; harmless for
        # correctness but wasteful -- confirm before changing.
        val_loss = 0
        samples_processed = 0
        for batch_samples in loader:
            # batch_size x vocab_size
            X_T_bow = batch_samples['X_T_bow']
            X_T_bow = X_T_bow.reshape(X_T_bow.shape[0], -1)
            X_V_bow = batch_samples['X_V_bow']
            X_V_bow = X_V_bow.reshape(X_V_bow.shape[0], -1)
            X_contextual = batch_samples['X_contextual']
            if "labels" in batch_samples.keys():
                labels = batch_samples["labels"]
                labels = labels.reshape(labels.shape[0], -1)
                labels = labels.to(self.device)
            else:
                labels = None
            if self.USE_CUDA:
                X_T_bow = X_T_bow.cuda()
                X_V_bow = X_V_bow.cuda()
                X_contextual = X_contextual.cuda()
            # # forward pass
            # self.model.zero_grad()
            # prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance, word_dists, \
            # estimated_labels =\
            # self.model(X_bow, X_contextual, labels)
            #
            # kl_loss, rl_loss = self._loss(X_bow, word_dists, prior_mean, prior_variance,
            # posterior_mean, posterior_variance, posterior_log_variance)
            # forward pass
            self.model.zero_grad()
            prior_mean, prior_variance, posterior_mean, posterior_variance, \
                posterior_log_variance, text_word_dists, visual_word_dists, estimated_labels = self.model(X_T_bow,
                                                                                                          X_contextual,
                                                                                                          labels)
            # backward pass
            # (same ELBO terms as training: KL + per-modality reconstruction)
            kl_loss, t_rl_loss, v_rl_loss = self._loss(
                X_T_bow, X_V_bow, text_word_dists, visual_word_dists, prior_mean, prior_variance,
                posterior_mean, posterior_variance, posterior_log_variance)
            loss = self.weights["beta"]*kl_loss + t_rl_loss + v_rl_loss
            loss = loss.sum()
            if labels is not None:
                # supervised variant: add label cross-entropy
                target_labels = torch.argmax(labels, 1)
                label_loss = torch.nn.CrossEntropyLoss()(estimated_labels, target_labels)
                loss += label_loss
            # compute train loss
            samples_processed += X_T_bow.size()[0]
            val_loss += loss.item()
        val_loss /= samples_processed
        return samples_processed, val_loss
    def get_thetas(self, dataset, n_samples=20):
        """
        Get the document-topic distribution for a example_dataset of topics. Includes multiple sampling to reduce variation via
        the parameter n_sample.
        :param dataset: a PyTorch Dataset containing the documents
        :param n_samples: the number of sample to collect to estimate the final distribution (the more the better).
        """
        # Thin alias kept for API compatibility; simply delegates.
        return self.get_doc_topic_distribution(dataset, n_samples=n_samples)
def get_doc_topic_distribution(self, dataset, n_samples=20):
"""
Get the document-topic distribution for a example_dataset of topics. Includes multiple sampling to reduce variation via
the parameter n_sample.
:param dataset: a PyTorch Dataset containing the documents
:param n_samples: the number of sample to collect to estimate the final distribution (the more the better).
"""
self.model.eval()
loader = DataLoader(
dataset, batch_size=self.batch_size, shuffle=False,
num_workers=self.num_data_loader_workers)
pbar = tqdm(n_samples, position=0, leave=True)
final_thetas = []
final_alphas = []
for sample_index in range(n_samples):
with torch.no_grad():
collect_theta = []
collect_alpha = []
for batch_samples in loader:
# batch_size x vocab_size
X_T_bow = batch_samples['X_T_bow']
X_T_bow = X_T_bow.reshape(X_T_bow.shape[0], -1)
X_V_bow = batch_samples['X_V_bow']
X_V_bow = X_V_bow.reshape(X_V_bow.shape[0], -1)
X_contextual = batch_samples['X_contextual']
if "labels" in batch_samples.keys():
labels = batch_samples["labels"]
labels = labels.reshape(labels.shape[0], -1)
labels = labels.to(self.device)
else:
labels = None
if self.USE_CUDA:
X_T_bow = X_T_bow.cuda()
X_V_bow = X_V_bow.cuda()
X_contextual = X_contextual.cuda()
# forward pass
self.model.zero_grad()
collect_theta.extend(self.model.get_theta(X_T_bow, X_contextual, labels).cpu().numpy().tolist())
pbar.update(1)
pbar.set_description("Sampling: [{}/{}]".format(sample_index + 1, n_samples))
final_thetas.append(np.array(collect_theta))
pbar.close()
return np.sum(final_thetas, axis=0) / n_samples
def get_most_likely_topic(self, doc_topic_distribution):
""" get the most likely topic for each document
:param doc_topic_distribution: ndarray representing the topic distribution of each document
"""
return np.argmax(doc_topic_distribution, axis=0)
def get_topics(self, k=10):
"""
Retrieve topic words.
:param k: int, number of words to return per topic, default 10.
"""
assert k <= self.text_bow_size, "k must be <= input size."
T_component_dists = self.best_T_components
V_component_dists = self.best_V_components
T_topics = defaultdict(list)
V_topics = defaultdict(list)
for i in range(self.n_components):
# obtain the topic textual word
_, idxs = torch.topk(T_component_dists[i], k)
component_words = [self.idx_2_T_token[idx]
for idx in idxs.cpu().numpy()]
T_topics[i] = component_words
# obtain the topic visual word
_, idxs = torch.topk(V_component_dists[i], k)
component_words = [self.idx_2_T_token[idx]
for idx in idxs.cpu().numpy()]
V_topics[i] = component_words
return T_topics, V_topics
def get_topic_lists(self, k=10):
"""
Retrieve the lists of topic words.
:param k: (int) number of words to return per topic, default 10.
"""
assert k <= self.text_bow_size, "k must be <= text input size."
assert k <= self.visual_bow_size, "k must be <= visual input size."
# TODO: collapse this method with the one that just returns the topics
T_component_dists = self.best_T_components
V_component_dists = self.best_V_components
T_topics, V_topics = [], []
for i in range(self.n_components):
_, idxs = torch.topk(T_component_dists[i], k)
component_words = [self.idx_2_T_token[idx]
for idx in idxs.cpu().numpy()]
T_topics.append(component_words)
_, idxs = torch.topk(V_component_dists[i], k)
component_words = [self.idx_2_V_token[idx]
for idx in idxs.cpu().numpy()]
V_topics.append(component_words)
return T_topics, V_topics
def _format_file(self):
model_dir = "contextualized_topic_model_nc_{}_tpm_{}_tpv_{}_hs_{}_ac_{}_do_{}_lr_{}_mo_{}_rp_{}". \
format(self.n_components, 0.0, 1 - (1. / self.n_components),
self.model_type, self.hidden_sizes, self.activation,
self.dropout, self.lr, self.momentum,
self.reduce_on_plateau)
return model_dir
    def save(self, models_dir=None):
        """
        Save model. (Experimental Feature, not tested)

        Persists both the network weights and the trainer's own attribute
        dict, one checkpoint file per epoch, under a directory named after
        the hyperparameter configuration.

        :param models_dir: path to directory for saving NN models.
        """
        warnings.simplefilter('always', Warning)
        warnings.warn("This is an experimental feature that we has not been fully tested. Refer to the following issue:"
                      "https://github.com/MilaNLProc/contextualized-topic-models/issues/38",
                      Warning)
        if (self.model is not None) and (models_dir is not None):
            # directory name encodes the hyperparameter configuration
            model_dir = self._format_file()
            if not os.path.isdir(os.path.join(models_dir, model_dir)):
                os.makedirs(os.path.join(models_dir, model_dir))
            # one checkpoint file per training epoch
            filename = "epoch_{}".format(self.nn_epoch) + '.pth'
            fileloc = os.path.join(models_dir, model_dir, filename)
            with open(fileloc, 'wb') as file:
                torch.save({'state_dict': self.model.state_dict(),
                            'dcue_dict': self.__dict__}, file)
    def load(self, model_dir, epoch):
        """
        Load a previously trained model. (Experimental Feature, not tested)

        :param model_dir: directory where models are saved.
        :param epoch: epoch of model to load.
        """
        warnings.simplefilter('always', Warning)
        warnings.warn("This is an experimental feature that we has not been fully tested. Refer to the following issue:"
                      "https://github.com/MilaNLProc/contextualized-topic-models/issues/38",
                      Warning)
        epoch_file = "epoch_" + str(epoch) + ".pth"
        models_dir = self._format_file()
        model_file = os.path.join(model_dir, models_dir, epoch_file)
        # SECURITY NOTE(review): torch.load unpickles the checkpoint and can
        # execute arbitrary code -- only load checkpoints you trust.
        with open(model_file, 'rb') as model_dict:
            checkpoint = torch.load(model_dict, map_location=torch.device(self.device))
        # restore the trainer's attributes first, then the network weights
        for (k, v) in checkpoint['dcue_dict'].items():
            setattr(self, k, v)
        self.model.load_state_dict(checkpoint['state_dict'])
def get_topic_text_word_matrix(self):
"""
Return the topic-word matrix (dimensions: number of topics x length of the vocabulary).
If model_type is LDA, the matrix is normalized; otherwise the matrix is unnormalized.
"""
return self.model.topic_text_word_matrix.cpu().detach().numpy()
def get_topic_text_word_distribution(self):
"""
Return the topic-word distribution (dimensions: number of topics x length of the vocabulary).
"""
mat = self.get_topic_text_word_matrix()
return softmax(mat, axis=1)
def get_topic_visual_word_matrix(self):
"""
Return the topic-word matrix (dimensions: number of topics x length of the vocabulary).
If model_type is LDA, the matrix is normalized; otherwise the matrix is unnormalized.
"""
return self.model.topic_visual_word_matrix.cpu().detach().numpy()
def get_topic_visual_word_distribution(self):
"""
Return the topic-word distribution (dimensions: number of topics x length of the vocabulary).
"""
mat = self.get_topic_visual_word_matrix()
return softmax(mat, axis=1)
def get_text_word_distribution_by_topic_id(self, topic):
"""
Return the word probability distribution of a topic sorted by probability.
:param topic: id of the topic (int)
:returns list of tuples (word, probability) sorted by the probability in descending order
"""
if topic >= self.n_components:
raise Exception('Topic id must be lower than the number of topics')
else:
wd = self.get_topic_text_word_distribution()
t = [(word, wd[topic][idx]) for idx, word in self.idx_2_T_token.items()]
t = sorted(t, key=lambda x: -x[1])
return t
def get_visual_word_distribution_by_topic_id(self, topic):
"""
Return the word probability distribution of a topic sorted by probability.
:param topic: id of the topic (int)
:returns list of tuples (word, probability) sorted by the probability in descending order
"""
if topic >= self.n_components:
raise Exception('Topic id must be lower than the number of topics')
else:
wd = self.get_topic_visual_word_distribution()
t = [(word, wd[topic][idx]) for idx, word in self.idx_2_V_token.items()]
t = sorted(t, key=lambda x: -x[1])
return t
    def get_wordcloud(self, topic_id, n_words=5, background_color="black", width=1000, height=400):
        """
        Plotting the wordcloud. It is an adapted version of the code found here:
        http://amueller.github.io/word_cloud/auto_examples/simple.html#sphx-glr-auto-examples-simple-py and
        here https://github.com/ddangelov/Top2Vec/blob/master/top2vec/Top2Vec.py

        Note: only the *textual* word distribution of the topic is rendered.

        :param topic_id: id of the topic
        :param n_words: number of words to show in word cloud
        :param background_color: color of the background
        :param width: width of the produced image
        :param height: height of the produced image
        """
        # (word, probability) pairs for the n_words most probable words
        word_score_list = self.get_text_word_distribution_by_topic_id(topic_id)[:n_words]
        word_score_dict = {tup[0]: tup[1] for tup in word_score_list}
        plt.figure(figsize=(10, 4), dpi=200)
        plt.axis("off")
        # word sizes are proportional to the topic-word probabilities
        plt.imshow(wordcloud.WordCloud(width=width, height=height, background_color=background_color
                                       ).generate_from_frequencies(word_score_dict))
        plt.title("Displaying Topic " + str(topic_id), loc='center', fontsize=24)
        plt.show()
def get_predicted_topics(self, dataset, n_samples):
"""
Return the a list containing the predicted topic for each document (length: number of documents).
:param dataset: CTMDataset to infer topics
:param n_samples: number of sampling of theta
:return: the predicted topics
"""
predicted_topics = []
thetas = self.get_doc_topic_distribution(dataset, n_samples)
for idd in range(len(dataset)):
predicted_topic = np.argmax(thetas[idd] / np.sum(thetas[idd]))
predicted_topics.append(predicted_topic)
return predicted_topics
def get_ldavis_data_format(self, vocab, dataset, n_samples):
"""
Returns the data that can be used in input to pyldavis to plot
the topics
"""
term_frequency = np.ravel(dataset.X_bow.sum(axis=0))
doc_lengths = np.ravel(dataset.X_bow.sum(axis=1))
term_topic = self.get_topic_text_word_distribution()
doc_topic_distribution = self.get_doc_topic_distribution(dataset, n_samples=n_samples)
data = {'topic_term_dists': term_topic,
'doc_topic_dists': doc_topic_distribution,
'doc_lengths': doc_lengths,
'vocab': vocab,
'term_frequency': term_frequency}
return data
def get_top_documents_per_topic_id(self, unpreprocessed_corpus, document_topic_distributions, topic_id, k=5):
probability_list = document_topic_distributions.T[topic_id]
ind = probability_list.argsort()[-k:][::-1]
res = []
for i in ind:
res.append((unpreprocessed_corpus[i], document_topic_distributions[i][topic_id]))
return res
class ZeroShotTM(CTM):
    """CTM variant whose inference network uses only contextual embeddings
    (ZeroShotTM, as described in https://arxiv.org/pdf/2004.07737v1.pdf).
    """

    def __init__(self, **kwargs):
        super().__init__(inference_type="zeroshot", **kwargs)
class CombinedTM(CTM):
    """CTM variant combining bag-of-words and contextual embeddings
    (CombinedTM, as described in https://arxiv.org/pdf/2004.03974.pdf).
    """

    def __init__(self, **kwargs):
        super().__init__(inference_type="combined", **kwargs)
| 30,164 | 41.545839 | 167 | py |
MRE-ISE | MRE-ISE-main/cores/lamo/early_stopping.py | import numpy as np
import torch
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience.

    Source code adapted from: https://github.com/Bjarten/early-stopping-pytorch
    """
    def __init__(self, patience=7, verbose=False, delta=0, path='checkpoint.pt', trace_func=print):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
            path (str): Path for the checkpoint to be saved to.
                            Default: 'checkpoint.pt'
            trace_func (function): trace print function.
                            Default: print
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # Fix: `np.Inf` was a deprecated alias removed in NumPy 2.0;
        # `np.inf` is the supported spelling (identical value).
        self.val_loss_min = np.inf
        self.delta = delta
        # a caller may explicitly pass path=None; fall back to the default
        if path is None:
            self.path = 'checkpoint.pt'
        else:
            self.path = path
        self.trace_func = trace_func

    def __call__(self, val_loss, model):
        # higher score == lower validation loss
        score = -val_loss
        if self.best_score is None:
            # first call: establish the baseline and checkpoint immediately
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            model.best_components = model.model.beta.clone()
        elif score < self.best_score + self.delta:
            # no sufficient improvement: count towards patience
            self.counter += 1
            self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # improvement: snapshot the best topic-word matrix and reset
            self.best_score = score
            model.best_components = model.model.beta.clone()
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        """Saves model when validation loss decrease."""
        if self.verbose:
            self.trace_func(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')
        model.save(self.path)
        self.val_loss_min = val_loss
| 2,353 | 36.967742 | 121 | py |
MRE-ISE | MRE-ISE-main/cores/lamo/inference_network.py | from collections import OrderedDict
from torch import nn
import torch
class ContextualInferenceNetwork(nn.Module):
    """Inference Network mapping contextual embeddings (and optional labels)
    to the mean and log-variance of the approximate posterior over topics."""

    def __init__(self, text_input_size, visual_input_size, bert_size, output_size, hidden_sizes,
                 activation='softplus', dropout=0.2, label_size=0):
        """
        Initialize InferenceNetwork.

        Args
            text_input_size : int, dimension of text input
            visual_input_size : int, dimension of visual input
            bert_size : int, dimension of the contextual embedding
            output_size : int, dimension of output (number of topics)
            hidden_sizes : tuple, length = n_layers
            activation : string, 'softplus' or 'relu', default 'softplus'
            dropout : float, default 0.2
            label_size : int, dimension of the optional one-hot labels
        """
        super(ContextualInferenceNetwork, self).__init__()
        assert isinstance(text_input_size, int), "text input_size must by type int."
        assert isinstance(visual_input_size, int), "visual input_size must by type int."
        assert isinstance(output_size, int), "output_size must be type int."
        assert isinstance(hidden_sizes, tuple), \
            "hidden_sizes must be type tuple."
        assert activation in ['softplus', 'relu'], \
            "activation must be 'softplus' or 'relu'."
        assert dropout >= 0, "dropout must be >= 0."
        self.text_input_size = text_input_size
        self.visual_input_size = visual_input_size
        self.output_size = output_size
        self.hidden_sizes = hidden_sizes
        self.dropout = dropout
        if activation == 'softplus':
            self.activation = nn.Softplus()
        elif activation == 'relu':
            self.activation = nn.ReLU()
        # labels (if any) are concatenated to the contextual embedding
        self.input_layer = nn.Linear(bert_size + label_size, hidden_sizes[0])
        #self.adapt_bert = nn.Linear(bert_size, hidden_sizes[0])
        self.hiddens = nn.Sequential(OrderedDict([
            ('l_{}'.format(i), nn.Sequential(nn.Linear(h_in, h_out), self.activation))
            for i, (h_in, h_out) in enumerate(zip(hidden_sizes[:-1], hidden_sizes[1:]))]))
        # posterior parameter heads; affine-free batch norm stabilises them
        self.f_mu = nn.Linear(hidden_sizes[-1], output_size)
        self.f_mu_batchnorm = nn.BatchNorm1d(output_size, affine=False)
        self.f_sigma = nn.Linear(hidden_sizes[-1], output_size)
        self.f_sigma_batchnorm = nn.BatchNorm1d(output_size, affine=False)
        self.dropout_enc = nn.Dropout(p=self.dropout)

    def forward(self, x, x_bert, labels=None):
        """Forward pass.

        `x` (the bag-of-words input) is intentionally ignored by this
        contextual ("zeroshot") network; only `x_bert` and, when provided,
        `labels` feed the encoder.

        Returns (mu, log_sigma) of the approximate posterior.
        """
        # Bug fix: this previously tested `if labels:` -- truthiness of a
        # multi-element tensor raises a RuntimeError. Use an explicit None
        # check, consistent with CombinedInferenceNetwork.forward.
        if labels is not None:
            x = torch.cat((x_bert, labels), 1)
        else:
            x = x_bert
        x = self.input_layer(x)
        x = self.activation(x)
        x = self.hiddens(x)
        x = self.dropout_enc(x)
        mu = self.f_mu_batchnorm(self.f_mu(x))
        log_sigma = self.f_sigma_batchnorm(self.f_sigma(x))
        return mu, log_sigma
class CombinedInferenceNetwork(nn.Module):
    """Inference Network combining the bag-of-words input with a projection
    of the contextual embedding before encoding to posterior parameters."""

    def __init__(self, text_input_size, visual_input_size, bert_size, output_size, hidden_sizes,
                 activation='softplus', dropout=0.2, label_size=0):
        """
        Initialize InferenceNetwork.

        Args
            text_input_size : int, dimension of text input
            visual_input_size : int, dimension of visual input
            bert_size : int, dimension of the contextual embedding
            output_size : int, dimension of output (number of topics)
            hidden_sizes : tuple, length = n_layers
            activation : string, 'softplus' or 'relu', default 'softplus'
            dropout : float, default 0.2
            label_size : int, dimension of the optional one-hot labels
        """
        super(CombinedInferenceNetwork, self).__init__()
        assert isinstance(text_input_size, int), "text input_size must by type int."
        assert isinstance(visual_input_size, int), "visual input_size must by type int."
        assert isinstance(output_size, int), "output_size must be type int."
        assert isinstance(hidden_sizes, tuple), \
            "hidden_sizes must be type tuple."
        assert activation in ['softplus', 'relu'], \
            "activation must be 'softplus' or 'relu'."
        assert dropout >= 0, "dropout must be >= 0."
        self.text_input_size = text_input_size
        self.visual_input_size = visual_input_size
        self.output_size = output_size
        self.hidden_sizes = hidden_sizes
        self.dropout = dropout
        # the assert above guarantees the key exists
        self.activation = {'softplus': nn.Softplus, 'relu': nn.ReLU}[activation]()
        # project the contextual embedding into the textual BoW space
        self.adapt_bert = nn.Linear(bert_size, text_input_size)
        #self.bert_layer = nn.Linear(hidden_sizes[0], hidden_sizes[0])
        self.input_layer = nn.Linear(text_input_size + visual_input_size + label_size, hidden_sizes[0])
        self.hiddens = nn.Sequential(OrderedDict([
            ('l_{}'.format(i), nn.Sequential(nn.Linear(h_in, h_out), self.activation))
            for i, (h_in, h_out) in enumerate(zip(hidden_sizes[:-1], hidden_sizes[1:]))]))
        # posterior parameter heads with affine-free batch norm
        self.f_mu = nn.Linear(hidden_sizes[-1], output_size)
        self.f_mu_batchnorm = nn.BatchNorm1d(output_size, affine=False)
        self.f_sigma = nn.Linear(hidden_sizes[-1], output_size)
        self.f_sigma_batchnorm = nn.BatchNorm1d(output_size, affine=False)
        self.dropout_enc = nn.Dropout(p=self.dropout)

    def forward(self, x, x_bert, labels=None):
        """Forward pass. Returns (mu, log_sigma) of the posterior."""
        projected_bert = self.adapt_bert(x_bert)
        combined = torch.cat((x, projected_bert), 1)
        if labels is not None:
            combined = torch.cat((combined, labels), 1)
        hidden = self.activation(self.input_layer(combined))
        hidden = self.dropout_enc(self.hiddens(hidden))
        mu = self.f_mu_batchnorm(self.f_mu(hidden))
        log_sigma = self.f_sigma_batchnorm(self.f_sigma(hidden))
        return mu, log_sigma
| 5,742 | 37.033113 | 103 | py |
MRE-ISE | MRE-ISE-main/cores/lamo/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/cores/lamo/evaluation/measures.py | from gensim.corpora.dictionary import Dictionary
from gensim.models.coherencemodel import CoherenceModel
from gensim.models import KeyedVectors
import gensim.downloader as api
from scipy.spatial.distance import cosine
import abc
from contextualized_topic_models.evaluation.rbo import rbo
import numpy as np
import itertools
class Measure:
    """Abstract base for all evaluation measures; subclasses override score()."""

    def __init__(self):
        pass

    def score(self):
        """Compute the measure; implemented by subclasses."""
        pass
class TopicDiversity(Measure):
    """Fraction of unique words among the top-k words of all topics."""

    def __init__(self, topics):
        super().__init__()
        self.topics = topics

    def score(self, topk=25):
        """
        :param topk: topk words on which the topic diversity will be computed
        :return: diversity in [0, 1]; 1 means no word shared between topics
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        distinct_words = set()
        for topic in self.topics:
            distinct_words.update(topic[:topk])
        return len(distinct_words) / (topk * len(self.topics))
class Coherence(abc.ABC):
    """
    Base class for corpus-based coherence measures.

    :param topics: a list of lists of the top-k words
    :param texts: (list of lists of strings) represents the corpus on which
        the empirical frequencies of words are computed
    """

    def __init__(self, topics, texts):
        self.topics = topics
        self.texts = texts
        # gensim dictionary built once, reused by every CoherenceModel run
        self.dictionary = Dictionary(self.texts)

    @abc.abstractmethod
    def score(self):
        """Compute the coherence value; implemented by subclasses."""
class CoherenceNPMI(Coherence):
    """NPMI topic coherence, computed via gensim's 'c_npmi' measure."""

    def __init__(self, topics, texts):
        super().__init__(topics, texts)

    def score(self, topk=10, per_topic=False):
        """
        :param topk: how many most likely words to consider in the evaluation
        :param per_topic: if True, returns the coherence value for each topic
         (default: False)
        :return: NPMI coherence
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        model = CoherenceModel(
            topics=self.topics, texts=self.texts,
            dictionary=self.dictionary,
            coherence='c_npmi', topn=topk)
        return model.get_coherence_per_topic() if per_topic else model.get_coherence()
class CoherenceUMASS(Coherence):
    """UMass topic coherence, computed via gensim's 'u_mass' measure."""

    def __init__(self, topics, texts):
        super().__init__(topics, texts)

    def score(self, topk=10, per_topic=False):
        """
        :param topk: how many most likely words to consider in the evaluation
        :param per_topic: if True, returns the coherence value for each topic
         (default: False)
        :return: UMass coherence
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        model = CoherenceModel(
            topics=self.topics, texts=self.texts,
            dictionary=self.dictionary,
            coherence='u_mass', topn=topk)
        return model.get_coherence_per_topic() if per_topic else model.get_coherence()
class CoherenceUCI(Coherence):
    """UCI topic coherence, computed via gensim's 'c_uci' measure."""

    def __init__(self, topics, texts):
        super().__init__(topics, texts)

    def score(self, topk=10, per_topic=False):
        """
        :param topk: how many most likely words to consider in the evaluation
        :param per_topic: if True, returns the coherence value for each topic
         (default: False)
        :return: UCI coherence
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        model = CoherenceModel(
            topics=self.topics, texts=self.texts,
            dictionary=self.dictionary,
            coherence='c_uci', topn=topk)
        return model.get_coherence_per_topic() if per_topic else model.get_coherence()
class CoherenceCV(Coherence):
    """C_V topic coherence, computed via gensim's 'c_v' measure."""

    def __init__(self, topics, texts):
        super().__init__(topics, texts)

    def score(self, topk=10, per_topic=False):
        """
        :param topk: how many most likely words to consider in the evaluation
        :param per_topic: if True, returns the coherence value for each topic
         (default: False)
        :return: C_V coherence
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        model = CoherenceModel(
            topics=self.topics, texts=self.texts,
            dictionary=self.dictionary,
            coherence='c_v', topn=topk)
        return model.get_coherence_per_topic() if per_topic else model.get_coherence()
class CoherenceWordEmbeddings(Measure):
    """Coherence as average pairwise word-embedding similarity per topic."""

    def __init__(self, topics, word2vec_path=None, binary=False):
        """
        :param topics: a list of lists of the top-n most likely words
        :param word2vec_path: if word2vec_file is specified, it retrieves the
         word embeddings file (in word2vec format) to compute similarities
         between words, otherwise 'word2vec-google-news-300' is downloaded
        :param binary: if the word2vec file is binary
        """
        super().__init__()
        self.topics = topics
        self.binary = binary
        if word2vec_path is None:
            self.wv = api.load('word2vec-google-news-300')
        else:
            self.wv = KeyedVectors.load_word2vec_format(
                word2vec_path, binary=binary)

    def score(self, topk=10):
        """
        :param topk: how many most likely words to consider in the evaluation
        :return: topic coherence computed on the word embeddings similarities
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        per_topic_means = []
        for topic in self.topics:
            if len(topic) > 0:
                # pairwise similarities among the topk words known to the
                # embedding model
                similarities = [
                    self.wv.similarity(w1, w2)
                    for w1, w2 in itertools.combinations(topic[:topk], 2)
                    if w1 in self.wv.index_to_key and w2 in self.wv.index_to_key
                ]
                per_topic_means.append(np.mean(similarities))
        return np.mean(per_topic_means)
class InvertedRBO(Measure):
    """Topic diversity as 1 minus the average rank-biased overlap."""

    def __init__(self, topics):
        """
        :param topics: a list of lists of words
        """
        super().__init__()
        self.topics = topics

    def score(self, topk=10, weight=0.9):
        """
        :param weight: p (float), default 1.0: Weight of each agreement at
         depth d: p**(d-1). When set to 1.0, there is no weight, the rbo
         returns to average overlap.
        :return: rank_biased_overlap over the topics
        """
        if topk > len(self.topics[0]):
            raise Exception('Words in topics are less than topk')
        # extrapolated RBO (index 2) for every pair of topics
        values = [rbo.rbo(first[:topk], second[:topk], p=weight)[2]
                  for first, second in itertools.combinations(self.topics, 2)]
        return 1 - np.mean(values)
class Matches(Measure):
    """Fraction of documents whose predicted topic agrees across languages."""

    def __init__(
            self, doc_distribution_original_language,
            doc_distribution_unseen_language):
        """
        :param doc_distribution_original_language: numpy array of the topical
         distribution of the documents in the original language
         (dim: num docs x num topics)
        :param doc_distribution_unseen_language: numpy array of the topical
         distribution of the documents in an unseen language
         (dim: num docs x num topics)
        """
        super().__init__()
        self.orig_lang_docs = doc_distribution_original_language
        self.unseen_lang_docs = doc_distribution_unseen_language
        if len(self.orig_lang_docs) != len(self.unseen_lang_docs):
            raise Exception('Distributions of the comparable documents must have the same length')

    def score(self):
        """
        :return: proportion of matches between the predicted topic in the
         original language and the predicted topic in the unseen language of
         the document distributions
        """
        hits = sum(
            1 for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs)
            if np.argmax(d1) == np.argmax(d2))
        return hits / len(self.unseen_lang_docs)
class KLDivergence(Measure):
    """Average KL divergence between paired document-topic distributions."""

    def __init__(
            self, doc_distribution_original_language,
            doc_distribution_unseen_language):
        """
        :param doc_distribution_original_language: numpy array of the topical
         distribution of the documents in the original language
         (dim: num docs x num topics)
        :param doc_distribution_unseen_language: numpy array of the topical
         distribution of the documents in an unseen language
         (dim: num docs x num topics)
        """
        super().__init__()
        self.orig_lang_docs = doc_distribution_original_language
        self.unseen_lang_docs = doc_distribution_unseen_language
        if len(self.orig_lang_docs) != len(self.unseen_lang_docs):
            raise Exception('Distributions of the comparable documents must have the same length')

    def score(self):
        """
        :return: average kullback leibler divergence between the distributions
        """
        total = sum(kl_div(d1, d2)
                    for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs))
        return total / len(self.unseen_lang_docs)
def kl_div(a, b):
    """
    Kullback-Leibler divergence KL(a || b) between two discrete distributions.

    Entries where a == 0 contribute 0 (the limit of x*log(x/y) as x -> 0).

    :param a: array-like probability distribution
    :param b: array-like reference probability distribution
    :return: float, sum over i of a_i * log(a_i / b_i)
    """
    # Fix: `np.float` was a deprecated alias of the builtin `float` and was
    # removed in NumPy 1.24, so this function crashed on modern NumPy.
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    # np.where still evaluates log on zero entries; silence those spurious
    # divide/invalid warnings -- the selected result is unchanged.
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.sum(np.where(a != 0, a * np.log(a / b), 0))
class CentroidDistance(Measure):
    """Average similarity between the embedding centroids of the most likely
    topics assigned to paired documents in two languages."""

    def __init__(
            self, doc_distribution_original_language,
            doc_distribution_unseen_language, topics, word2vec_path=None,
            binary=True, topk=10):
        """
        :param doc_distribution_original_language: numpy array of the topical
         distribution of the documents in the original language
         (dim: num docs x num topics)
        :param doc_distribution_unseen_language: numpy array of the topical
         distribution of the documents in an unseen language
         (dim: num docs x num topics)
        :param topics: a list of lists of the top-n most likely words
        :param word2vec_path: if word2vec_file is specified, it retrieves the
         word embeddings file (in word2vec format) to compute similarities
         between words, otherwise 'word2vec-google-news-300' is downloaded
        :param binary: if the word2vec file is binary
        :param topk: max number of topical words
        """
        super().__init__()
        # keep only the topk words of each topic
        self.topics = [t[:topk] for t in topics]
        self.orig_lang_docs = doc_distribution_original_language
        self.unseen_lang_docs = doc_distribution_unseen_language
        if len(self.orig_lang_docs) != len(self.unseen_lang_docs):
            raise Exception('Distributions of the comparable documents must have the same length')
        if word2vec_path is None:
            self.wv = api.load('word2vec-google-news-300')
        else:
            self.wv = KeyedVectors.load_word2vec_format(
                word2vec_path, binary=binary)

    def score(self):
        """
        :return: average centroid distance between the words of the most
         likely topic of the document distributions
        """
        total = 0
        for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):
            # centroid of the top words of each document's dominant topic
            centroid_orig = self.get_centroid(self.topics[np.argmax(d1)])
            centroid_unseen = self.get_centroid(self.topics[np.argmax(d2)])
            # cosine() is a distance, so 1 - distance is a similarity
            total += (1 - cosine(centroid_orig, centroid_unseen))
        return total / len(self.unseen_lang_docs)

    def get_centroid(self, word_list):
        """Return the L2-normalized mean embedding of the known words."""
        vectors = [self.wv.get_vector(word) for word in word_list
                   if word in self.wv.index_to_key]
        centroid = sum(vectors)
        return centroid / np.linalg.norm(centroid)
| 12,526 | 35.415698 | 79 | py |
MRE-ISE | MRE-ISE-main/cores/lamo/evaluation/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/cores/lamo/evaluation/rbo/__init__.py | 0 | 0 | 0 | py | |
MRE-ISE | MRE-ISE-main/cores/lamo/evaluation/rbo/rbo.py | """Rank-biased overlap, a ragged sorted list similarity measure.
See http://doi.acm.org/10.1145/1852102.1852106 for details. All functions
directly corresponding to concepts from the paper are named so that they can be
clearly cross-identified.
The definition of overlap has been modified to account for ties. Without this,
results for lists with tied items were being inflated. The modification itself
is not mentioned in the paper but seems to be reasonable, see function
``overlap()``. Places in the code which diverge from the spec in the paper
because of this are highlighted with comments.
The two main functions for performing an RBO analysis are ``rbo()`` and
``rbo_dict()``; see their respective docstrings for how to use them.
The following doctest just checks that equivalent specifications of a
problem yield the same result using both functions:
>>> lst1 = [{"c", "a"}, "b", "d"]
>>> lst2 = ["a", {"c", "b"}, "d"]
>>> ans_rbo = _round(rbo(lst1, lst2, p=.9))
>>> dct1 = dict(a=1, b=2, c=1, d=3)
>>> dct2 = dict(a=1, b=2, c=2, d=3)
>>> ans_rbo_dict = _round(rbo_dict(dct1, dct2, p=.9, sort_ascending=True))
>>> ans_rbo == ans_rbo_dict
True
"""
from __future__ import division
import math
from bisect import bisect_left
from collections import namedtuple
# Result container returned by ``rbo()`` / ``rbo_dict()``: the three RBO
# estimates described in the paper (lower bound, residual, extrapolated).
RBO = namedtuple("RBO", "min res ext")
RBO.__doc__ += ": Result of full RBO analysis"
RBO.min.__doc__ = "Lower bound estimate"
RBO.res.__doc__ = "Residual corresponding to min; min + res is an upper bound estimate"
RBO.ext.__doc__ = "Extrapolated point estimate"
def _round(obj):
    """Round a value to 3 decimals; an ``RBO`` tuple is rounded field-wise."""
    if not isinstance(obj, RBO):
        return round(obj, 3)
    return RBO(_round(obj.min), _round(obj.res), _round(obj.ext))
def set_at_depth(lst, depth):
    """Flatten the first ``depth`` ranks of ``lst`` into a set, expanding ties.

    A rank may be either an atomic value or a set of values tied at that rank.
    """
    flat = set()
    for entry in lst[:depth]:
        if isinstance(entry, set):
            flat |= entry
        else:
            flat.add(entry)
    return flat
def raw_overlap(list1, list2, depth):
    """Overlap as defined in the article.

    Returns the triple (intersection size, |set1|, |set2|) at ``depth``.
    """
    seen1 = set_at_depth(list1, depth)
    seen2 = set_at_depth(list2, depth)
    return len(seen1 & seen2), len(seen1), len(seen2)
def overlap(list1, list2, depth):
    """Overlap which accounts for possible ties.

    This isn't mentioned in the paper but should be used in the ``rbo*()``
    functions below, otherwise overlap at a given depth might be > depth which
    inflates the result. Agreement is scaled by the minimum of the requested
    depth and the lengths of the two lists, so overlap never exceeds the
    number of ranks in the shorter list.

    >>> overlap("abcd", "abcd", 3)
    3.0
    >>> overlap("abcd", "abcd", 5)
    4.0
    >>> overlap(["a", {"b", "c"}, "d"], ["a", {"b", "c"}, "d"], 2)
    2.0
    >>> overlap(["a", {"b", "c"}, "d"], ["a", {"b", "c"}, "d"], 3)
    3.0
    """
    # To follow the paper's exact definition instead, use:
    #   return raw_overlap(list1, list2, depth)[0]
    effective_depth = min(depth, len(list1), len(list2))
    return agreement(list1, list2, depth) * effective_depth
def agreement(list1, list2, depth):
    """Proportion of shared values between two sorted lists at given depth.

    >>> _round(agreement("abcde", "abdcf", 1))
    1.0
    >>> _round(agreement("abcde", "abdcf", 3))
    0.667
    >>> _round(agreement("abcde", "abdcf", 4))
    1.0
    >>> _round(agreement("abcde", "abdcf", 5))
    0.8
    >>> _round(agreement([{1, 2}, 3], [1, {2, 3}], 1))
    0.667
    >>> _round(agreement([{1, 2}, 3], [1, {2, 3}], 2))
    1.0
    """
    shared, size1, size2 = raw_overlap(list1, list2, depth)
    return 2 * shared / (size1 + size2)
def cumulative_agreement(list1, list2, depth):
    """Lazily yield the agreement at every depth from 1 through ``depth``."""
    for d in range(1, depth + 1):
        yield agreement(list1, list2, d)
def average_overlap(list1, list2, depth=None):
    """Calculate average overlap between ``list1`` and ``list2``.

    >>> _round(average_overlap("abcdefg", "zcavwxy", 1))
    0.0
    >>> _round(average_overlap("abcdefg", "zcavwxy", 2))
    0.0
    >>> _round(average_overlap("abcdefg", "zcavwxy", 3))
    0.222
    >>> _round(average_overlap("abcdefg", "zcavwxy", 4))
    0.292
    >>> _round(average_overlap("abcdefg", "zcavwxy", 5))
    0.313
    >>> _round(average_overlap("abcdefg", "zcavwxy", 6))
    0.317
    >>> _round(average_overlap("abcdefg", "zcavwxy", 7))
    0.312
    """
    if depth is None:
        depth = min(len(list1), len(list2))
    return sum(cumulative_agreement(list1, list2, depth)) / depth
def rbo_at_k(list1, list2, p, depth=None):
    """RBO evaluated at a fixed depth ``depth`` (defaults to the shorter list)."""
    if depth is None:
        depth = min(len(list1), len(list2))
    # ``p**d`` rather than ``p**(d - 1)`` because enumerate counts from 0.
    weighted = sum(p ** d * a
                   for d, a in enumerate(cumulative_agreement(list1, list2, depth)))
    return (1 - p) * weighted
def rbo_min(list1, list2, p, depth=None):
    """Tight lower bound on RBO.
    See equation (11) in paper.
    >>> _round(rbo_min("abcdefg", "abcdefg", .9))
    0.767
    >>> _round(rbo_min("abcdefgh", "abcdefg", .9))
    0.767
    """
    # Default evaluation depth: the length of the shorter list.
    depth = min(len(list1), len(list2)) if depth is None else depth
    # x_k: overlap at the full evaluation depth k.
    x_k = overlap(list1, list2, depth)
    log_term = x_k * math.log(1 - p)
    # Geometric-weighted sum of overlap shortfalls at each shallower depth.
    sum_term = sum(
        p ** d / d * (overlap(list1, list2, d) - x_k) for d in range(1, depth + 1)
    )
    return (1 - p) / p * (sum_term - log_term)
def rbo_res(list1, list2, p):
    """Upper bound on residual overlap beyond evaluated depth.
    See equation (30) in paper.
    NOTE: The doctests weren't verified against manual computations but seem
    plausible. In particular, for identical lists, ``rbo_min()`` and
    ``rbo_res()`` should add up to 1, which is the case.
    >>> _round(rbo_res("abcdefg", "abcdefg", .9))
    0.233
    >>> _round(rbo_res("abcdefg", "abcdefghijklmnopqrstuvwxyz", .9))
    0.239
    """
    # S/L: the shorter and longer of the two lists; s/l: their lengths.
    S, L = sorted((list1, list2), key=len)
    s, l = len(S), len(L)
    x_l = overlap(list1, list2, l)
    # since overlap(...) can be fractional in the general case of ties and f
    # must be an integer --> math.ceil()
    f = int(math.ceil(l + s - x_l))
    # upper bound of range() is non-inclusive, therefore + 1 is needed
    term1 = s * sum(p ** d / d for d in range(s + 1, f + 1))
    term2 = l * sum(p ** d / d for d in range(l + 1, f + 1))
    term3 = x_l * (math.log(1 / (1 - p)) - sum(p ** d / d for d in range(1, f + 1)))
    return p ** s + p ** l - p ** f - (1 - p) / p * (term1 + term2 + term3)
def rbo_ext(list1, list2, p):
    """RBO point estimate based on extrapolating observed overlap.
    See equation (32) in paper.
    NOTE: The doctests weren't verified against manual computations but seem
    plausible.
    >>> _round(rbo_ext("abcdefg", "abcdefg", .9))
    1.0
    >>> _round(rbo_ext("abcdefg", "bacdefg", .9))
    0.9
    """
    # S/L: the shorter and longer of the two lists; s/l: their lengths.
    S, L = sorted((list1, list2), key=len)
    s, l = len(S), len(L)
    # Overlap at the longer list's depth (x_l) and the shorter's (x_s).
    x_l = overlap(list1, list2, l)
    x_s = overlap(list1, list2, s)
    # the paper says overlap(..., d) / d, but it should be replaced by
    # agreement(..., d) defined as per equation (28) so that ties are handled
    # properly (otherwise values > 1 will be returned)
    # sum1 = sum(p**d * overlap(list1, list2, d)[0] / d for d in range(1, l + 1))
    sum1 = sum(p ** d * agreement(list1, list2, d) for d in range(1, l + 1))
    sum2 = sum(p ** d * x_s * (d - s) / s / d for d in range(s + 1, l + 1))
    term1 = (1 - p) / p * (sum1 + sum2)
    term2 = p ** l * ((x_l - x_s) / l + x_s / s)
    return term1 + term2
def rbo(list1, list2, p):
    """Complete RBO analysis (lower bound, residual, point estimate).

    ``list`` arguments should be already correctly sorted iterables and each
    item should either be an atomic value or a set of values tied for that
    rank. ``p`` is the probability of looking for overlap at rank k + 1 after
    having examined rank k.

    >>> lst1 = [{"c", "a"}, "b", "d"]
    >>> lst2 = ["a", {"c", "b"}, "d"]
    >>> _round(rbo(lst1, lst2, p=.9))
    RBO(min=0.489, res=0.477, ext=0.967)
    """
    if not 0 <= p <= 1:
        raise ValueError("The ``p`` parameter must be between 0 and 1.")
    return RBO(
        rbo_min(list1, list2, p),
        rbo_res(list1, list2, p),
        rbo_ext(list1, list2, p),
    )
def sort_dict(dct, *, ascending=False):
    """Sort keys in ``dct`` according to their corresponding values.

    Sorts in descending order by default, because the values are
    typically scores, i.e. the higher the better. Specify
    ``ascending=True`` if the values are ranks, or some sort of score
    where lower values are better.

    Ties are handled by creating sets of tied keys at the given position
    in the sorted list.

    >>> dct = dict(a=1, b=2, c=1, d=3)
    >>> list(sort_dict(dct)) == ['d', 'b', {'a', 'c'}]
    True
    >>> list(sort_dict(dct, ascending=True)) == [{'a', 'c'}, 'b', 'd']
    True
    """
    sorted_scores = []
    sorted_items = []
    # Items are unique (dict keys); scores may repeat and create ties.
    for key, value in dct.items():
        # Negate for descending order so bisect keeps an ascending invariant.
        rank_score = value if ascending else -value
        pos = bisect_left(sorted_scores, rank_score)
        if pos == len(sorted_scores):
            sorted_scores.append(rank_score)
            sorted_items.append(key)
        elif sorted_scores[pos] == rank_score:
            # Tie: merge this key into a set at the existing position.
            present = sorted_items[pos]
            if isinstance(present, set):
                present.add(key)
            else:
                sorted_items[pos] = {present, key}
        else:
            sorted_scores.insert(pos, rank_score)
            sorted_items.insert(pos, key)
    return sorted_items
def rbo_dict(dict1, dict2, p, *, sort_ascending=False):
    """Wrapper around ``rbo()`` for dict input.

    Each dict maps items to be sorted to the score according to which
    they should be sorted. The RBO analysis is then performed on the
    resulting sorted lists.

    The sort is descending by default, because scores are typically the
    higher the better, but this can be overridden by specifying
    ``sort_ascending=True``.

    >>> dct1 = dict(a=1, b=2, c=1, d=3)
    >>> dct2 = dict(a=1, b=2, c=2, d=3)
    >>> _round(rbo_dict(dct1, dct2, p=.9, sort_ascending=True))
    RBO(min=0.489, res=0.477, ext=0.967)
    """
    ranking1 = sort_dict(dict1, ascending=sort_ascending)
    ranking2 = sort_dict(dict2, ascending=sort_ascending)
    return rbo(ranking1, ranking2, p)
# Run the embedded doctests when executed as a script (or from a console
# session where __name__ is "__console__").
if __name__ in ("__main__", "__console__"):
    import doctest
    doctest.testmod()
| 10,640 | 31.944272 | 87 | py |
MRE-ISE | MRE-ISE-main/cores/gene/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch_geometric.utils import dense_to_sparse
from cores.gene.backbone import GAT, FustionLayer, GraphLearner
from cores.lamo.decoding_network import DecoderNetwork
class MRE(nn.Module):
    """Multimodal relation extraction model: encodes text + image regions,
    builds and refines a cross-modal graph, and classifies head/tail entity
    pairs with topic-model-enhanced representations."""

    def __init__(self, args, vision_config, text_config, vision_model, text_model, num_labels,
                 text_bow_size, visual_bow_size, tokenizer, processor):
        super(MRE, self).__init__()
        self.args = args
        self.hid_size = args.hid_size
        self.num_layers = args.num_layers
        self.num_labels = num_labels
        # encode image and text
        self.vision_model = vision_model
        self.text_model = text_model
        self.tokenizer = tokenizer
        self.processor = processor
        self.device = torch.device(args.device)
        # construct cross-modal graph: project both modalities to hid_size
        self.text_linear = nn.Linear(text_config.hidden_size, args.hid_size)
        self.vision_linear = nn.Linear(vision_config.hidden_size, args.hid_size)
        self.fuse = FustionLayer(args.hid_size)
        self.cross_GAT_layers = GAT(args.hid_size, args.hid_size, 0, args.dropout, alpha=0)
        # learn the best graph for MRE (iterative structure refinement)
        self.adjust_layers = nn.ModuleList([GAT(args.hid_size, args.hid_size, 0, args.dropout, alpha=0) for i in range(self.num_layers)])
        self.graph_learner = GraphLearner(input_size=args.hid_size, hidden_size=args.hid_size,
                                          graph_type=args.graph_type, top_k=args.top_k,
                                          epsilon=args.epsilon, num_pers=args.num_per,
                                          metric_type=args.graph_metric_type, temperature=args.temperature,
                                          feature_denoise=args.feature_denoise, device=self.device)
        # Gaussian head over [head; tail; pooled-graph] = 3 * hid_size
        self.fc_mu = nn.Linear(3 * args.hid_size, 3 * args.hid_size)  # mu var
        self.fc_logvar = nn.Linear(3 * args.hid_size, 3 * args.hid_size)
        self.topic_kl_weight = args.topic_beta
        self.topic_model = DecoderNetwork(
            text_bow_size, visual_bow_size, args.hid_size, args.inference_type, args.n_topics, args.model_type,
            args.hid_size, args.activation,
            args.dropout, args.learn_priors, label_size=args.label_size)
        self.topic_keywords_number = args.topic_keywords_number
        # NOTE(review): classifier1 takes hid_size but is applied to ``z``
        # (3 * hid_size) in forward(); confirm the intended input width.
        self.classifier1 = nn.Linear(args.hid_size, num_labels)
        self.classifier2 = nn.Linear(args.hid_size * 3, num_labels)

    def forward(self, input_ids=None, attention_mask=None, head_tail_pos=None, piece2word=None,
                adj_matrix=None, labels=None, aux_imgs=None, aux_mask=None, edge_mask=None, writer=None, step=None,
                X_T_bow=None, X_V_bow=None):
        """
        :param input_ids: [batch_size, seq_len]
        :param attention_mask: [batch_size, seq_len]
        :param head_tail_pos: [batch_size, 4]
        :param piece2word: [batch_size, seq_len, seq_len]
        :param adj_matrix: [batch_size, seq_len, seq_len]
        :return: ((mu, std), logits1, logits2, topic_loss)
        """
        bsz = input_ids.size(0)
        text_hidden_state = self.text_model(input_ids, attention_mask).last_hidden_state
        length = piece2word.size(1)
        min_value = torch.min(text_hidden_state).item()
        # Max pooling word representations from pieces
        _bert_embs = text_hidden_state.unsqueeze(1).expand(-1, length, -1, -1)
        _bert_embs = torch.masked_fill(_bert_embs, piece2word.eq(0).unsqueeze(-1), min_value)
        text_hidden_states, _ = torch.max(_bert_embs, dim=2)
        # Encode each auxiliary image region independently.
        imgs_hidden_states = []
        aux_num = aux_imgs.size(1)
        for i in range(bsz):
            temp = []
            for j in range(aux_num):
                _temp = self.vision_model(aux_imgs[i, j, :, :].unsqueeze(0)).pooler_output
                temp.append(_temp.squeeze())
            imgs_hidden_states.append(torch.stack(temp, dim=0))
        imgs_hidden_states = torch.stack(imgs_hidden_states, dim=0)
        text_hidden_states = self.text_linear(text_hidden_states)
        imgs_hidden_states = self.vision_linear(imgs_hidden_states)
        # Cross-modal adjacency + joint node sequence [text nodes; image nodes].
        adj = self.fuse(text_hidden_states, attention_mask, adj_matrix, self.args.threshold, imgs_hidden_states)
        hidden_states = torch.cat([text_hidden_states, imgs_hidden_states], dim=1)
        hidden_states = self.cross_GAT_layers(hidden_states, adj)
        prior_mean, prior_variance, posterior_mean, posterior_variance, \
            posterior_log_variance, text_word_dists, visual_word_dists, estimated_labels = self.topic_model(
                X_T_bow, hidden_states, labels)
        # backward pass
        kl_loss, t_rl_loss, v_rl_loss = self._topic_reconstruction_loss(
            X_T_bow, X_V_bow, text_word_dists, visual_word_dists, prior_mean, prior_variance,
            posterior_mean, posterior_variance, posterior_log_variance)
        topic_loss = self.topic_kl_weight * kl_loss + t_rl_loss + v_rl_loss
        topic_loss = topic_loss.sum()
        node_mask = torch.cat([attention_mask, aux_mask], dim=-1)
        # Iteratively re-learn the graph structure and propagate features.
        for layer in self.adjust_layers:
            new_feature, new_adj = self.learn_graph(node_features=hidden_states,
                                                    graph_skip_conn=self.args.graph_skip_conn,
                                                    graph_include_self=self.args.graph_include_self,
                                                    init_adj=adj, node_mask=node_mask)
            adj = torch.mul(new_adj, edge_mask)
            hidden_states = layer(new_feature, adj)
        edge_number = self.cnt_edges(adj)
        writer.add_scalar(tag='edge_number', scalar_value=edge_number/bsz,
                          global_step=step)  # tensorbordx
        a = torch.mean(hidden_states, dim=1)
        entity_hidden_state = torch.Tensor(bsz, 2 * self.args.hid_size)  # batch, 2*hidden
        for i in range(bsz):
            head_idx = head_tail_pos[i][:2]
            tail_idx = head_tail_pos[i][2:]
            head_hidden = torch.max(hidden_states[i, head_idx, :], dim=0).values
            tail_hidden = torch.max(hidden_states[i, tail_idx, :], dim=0).values
            entity_hidden_state[i] = torch.cat([head_hidden, tail_hidden], dim=-1)
        entity_hidden_state = entity_hidden_state.to(self.args.device)
        z_hat = torch.cat([entity_hidden_state, a], dim=-1)
        mu = self.fc_mu(z_hat)
        std = F.softplus(self.fc_logvar(z_hat))
        z = self.reparametrize_n(mu, std)
        logits1 = self.classifier1(z)
        # topic integration
        topic_distribution = self.topic_model.get_theta(X_T_bow, hidden_states, labels)
        topic_label = torch.argmax(topic_distribution, dim=-1)
        visual_topic_keywords_distribution = self.topic_model.beta  # n_topics * Vocab
        textual_topic_keywords_distribution = self.topic_model.alpha
        _, T_idxs = torch.topk(torch.index_select(textual_topic_keywords_distribution, dim=0, index=topic_label),
                               self.topic_keywords_number)
        T_idxs = T_idxs.cpu().numpy()
        # BUG FIX: the original iterated ``for i in T_idxs.shape[0]`` (an int),
        # which raises TypeError; iterate over range(...) instead.
        # NOTE(review): ``self.idx_2_T_token`` / ``self.idx_2_V_token`` are not
        # assigned in __init__ — confirm they are set by the training driver.
        T_component_words = [[self.idx_2_T_token[idx] for idx in T_idxs[i]] for i in range(T_idxs.shape[0])]
        _, V_idxs = torch.topk(torch.index_select(visual_topic_keywords_distribution, dim=0, index=topic_label),
                               self.topic_keywords_number)
        V_idxs = V_idxs.cpu().numpy()
        V_component_words = [[self.idx_2_V_token[idx] for idx in V_idxs[i]] for i in range(V_idxs.shape[0])]
        T_component_words_emb = self.text_model(self.tokenizer(T_component_words)).last_hidden_state
        V_component_words_emb = self.vision_model(self.processor(V_component_words)).last_hidden_state
        T_topic_inte = self._topic_words_attention(T_component_words_emb, z)
        V_topic_inte = self._topic_words_attention(V_component_words_emb, z)
        s = torch.cat([z, T_topic_inte, V_topic_inte], dim=-1)
        logits2 = self.classifier2(s)
        return (mu, std), logits1, logits2, topic_loss

    def _topic_words_attention(self, topic_words_rep, compressed_rep):
        """
        Integrate the topic keywords retrieved from the latent topic model into the compressed representation for
        enhancing the context.
        """
        _x = compressed_rep.unsqueeze(1).repeat(1, topic_words_rep.size(1), 1)
        attn_weights = torch.sigmoid(torch.sum(_x * topic_words_rep, dim=-1))
        attn_weights = attn_weights.unsqueeze(2).repeat(1, 1, topic_words_rep.size(2))
        out = torch.sum(topic_words_rep*attn_weights, dim=1)
        return out

    def _topic_reconstruction_loss(self, text_inputs, visual_inputs, text_word_dists, visual_word_dists, prior_mean,
                                   prior_variance, posterior_mean, posterior_variance, posterior_log_variance):
        """ELBO pieces for the topic model: Gaussian KL plus per-modality
        bag-of-words reconstruction terms."""
        # KL term
        # var division term
        var_division = torch.sum(posterior_variance / prior_variance, dim=1)
        # diff means term
        diff_means = prior_mean - posterior_mean
        diff_term = torch.sum(
            (diff_means * diff_means) / prior_variance, dim=1)
        # logvar det division term
        logvar_det_division = \
            prior_variance.log().sum() - posterior_log_variance.sum(dim=1)
        # combine terms
        # NOTE(review): ``self.n_components`` is never assigned in __init__;
        # confirm it is set elsewhere (e.g. mirrored from args.n_topics).
        KL = 0.5 * (
            var_division + diff_term - self.n_components + logvar_det_division)
        # Reconstruction term
        T_RL = -torch.sum(text_inputs * torch.log(text_word_dists + 1e-10), dim=1)
        V_RL = -torch.sum(visual_inputs * torch.log(visual_word_dists + 1e-10), dim=1)
        return KL, T_RL, V_RL

    def learn_graph(self, node_features, graph_skip_conn=None, graph_include_self=False, init_adj=None,
                    node_mask=None):
        """Delegate to the graph learner, then either add self-loops or blend
        with the initial adjacency via a skip connection."""
        new_feature, new_adj = self.graph_learner(node_features, node_mask=node_mask)
        bsz = node_features.size(0)
        if graph_skip_conn in (0.0, None):
            # add I
            if graph_include_self:
                if torch.cuda.is_available():
                    new_adj = new_adj + torch.stack([torch.eye(new_adj.size(1)) for _ in range(bsz)], dim=0).cuda()
                else:
                    new_adj = new_adj + torch.stack([torch.eye(new_adj.size(1)) for _ in range(bsz)], dim=0)
        else:
            # skip connection
            new_adj = graph_skip_conn * init_adj + (1 - graph_skip_conn) * new_adj
        return new_feature, new_adj

    def reparametrize_n(self, mu, std, n=1):
        """Reparameterization trick: sample z = mu + eps * std."""
        eps = Variable(std.data.new(std.size()).normal_())
        return mu + eps * std

    def cnt_edges(self, adj):
        """Count undirected edges (excluding self-loops) in a soft adjacency."""
        e = torch.ones_like(adj)
        o = torch.zeros_like(adj)
        a = torch.where(adj > 0.0, e, o)
        from torch_geometric.utils import remove_self_loops
        edge_number = remove_self_loops(edge_index=dense_to_sparse(a)[0])[0].size(1) / 2
        return edge_number

    def reset_parameters(self):
        self.text_linear.reset_parameters()
        self.vision_linear.reset_parameters()
        # BUG FIX: the original referenced nonexistent ``self.GAT_layer`` and
        # ``self.layers``; the attributes created in __init__ are
        # ``cross_GAT_layers`` and ``adjust_layers``.
        self.cross_GAT_layers.reset_parameters()
        self.graph_learner.reset_parameters()
        for layer in self.adjust_layers:
            layer.reset_parameters()
MRE-ISE | MRE-ISE-main/cores/gene/backbone.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import linalg as LA
from torch.autograd import Variable
from torch_geometric.utils import to_dense_adj, dense_to_sparse
from torch.distributions.relaxed_bernoulli import RelaxedBernoulli, LogitRelaxedBernoulli
from torch.distributions import Bernoulli, Normal
from torch.distributions.multivariate_normal import MultivariateNormal
VERY_SMALL_NUMBER = 1e-12
INF = 1e20
class GCN(nn.Module):
    """One graph-convolution block: linear projection, neighbour aggregation
    via the adjacency matrix, ReLU, and a residual connection to the input."""

    def __init__(self, in_features, out_features):
        super(GCN, self).__init__()
        self.linear = nn.Linear(in_features=in_features, out_features=out_features)
        self.reset_parameters()

    def forward(self, hidden_state=None, adjacent_matrix=None):
        """
        :param hidden_state: [batch_size, seq_len, hid_size]
        :param adjacent_matrix: [batch_size, seq_len, seq_len]
        """
        projected = self.linear(hidden_state)
        aggregated = torch.matmul(adjacent_matrix, projected)
        # Residual connection keeps the original features in the output.
        return F.relu(aggregated) + hidden_state

    def reset_parameters(self):
        self.linear.reset_parameters()

    def __repr__(self):
        return self.__class__.__name__
class GAT(nn.Module):
    """Single-head graph attention layer (Velickovic et al. style):
    attention logits from a shared weight vector ``a`` over projected node
    pairs, masked by the adjacency matrix."""

    def __init__(self, in_features, out_features, heads, dropout, alpha, concat=True):
        # NOTE: ``heads`` is stored but never used — this layer is
        # effectively single-head. ``alpha`` is the LeakyReLU negative slope.
        super(GAT, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.heads = heads
        self.alpha = alpha
        self.dropout = dropout
        self.concat = concat
        self.linear = nn.Linear(in_features=in_features, out_features=out_features, bias=False)
        # Attention vector over concatenated (source, target) projections.
        self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.reset_parameters()

    def forward(self, hidden_state, adjacent_matrix=None):
        """
        :param hidden_state: [batch_size, seq_len, in_features]
        :param adjacent_matrix: [batch_size, seq_len, seq_len]
        """
        _x = self.linear(hidden_state)
        e = self._para_attentional_mechanism_input(_x)
        # Large negative fill so masked positions get ~0 softmax weight;
        # edges are kept only where the (possibly soft) adjacency > 0.5.
        zero_vec = -9e15 * torch.ones_like(e)
        attention = torch.where(adjacent_matrix > 0.5, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, _x)
        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime

    def _para_attentional_mechanism_input(self, Wh):
        """
        :param Wh: [batch_size, seq_len, out_features]
        Splits ``a`` into source/target halves so pairwise logits can be
        formed by a broadcast add instead of materialising all pairs.
        """
        Wh1 = torch.matmul(Wh, self.a[:self.out_features, :])
        Wh2 = torch.matmul(Wh, self.a[self.out_features:, :])
        # broadcast add
        e = Wh1 + Wh2.transpose(1, 2)
        return self.leakyrelu(e)

    def reset_parameters(self):
        self.linear.reset_parameters()

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
class FustionLayer(nn.Module):
    """Builds a cross-modal graph by thresholding pairwise text-image
    similarities and merging them with the intra-modal adjacency matrices."""

    def __init__(self, hid_size):
        super(FustionLayer, self).__init__()
        # NOTE(review): the same projection is applied to both modalities
        # (weight sharing) — confirm this is intentional.
        self.ln = nn.Sequential(nn.Linear(1 * hid_size, 1 * hid_size),
                                nn.ReLU())
        self.reset_parameters()

    def forward(self, text_hidden_states=None, text_attention_mask=None, text_adj_matrix=None, threshold=0.5,
                imgs_hidden_states=None, img_attention_mask=None, img_adj_matrix=None):
        """
        build cross_modal graph.
        we build an edge between a and b only the semantic similarity sem_sim(a, b) > threshold.
        :param text_hidden_states: [batch_size, seq_len, in_features]
        :param text_adj_matrix: [batch_size, seq_len, seq_len]
        :param text_attention_mask: [batch_size, seq_len]
        :param imgs_hidden_states: [batch_size, num_objects, in-features]
        :param img_adj_matrix: [batch_size, obj_num, obj_num]
        :param img_attention_mask: [batch_size, obj_num]
        """
        batch_size, seq_len = text_hidden_states.size(0), text_hidden_states.size(1)
        num_objects = imgs_hidden_states.size(1)
        _x = self.ln(text_hidden_states)
        _y = self.ln(imgs_hidden_states)
        # ``F.sigmoid`` is deprecated; ``torch.sigmoid`` is the replacement.
        _temp = torch.sigmoid(torch.bmm(_x, _y.transpose(1, 2)))
        min_value = torch.min(_temp)
        # FIX: ``1 - mask.byte()`` with masked_fill_ relies on deprecated
        # uint8 masks; modern torch requires bool masks — use ``~mask.bool()``.
        _temp = _temp.masked_fill_(~text_attention_mask.bool().unsqueeze(-1), min_value)
        if img_attention_mask is not None:
            _temp = _temp.masked_fill_(~img_attention_mask.bool().unsqueeze(1), min_value)
        max_num_nodes = seq_len + num_objects
        size = [batch_size, max_num_nodes, max_num_nodes]
        new_adj_matrix = []
        for b in range(batch_size):
            old_text_edges = dense_to_sparse(text_adj_matrix[b])[0]
            # Cross-modal edges where similarity clears the threshold;
            # image node indices are offset by seq_len in the joint graph.
            new_edges = (_temp[b] > threshold).nonzero()
            head = new_edges[:, 0]
            tail = new_edges[:, 1] + seq_len
            if img_attention_mask is not None:
                old_img_edges = dense_to_sparse(img_adj_matrix[b])[0]
                old_img_edges = [old_img_edges[i]+seq_len for i in range(2)]
                edge_index = torch.stack([torch.cat([old_text_edges[0], head, old_img_edges[0]]), torch.cat([old_text_edges[1], tail, old_img_edges[1]])], dim=0)
            else:
                edge_index = torch.stack([torch.cat([old_text_edges[0], head]), torch.cat([old_text_edges[1], tail])], dim=0)
            raw_adj = to_dense_adj(edge_index, max_num_nodes=max_num_nodes)
            new_adj_matrix.append(raw_adj)
        new_adj = torch.stack(new_adj_matrix, dim=0).squeeze().view(size)
        return new_adj

    def reset_parameters(self):
        for layer in self.ln:
            if isinstance(layer, nn.Linear):
                layer.reset_parameters()
class GraphLearner(nn.Module):
    def __init__(self, input_size, hidden_size, graph_type, top_k=None, epsilon=None, num_pers=1,
                 metric_type="attention", feature_denoise=True, device=None,
                 temperature=0.1):
        """Learn a (sparsified) adjacency over node features.

        :param input_size: dimensionality of the input node features
        :param hidden_size: projection size used by the metric networks
        :param graph_type: sparsification scheme — 'epsilonNN', 'KNN' or 'prob'
        :param top_k: k for 'KNN' sparsification
        :param epsilon: threshold for 'epsilonNN' / 'prob'
        :param num_pers: number of perspectives for multi-view metrics
        :param metric_type: similarity metric used to score node pairs
        :param feature_denoise: if True, learn a per-feature input mask
        :param temperature: relaxation temperature for the 'prob' graph
        """
        super(GraphLearner, self).__init__()
        self.device = device
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_pers = num_pers
        self.graph_type = graph_type
        self.top_k = top_k
        self.epsilon = epsilon
        self.metric_type = metric_type
        self.feature_denoise = feature_denoise
        self.temperature = temperature
        # One parameter set per metric; exactly one branch is taken.
        if metric_type == 'attention':
            self.linear_sims = nn.ModuleList(
                [nn.Linear(self.input_size, hidden_size, bias=False) for _ in range(num_pers)])
            print('[ Multi-perspective {} GraphLearner: {} ]'.format(metric_type, -num_pers))
        elif metric_type == 'weighted_cosine':
            self.weight_tensor = torch.Tensor(num_pers, self.input_size)
            self.weight_tensor = nn.Parameter(nn.init.xavier_uniform_(self.weight_tensor))
            print('[ Multi-perspective {} GraphLearner: {} ]'.format(metric_type, num_pers))
        elif metric_type == 'gat_attention':
            self.linear_sims1 = nn.ModuleList([nn.Linear(input_size, 1, bias=False) for _ in range(num_pers)])
            self.linear_sims2 = nn.ModuleList([nn.Linear(input_size, 1, bias=False) for _ in range(num_pers)])
            self.leakyrelu = nn.LeakyReLU(0.2)
            print('[ GAT_Attention GraphLearner]')
        elif metric_type == 'kernel':
            self.precision_inv_dis = nn.Parameter(torch.Tensor(1, 1))
            self.precision_inv_dis.data.uniform_(0, 1.0)
            self.weight = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(input_size, hidden_size)))
        elif metric_type == 'transformer':
            self.linear_sim1 = nn.Linear(input_size, hidden_size, bias=False)
            self.linear_sim2 = nn.Linear(input_size, hidden_size, bias=False)
        elif metric_type == 'cosine':
            # Parameter-free metric.
            pass
        elif metric_type == 'mlp':
            self.lin = nn.Linear(self.input_size*2, 1)
        elif metric_type == 'multi_mlp':
            self.linear_sims1 = nn.ModuleList(
                [nn.Linear(self.input_size, hidden_size, bias=False) for _ in range(num_pers)])
            self.linear_sims2 = nn.ModuleList(
                [nn.Linear(self.hidden_size, hidden_size, bias=False) for _ in range(num_pers)])
            print('[ Multi-perspective {} GraphLearner: {} ]'.format(metric_type, num_pers))
        else:
            raise ValueError('Unknown metric_type: {}'.format(metric_type))
        if self.feature_denoise:
            self.feat_mask = self.construct_feat_mask(input_size, init_strategy="constant")
        print('[ Graph Learner metric type: {}, Graph Type: {} ]'.format(metric_type, self.graph_type))
def reset_parameters(self):
if self.feature_denoise:
self.feat_mask = self.construct_feat_mask(self.input_size, init_strategy="constant")
if self.metric_type == 'attention':
for module in self.linear_sims:
module.reset_parameters()
elif self.metric_type == 'weighted_cosine':
self.weight_tensor = nn.Parameter(nn.init.xavier_uniform_(self.weight_tensor))
elif self.metric_type == 'gat_attention':
for module in self.linear_sims1:
module.reset_parameters()
for module in self.linear_sims2:
module.reset_parameters()
elif self.metric_type == 'kernel':
self.precision_inv_dis.data.uniform_(0, 1.0)
self.weight = nn.init.xavier_uniform_(self.weight)
elif self.metric_type == 'transformer':
self.linear_sim1.reset_parameters()
self.linear_sim2.reset_parameters()
elif self.metric_type == 'cosine':
pass
elif self.metric_type == 'mlp':
self.lin1.reset_parameters()
self.lin2.reset_parameters()
elif self.metric_type == 'multi_mlp':
for module in self.linear_sims1:
module.reset_parameters()
for module in self.linear_sims2:
module.reset_parameters()
else:
raise ValueError('Unknown metric_type: {}'.format(self.metric_type))
def forward(self, node_features, node_mask=None):
if self.feature_denoise:
masked_features = self.mask_feature(node_features)
learned_adj = self.learn_adj(masked_features, ctx_mask=node_mask)
return masked_features, learned_adj
else:
learned_adj = self.learn_adj(node_features, ctx_mask=node_mask)
return node_features, learned_adj
    def learn_adj(self, context, ctx_mask=None):
        """
        Score all node pairs with the configured metric, mask padded
        positions, then sparsify according to ``graph_type``.
        Parameters
        :context, (batch_size, ctx_size, dim)
        :ctx_mask, (batch_size, ctx_size)
        Returns
        :attention, (batch_size, ctx_size, ctx_size)
        """
        # ``markoff_value`` is the fill used for masked/pruned entries; it is
        # -INF for softmax-style metrics and 0 for similarity-style metrics.
        if self.metric_type == 'attention':
            attention = 0
            for _ in range(len(self.linear_sims)):
                context_fc = torch.relu(self.linear_sims[_](context))
                attention += torch.matmul(context_fc, context_fc.transpose(-1, -2))
            attention /= len(self.linear_sims)
            markoff_value = -INF
        elif self.metric_type == 'weighted_cosine':
            expand_weight_tensor = self.weight_tensor.unsqueeze(1)
            if len(context.shape) == 3:
                expand_weight_tensor = expand_weight_tensor.unsqueeze(1)
            context_fc = context.unsqueeze(0) * expand_weight_tensor
            context_norm = F.normalize(context_fc, p=2, dim=-1)
            # Mean over the perspective dimension.
            attention = torch.matmul(context_norm, context_norm.transpose(-1, -2)).mean(0)
            markoff_value = 0
        elif self.metric_type == 'transformer':
            # Scaled dot-product with a single shared projection (Q == K).
            Q = self.linear_sim1(context)
            attention = torch.matmul(Q, Q.transpose(-1, -2)) / math.sqrt(Q.shape[-1])
            markoff_value = -INF
        elif self.metric_type == 'gat_attention':
            attention = []
            for _ in range(len(self.linear_sims1)):
                a_input1 = self.linear_sims1[_](context)
                a_input2 = self.linear_sims2[_](context)
                attention.append(self.leakyrelu(a_input1 + a_input2.transpose(-1, -2)))
            attention = torch.mean(torch.stack(attention, 0), 0)
            markoff_value = -INF
            # markoff_value = 0
        elif self.metric_type == 'kernel':
            # Gaussian kernel over learned-metric distances.
            dist_weight = torch.mm(self.weight, self.weight.transpose(-1, -2))
            attention = self.compute_distance_mat(context, dist_weight)
            attention = torch.exp(-0.5 * attention * (self.precision_inv_dis ** 2))
            markoff_value = 0
        elif self.metric_type == 'cosine':
            context_norm = context.div(torch.norm(context, p=2, dim=-1, keepdim=True))
            attention = torch.mm(context_norm, context_norm.transpose(-1, -2)).detach()
            markoff_value = 0
        elif self.metric_type == 'mlp':
            seq_len = context.size(1)
            # context_fc = torch.relu(self.lin2(torch.relu(self.lin1(context))))
            # attention = F.sigmoid(torch.matmul(context_fc, context_fc.transpose(-1, -2)))
            # Score every ordered pair with an MLP over concatenated features.
            context_fc = context.unsqueeze(1).repeat(1, seq_len, 1, 1)
            context_bc = context.unsqueeze(2).repeat(1, 1, seq_len, 1)
            attention = F.sigmoid(self.lin(torch.cat([context_fc, context_bc], dim=-1)).squeeze())
            markoff_value = 0
        elif self.metric_type == 'multi_mlp':
            attention = 0
            for _ in range(self.num_pers):
                context_fc = torch.relu(self.linear_sims2[_](torch.relu(self.linear_sims1[_](context))))
                attention += F.sigmoid(torch.matmul(context_fc, context_fc.transpose(-1, -2)))
            attention /= self.num_pers
            markoff_value = -INF
        # Mask out padded rows and columns.
        # NOTE(review): ``1 - mask.byte()`` with masked_fill_ relies on
        # deprecated uint8 masks — verify against the installed torch version.
        if ctx_mask is not None:
            attention = attention.masked_fill_(1 - ctx_mask.byte().unsqueeze(1), markoff_value)
            attention = attention.masked_fill_(1 - ctx_mask.byte().unsqueeze(-1), markoff_value)
        # Sparsify the dense score matrix.
        if self.graph_type == 'epsilonNN':
            assert self.epsilon is not None
            attention = self.build_epsilon_neighbourhood(attention, self.epsilon, markoff_value)
        elif self.graph_type == 'KNN':
            assert self.top_k is not None
            attention = self.build_knn_neighbourhood(attention, self.top_k, markoff_value)
        elif self.graph_type == 'prob':
            attention = self.build_prob_neighbourhood(attention, self.epsilon, temperature=self.temperature)
        else:
            raise ValueError('Unknown graph_type: {}'.format(self.graph_type))
        # Metric-specific row normalisation of the sparsified graph.
        # NOTE(review): verify ``normalize_adj`` is callable as a bound method
        # (its definition should take ``self`` as the first parameter).
        if self.graph_type in ['KNN', 'epsilonNN']:
            if self.metric_type in ('kernel', 'weighted_cosine'):
                assert attention.min().item() >= 0
                attention = attention / torch.clamp(torch.sum(attention, dim=-1, keepdim=True), min=VERY_SMALL_NUMBER)
            elif self.metric_type == 'cosine':
                attention = (attention > 0).float()
                attention = self.normalize_adj(attention)
            elif self.metric_type in ('transformer', 'attention', 'gat_attention'):
                attention = torch.softmax(attention, dim=-1)
        return attention
def normalize_adj(mx):
"""Row-normalize matrix: symmetric normalized Laplacian"""
rowsum = mx.sum(1)
r_inv_sqrt = torch.pow(rowsum, -0.5).flatten()
r_inv_sqrt[torch.isinf(r_inv_sqrt)] = 0.
r_mat_inv_sqrt = torch.diag(r_inv_sqrt)
return torch.mm(torch.mm(mx, r_mat_inv_sqrt).transpose(-1, -2), r_mat_inv_sqrt)
def build_knn_neighbourhood(self, attention, top_k, markoff_value):
top_k = min(top_k, attention.size(-1))
knn_val, knn_ind = torch.topk(attention, top_k, dim=-1)
weighted_adjacency_matrix = (markoff_value * torch.ones_like(attention)).scatter_(-1, knn_ind, knn_val)
weighted_adjacency_matrix = weighted_adjacency_matrix.to(self.device)
return weighted_adjacency_matrix
def build_epsilon_neighbourhood(self, attention, epsilon, markoff_value):
attention = torch.sigmoid(attention)
mask = (attention > epsilon).detach().float()
weighted_adjacency_matrix = attention * mask + markoff_value * (1 - mask)
return weighted_adjacency_matrix
    def build_prob_neighbourhood(self, attention, epsilon=0.1, temperature=0.1):
        """Sample a sparse adjacency via a relaxed (Concrete) Bernoulli.

        ``attention`` is interpreted as per-edge keep probabilities; one
        differentiable sample is drawn per edge with ``rsample()`` (so
        gradients flow through the sample), then entries at or below
        ``epsilon`` are hard-zeroed via a detached mask.

        NOTE(review): ``RelaxedBernoulli`` is assumed to be
        ``torch.distributions.RelaxedBernoulli`` imported at module level
        (the import is not visible in this chunk) — confirm.  Output is
        stochastic: each call returns a different sample.

        :param attention: edge probability tensor (expected in [0, 1]).
        :param epsilon: hard threshold below which sampled weights are zeroed.
        :param temperature: relaxation temperature (lower = closer to discrete).
        """
        # attention = torch.clamp(attention, 0.01, 0.99)
        weighted_adjacency_matrix = RelaxedBernoulli(temperature=torch.Tensor([temperature]).to(attention.device),
                                                     probs=attention).rsample()
        # eps = 0.5
        # detached mask: gradient only flows through the surviving samples
        mask = (weighted_adjacency_matrix > epsilon).detach().float()
        weighted_adjacency_matrix = weighted_adjacency_matrix * mask + 0.0 * (1 - mask)
        return weighted_adjacency_matrix
def compute_distance_mat(self, X, weight=None):
if weight is not None:
trans_X = torch.mm(X, weight)
else:
trans_X = X
norm = torch.sum(trans_X * X, dim=-1)
dists = -2 * torch.matmul(trans_X, X.transpose(-1, -2)) + norm.unsqueeze(0) + norm.unsqueeze(1)
return dists
def construct_feat_mask(self, feat_dim, init_strategy="normal"):
mask = nn.Parameter(torch.FloatTensor(feat_dim))
if init_strategy == "normal":
std = 0.1
with torch.no_grad():
mask.normal_(1.0, std)
elif init_strategy == "constant":
with torch.no_grad():
nn.init.constant_(mask, 0.0)
return mask
def mask_feature(self, x, use_sigmoid=True, marginalize=True):
feat_mask = (torch.sigmoid(self.feat_mask) if use_sigmoid else self.feat_mask).to(self.device)
if marginalize:
std_tensor = torch.ones_like(x, dtype=torch.float) / 2
mean_tensor = torch.zeros_like(x, dtype=torch.float) - x
z = torch.normal(mean=mean_tensor, std=std_tensor).to(self.device)
x = x + z * (1 - feat_mask)
else:
x = x * feat_mask
return x
class DynamicLSTM(nn.Module):
    # Wraps nn.LSTM/GRU/RNN to handle variable-length sequences: sorts the
    # batch by length, packs, runs the RNN, unpacks, then restores the
    # original batch order.
    def __init__(self, input_size, hidden_size, num_layers=1, bias=True, batch_first=True, dropout=0,
                 bidirectional=False, only_use_last_hidden_state=False, rnn_type='LSTM'):
        """
        LSTM which can hold variable length sequence, use like TensorFlow's RNN(input, length...).
        :param input_size:The number of expected features in the input x
        :param hidden_size:The number of features in the hidden state h
        :param num_layers:Number of recurrent layers.
        :param bias:If False, then the layer does not use bias weights b_ih and b_hh. Default: True
        :param batch_first:If True, then the input and output tensors are provided as (batch, seq, feature)
        :param dropout:If non-zero, introduces a dropout layer on the outputs of each RNN layer except the last layer
        :param bidirectional:If True, becomes a bidirectional RNN. Default: False
        :param only_use_last_hidden_state:If True, forward() returns only the (unsorted) final hidden state ht
        :param rnn_type: {LSTM, GRU, RNN}
        """
        super(DynamicLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.only_use_last_hidden_state = only_use_last_hidden_state
        self.rnn_type = rnn_type
        # instantiate the requested recurrent core; all three share kwargs
        if self.rnn_type == 'LSTM':
            self.RNN = nn.LSTM(
                input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
                bias=bias, batch_first=batch_first, dropout=dropout, bidirectional=bidirectional)
        elif self.rnn_type == 'GRU':
            self.RNN = nn.GRU(
                input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
                bias=bias, batch_first=batch_first, dropout=dropout, bidirectional=bidirectional)
        elif self.rnn_type == 'RNN':
            self.RNN = nn.RNN(
                input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
                bias=bias, batch_first=batch_first, dropout=dropout, bidirectional=bidirectional)
    def forward(self, x, x_len, h0=None):
        """
        sequence -> sort -> pad and pack ->process using RNN -> unpack ->unsort
        :param x: sequence embedding vectors
        :param x_len: numpy/tensor list (must support tensor indexing, so in
            practice a torch tensor of sequence lengths)
        :param h0: optional initial hidden state; for LSTM it is reused as
            both h0 and c0
        :return: ht if only_use_last_hidden_state else (out, (ht, ct));
            ct is None for GRU/RNN
        """
        """sort"""
        # descending-length order, as required by pack_padded_sequence;
        # x_unsort_idx inverts the permutation afterwards
        x_sort_idx = torch.argsort(-x_len)
        x_unsort_idx = torch.argsort(x_sort_idx).long()
        x_len = x_len[x_sort_idx]
        x = x[x_sort_idx.long()]
        """pack"""
        x_emb_p = torch.nn.utils.rnn.pack_padded_sequence(x, x_len, batch_first=self.batch_first)
        if self.rnn_type == 'LSTM':
            if h0 is None:
                out_pack, (ht, ct) = self.RNN(x_emb_p, None)
            else:
                # NOTE(review): h0 is used for both hidden and cell state
                out_pack, (ht, ct) = self.RNN(x_emb_p, (h0, h0))
        else:
            if h0 is None:
                out_pack, ht = self.RNN(x_emb_p, None)
            else:
                out_pack, ht = self.RNN(x_emb_p, h0)
            ct = None
        """unsort: h"""
        # ht is (num_layers*dirs, batch, hidden): transpose to index by batch,
        # undo the sort, then transpose back
        ht = torch.transpose(ht, 0, 1)[
            x_unsort_idx]
        ht = torch.transpose(ht, 0, 1)
        if self.only_use_last_hidden_state:
            return ht
        else:
            """unpack: out"""
            out = torch.nn.utils.rnn.pad_packed_sequence(out_pack, batch_first=self.batch_first)
            out = out[0]  #
            out = out[x_unsort_idx]
            """unsort: out c"""
            if self.rnn_type == 'LSTM':
                ct = torch.transpose(ct, 0, 1)[
                    x_unsort_idx]
                ct = torch.transpose(ct, 0, 1)
            return out, (ht, ct)
| 21,984 | 43.414141 | 161 | py |
MRE-ISE | MRE-ISE-main/cores/gene/train.py | import pickle
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from sklearn.metrics import classification_report
from transformers.optimization import get_linear_schedule_with_warmup
from modules.metrics import eval_result
import math
class Trainer(object):
    """Training / evaluation / testing driver for the multimodal RE model.

    The optimisation objective is::

        loss = task_loss + eta1 * GIB_loss + eta2 * topic_loss

    where ``task_loss`` / ``GIB_loss`` are cross entropies over ``logits2`` /
    ``logits1`` returned by the model, and (when ``args.is_IB`` is set)
    ``GIB_loss`` additionally carries a Gaussian KL term weighted by
    ``args.beta``.  ``topic_loss`` is produced directly by the model.

    Fix vs. original: ``evaluate()`` now averages ``dev_loss`` over the dev
    loader length (it previously divided by ``len(self.test_data)``).
    """

    def __init__(self, train_data=None, dev_data=None, test_data=None, re_dict=None, model=None, process=None,
                 args=None, logger=None, writer=None) -> None:
        """Store loaders/model/args and, when training, build the optimizer.

        :param re_dict: mapping relation-name -> label id (index 0 is skipped
            when reporting per-class metrics).
        :param writer: tensorboard SummaryWriter; may be None.
        """
        self.train_data = train_data
        self.dev_data = dev_data
        self.test_data = test_data
        self.re_dict = re_dict
        self.model = model
        self.process = process
        self.logger = logger
        self.writer = writer
        self.refresh_step = 2  # progress-bar / train-loss logging period (in steps)
        self.best_dev_metric = 0
        self.best_test_metric = 0
        self.best_dev_epoch = None
        self.best_test_epoch = None
        self.optimizer = None
        if self.train_data is not None:
            # len(train_data) is the number of batches per epoch
            self.train_num_steps = len(self.train_data) * args.num_epochs
        self.step = 0
        self.args = args
        if self.args.do_train:
            self.before_multimodal_train()
        self.loss_func = nn.CrossEntropyLoss()

    def train(self):
        """Run the full training loop, evaluating on dev every 50 steps and on
        test at the end of each epoch once ``args.eval_begin_epoch`` is reached."""
        self.step = 0
        self.model.train()
        self.logger.info("***** Running training *****")
        self.logger.info("  Num instance = %d", len(self.train_data) * self.args.batch_size)
        self.logger.info("  Num epoch = %d", self.args.num_epochs)
        self.logger.info("  Batch size = %d", self.args.batch_size)
        self.logger.info("  Learning rate = {}".format(self.args.lr))
        self.logger.info("  Evaluate begin = %d", self.args.eval_begin_epoch)
        if self.args.load_path is not None:  # warm-start from a checkpoint
            self.logger.info("Loading model from {}".format(self.args.load_path))
            self.model.load_state_dict(torch.load(self.args.load_path))
            self.logger.info("Load model successful!")
        with tqdm(total=self.train_num_steps, postfix='loss:{0:<6.5f}', leave=False, dynamic_ncols=True,
                  initial=self.step) as pbar:
            self.pbar = pbar
            avg_loss = 0
            for epoch in range(1, self.args.num_epochs + 1):
                pbar.set_description_str(desc="Epoch {}/{}".format(epoch, self.args.num_epochs))
                for batch in self.train_data:
                    self.step += 1
                    # move tensors to the target device; non-tensors pass through
                    batch = (tup.to(self.args.device) if isinstance(tup, torch.Tensor) else tup for tup in batch)
                    (mu, std), logits1, logits2, labels, topic_loss = self._step(batch, mode="train", step=self.step)
                    GIB_loss = self.loss_func(logits1, labels.view(-1))
                    task_loss = self.loss_func(logits2, labels.view(-1))
                    if self.args.is_IB:
                        # KL(N(mu, std) || N(0, 1)) in bits (hence the log-2 divisor)
                        KL_loss = -0.5 * (1 + 2 * std.log() - mu.pow(2) - std.pow(2)).sum(1).mean().div(math.log(2))
                        GIB_loss += self.args.beta * KL_loss
                    # NOTE(review): these three calls assume self.writer is not
                    # None during training (unlike the guarded call below).
                    self.writer.add_scalar(tag='task_loss', scalar_value=task_loss.detach().cpu().item(),
                                           global_step=self.step)
                    self.writer.add_scalar(tag='GIB_loss', scalar_value=GIB_loss.detach().cpu().item(),
                                           global_step=self.step)
                    self.writer.add_scalar(tag='topic_loss', scalar_value=topic_loss.detach().cpu().item(),
                                           global_step=self.step)
                    loss = task_loss + self.args.eta1 * GIB_loss + self.args.eta2 * topic_loss
                    avg_loss += loss.detach().cpu().item()
                    loss.backward()
                    self.optimizer.step()
                    self.scheduler.step()
                    self.optimizer.zero_grad()
                    if self.step % self.refresh_step == 0:
                        avg_loss = float(avg_loss) / self.refresh_step
                        print_output = "loss:{:<6.5f}".format(avg_loss)
                        pbar.update(self.refresh_step)
                        pbar.set_postfix_str(print_output)
                        if self.writer is not None:
                            self.writer.add_scalar(tag='train_loss', scalar_value=avg_loss,
                                                   global_step=self.step)  # tensorbordx
                        avg_loss = 0
                    if self.step % 50 == 0:
                        # evaluate() receives the global step as its x-axis value
                        self.evaluate(self.step)
                if epoch >= self.args.eval_begin_epoch:
                    self.test(epoch)
            pbar.close()
            self.pbar = None
            self.logger.info("Get best dev performance at epoch {}, best dev f1 score is {}".format(self.best_dev_epoch,
                                                                                                    self.best_dev_metric))
            self.logger.info(
                "Get best test performance at epoch {}, best test f1 score is {}".format(self.best_test_epoch,
                                                                                         self.best_test_metric))

    def evaluate(self, epoch):
        """Evaluate on the dev set; save a checkpoint whenever micro-F1 improves.

        :param epoch: x-axis value for tensorboard (the global step when
            called from inside the training loop).
        """
        self.model.eval()
        self.logger.info("***** Running evaluate *****")
        self.logger.info("  Num instance = %d", len(self.dev_data) * self.args.batch_size)
        self.logger.info("  Batch size = %d", self.args.batch_size)
        step = 0
        true_labels, pred_labels = [], []
        with torch.no_grad():
            with tqdm(total=len(self.dev_data), leave=False, dynamic_ncols=True) as pbar:
                pbar.set_description_str(desc="Dev")
                total_loss = 0
                for batch in self.dev_data:
                    step += 1
                    batch = (tup.to(self.args.device) if isinstance(tup, torch.Tensor) else tup for tup in
                             batch)  # to cpu/cuda device
                    (mu, std), logits1, logits2, labels, topic_loss = self._step(batch, mode="dev")
                    GIB_loss = self.loss_func(logits1, labels.view(-1))
                    task_loss = self.loss_func(logits2, labels.view(-1))
                    if self.args.is_IB:
                        KL_loss = -0.5 * (1 + 2 * std.log() - mu.pow(2) - std.pow(2)).sum(1).mean().div(math.log(2))
                        GIB_loss += self.args.beta * KL_loss
                    loss = task_loss + self.args.eta1 * GIB_loss + self.args.eta2 * topic_loss
                    total_loss += loss.detach().cpu().item()
                    preds = logits2.argmax(-1)  # predictions come from the task head
                    true_labels.extend(labels.view(-1).detach().cpu().tolist())
                    pred_labels.extend(preds.view(-1).detach().cpu().tolist())
                    pbar.update()
                # evaluate done
                pbar.close()
                sk_result = classification_report(y_true=true_labels, y_pred=pred_labels,
                                                  labels=list(self.re_dict.values())[1:],
                                                  target_names=list(self.re_dict.keys())[1:], digits=4)
                self.logger.info("%s\n", sk_result)
                result = eval_result(true_labels, pred_labels, self.re_dict, self.logger)
                acc, micro_f1 = round(result['acc'] * 100, 4), round(result['micro_f1'] * 100, 4)
                if self.writer is not None:
                    self.writer.add_scalar(tag='dev_acc', scalar_value=acc, global_step=epoch)  # tensorbordx
                    self.writer.add_scalar(tag='dev_f1', scalar_value=micro_f1, global_step=epoch)  # tensorbordx
                    # fixed: average over the dev loader (was len(self.test_data))
                    self.writer.add_scalar(tag='dev_loss', scalar_value=total_loss / len(self.dev_data),
                                           global_step=epoch)  # tensorbordx
                self.logger.info("Epoch {}/{}, best dev f1: {}, best epoch: {}, current dev f1 score: {}, acc: {}." \
                                 .format(epoch, self.args.num_epochs, self.best_dev_metric, self.best_dev_epoch,
                                         micro_f1, acc))
                if micro_f1 >= self.best_dev_metric:  # this epoch get best performance
                    self.logger.info("Get better performance at epoch {}".format(epoch))
                    self.best_dev_epoch = epoch
                    self.best_dev_metric = micro_f1  # update best metric(f1 score)
                    if self.args.save_path is not None:  # save model
                        torch.save(self.model.state_dict(), self.args.save_path + "/best_model.pth")
                        self.logger.info("Save best model at {}".format(self.args.save_path))
        self.model.train()

    def test(self, epoch):
        """Evaluate on the test set and track the best test micro-F1 seen so far."""
        self.model.eval()
        self.logger.info("\n***** Running testing *****")
        self.logger.info("  Num instance = %d", len(self.test_data) * self.args.batch_size)
        self.logger.info("  Batch size = %d", self.args.batch_size)
        if self.args.load_path is not None:  # load model from load_path
            self.logger.info("Loading model from {}".format(self.args.load_path + "/best_model.pth"))
            self.model.load_state_dict(torch.load(self.args.load_path + "/best_model.pth"))
            self.logger.info("Load model successful!")
            self.model.to(self.args.device)
        true_labels, pred_labels = [], []
        with torch.no_grad():
            with tqdm(total=len(self.test_data), leave=False, dynamic_ncols=True) as pbar:
                pbar.set_description_str(desc="Testing")
                total_loss = 0
                for batch in self.test_data:
                    batch = (tup.to(self.args.device) if isinstance(tup, torch.Tensor) else tup for tup in
                             batch)  # to cpu/cuda device
                    (mu, std), logits1, logits2, labels, topic_loss = self._step(batch, mode="dev")
                    GIB_loss = self.loss_func(logits1, labels.view(-1))
                    task_loss = self.loss_func(logits2, labels.view(-1))
                    if self.args.is_IB:
                        KL_loss = -0.5 * (1 + 2 * std.log() - mu.pow(2) - std.pow(2)).sum(1).mean().div(math.log(2))
                        GIB_loss += self.args.beta * KL_loss
                    loss = task_loss + self.args.eta1 * GIB_loss + self.args.eta2 * topic_loss
                    total_loss += loss.detach().cpu().item()
                    preds = logits2.argmax(-1)
                    true_labels.extend(labels.view(-1).detach().cpu().tolist())
                    pred_labels.extend(preds.view(-1).detach().cpu().tolist())
                    pbar.update()
                # evaluate done
                pbar.close()
                sk_result = classification_report(y_true=true_labels, y_pred=pred_labels,
                                                  labels=list(self.re_dict.values())[1:],
                                                  target_names=list(self.re_dict.keys())[1:], digits=4)
                self.logger.info("%s\n", sk_result)
                result = eval_result(true_labels, pred_labels, self.re_dict, self.logger)
                acc, micro_f1 = round(result['acc'] * 100, 4), round(result['micro_f1'] * 100, 4)
                if self.writer is not None:
                    self.writer.add_scalar(tag='test_acc', scalar_value=acc, global_step=epoch)  # tensorbordx
                    self.writer.add_scalar(tag='test_f1', scalar_value=micro_f1, global_step=epoch)  # tensorbordx
                    self.writer.add_scalar(tag='test_loss', scalar_value=total_loss / len(self.test_data),
                                           global_step=epoch)  # tensorbordx
                    total_loss = 0
                self.logger.info("Epoch {}/{}, best test f1: {}, best epoch: {}, current test f1 score: {}, acc: {}" \
                                 .format(epoch, self.args.num_epochs, self.best_test_metric, self.best_test_epoch,
                                         micro_f1, acc))
                if micro_f1 >= self.best_test_metric:  # this epoch get best performance
                    self.best_test_metric = micro_f1
                    self.best_test_epoch = epoch
        self.model.train()

    def _step(self, batch, mode="train", step=0):
        """Unpack one batch and run the model forward pass.

        :param mode: unused by the model call itself; kept for interface
            compatibility with callers.
        :return: ((mu, std), logits1, logits2, labels, topic_loss)
        """
        input_ids, pieces2word, attention_mask, token_type_ids, adj_matrix, head_tail_pos, labels, image, aux_imgs, \
            aux_mask, edge_mask, vbow_features, tbow_features = batch
        (mu, std), logits1, logits2, topic_loss = self.model(input_ids=input_ids, attention_mask=attention_mask,
                                                             head_tail_pos=head_tail_pos, piece2word=pieces2word,
                                                             adj_matrix=adj_matrix, labels=labels,
                                                             aux_imgs=aux_imgs, aux_mask=aux_mask,
                                                             edge_mask=edge_mask, writer=self.writer, step=step,
                                                             X_T_bow=tbow_features, X_V_bow=vbow_features)
        return (mu, std), logits1, logits2, labels, topic_loss

    def before_multimodal_train(self):
        """Build the Adam optimizer (separate LR for pretrained text/vision
        encoders vs. the rest) and a linear warmup/decay scheduler, then move
        the model to the target device."""
        pretrained_params = []
        main_params = []
        for name, param in self.model.named_parameters():
            if 'text_model' in name:
                pretrained_params.append(param)
            elif 'vision_model' in name:
                pretrained_params.append(param)
            else:
                main_params.append(param)
        optimizer_grouped_parameters = [
            {'params': pretrained_params, 'lr': self.args.lr_pretrained, 'weight_decay': 1e-2},
            {'params': main_params, 'lr': self.args.lr_main, 'weight_decay': 1e-2},
        ]
        self.optimizer = optim.Adam(optimizer_grouped_parameters)
        self.scheduler = get_linear_schedule_with_warmup(optimizer=self.optimizer,
                                                         num_warmup_steps=self.args.warmup_ratio * self.train_num_steps,
                                                         num_training_steps=self.train_num_steps)
        self.model.to(self.args.device)
| 14,294 | 56.874494 | 122 | py |
MRE-ISE | MRE-ISE-main/TSG/textual_scene_graph.py | import os
import json
import subprocess
import threading
import json
import numpy as np
import ast
import tempfile
import re
def load_data(filename):
    """Read a file of Python-literal records, one per line.

    Each line is parsed with :func:`ast.literal_eval` (so the file may use
    single-quoted dict literals rather than strict JSON).

    :param filename: path to the text file.
    :return: list of the parsed records, in file order.
    """
    with open(filename, mode='r') as handle:
        return [ast.literal_eval(line) for line in handle]
def sparse_textual_scene_graph(data, tmp_dir, out_dir, data_mode='train'):
    """
    to parse textual scene graph by using SPICE, see more information in https://github.com/peteanderson80/SPICE

    Writes the captions to a temp JSON file under ``tmp_dir``, invokes the
    SPICE jar (expected next to this script) as a subprocess, and reads its
    detailed output back from ``out_dir``.

    :param data: list of records, each with a ``token`` list and an ``img_id``.
    :param tmp_dir: directory (relative to this file) for the SPICE input file.
    :param out_dir: directory (relative to this file) for the SPICE output file.
    :param data_mode: split name used for the output file ('train'/'val'/'test').
    :return: SPICE's parsed result list (one entry per input record).
    """
    # Prepare temp input file for the SPICE scorer
    input_data = []
    for id, instance in enumerate(data):
        _temp = ' '.join(instance['token'])
        img_id = instance['img_id']
        # SPICE expects a test caption and reference captions; here the
        # caption is used as its own reference.
        input_data.append({
            "image_id": img_id,
            "test": _temp,
            "refs": [_temp]
        })
    cwd = os.path.dirname(os.path.abspath(__file__))
    temp_dir = os.path.join(cwd, tmp_dir)
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    # NOTE(review): delete=False leaves the temp input file on disk after the run
    in_file = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir, mode='w')
    json.dump(input_data, in_file, indent=2)
    in_file.close()
    # cwd = os.path.dirname(os.path.abspath(__file__))
    # temp_dir = os.path.join(cwd, tmp_dir)
    # if not os.path.exists(temp_dir):
    #     os.makedirs(temp_dir)
    # in_file = os.path.join(tmp_dir, data_mode)
    # with open(in_file, mode='w') as f:
    #     json.dump(input_data, f, indent=2)
    # Start job
    SPICE_JAR = 'spice-1.0.jar'
    temp_dir = os.path.join(cwd, out_dir)
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    out_file = os.path.join(temp_dir, data_mode+'.json')
    # -detailed: emit per-caption tuples; -subset/-silent: trim console noise
    spice_cmd = ['java', '-jar', '-Xmx8G', SPICE_JAR, in_file.name,
                 '-out', out_file,
                 '-detailed',
                 '-subset',
                 '-silent'
                 ]
    subprocess.check_call(spice_cmd, cwd=os.path.dirname(os.path.abspath(__file__)))
    # Read and process results
    with open(out_file, mode='r') as data_file:
        results = json.load(data_file)
    return results
def get_index(data, target_name):
    """Locate the token span of ``target_name`` inside the token list ``data``.

    Returns ``(start_index, end_index)`` of the first and last tokens of
    ``target_name``.

    Fix: the end token is now searched from ``start_index`` onward, so a
    repeated token occurring *before* the span can no longer yield
    ``end_index < start_index`` (the original used ``data.index(end_token)``
    over the whole list).

    :param data: list of tokens.
    :param target_name: whitespace-separated phrase expected to occur in ``data``.
    :raises ValueError: if either token is not found.
    """
    tokens = target_name.split()
    start_index = data.index(tokens[0])
    # constrain the search to positions at or after the start token
    end_index = data.index(tokens[-1], start_index)
    return start_index, end_index
def combine(meta_data, tuple_data, target_file):
    """
    combine scene graph and meta data

    Each parsed SPICE record's ``test_tuples`` are bucketed by arity —
    1 = object, 2 = attribute, 3 = relation — and attached to the matching
    original record under the ``TSG`` key; the enriched records are then
    dumped as JSON.

    :param meta_data: the original data (mutated in place)
    :param tuple_data: the parsed SG data (same length as meta_data)
    :param target_file: the target file which saves the final scene graph
    :return:
    """
    assert len(tuple_data) == len(meta_data)
    for parsed, record in zip(tuple_data, meta_data):
        # md['tuples'] = sg['test_tuples']
        objects, attributes, relations = [], [], []
        for item in parsed['test_tuples']:
            tup = item['tuple']
            if len(tup) == 1:
                objects.append(tup)
            elif len(tup) == 2:
                attributes.append(tup)
            elif len(tup) == 3:
                relations.append(tup)
            else:
                raise EOFError('no SG obtained')
        record['TSG'] = {'obj': objects, 'attr': attributes, 'rel': relations}
    with open(target_file, 'w', encoding='utf-8') as f:
        json.dump(meta_data, f)
if __name__ == '__main__':
    # Parse each data split with SPICE and write the combined records
    # (original text + textual scene graph) to ../data/tsg/<split>.json.
    print('parsing textual scene graph')
    FILE_DIR = '../data/txt/'
    INPUT_TMP_DIR = '../data/spice/input/'
    OUTPUT_DIR = '../data/spice/output/'
    DIST_DIR = '../data/tsg/'
    for i in ['ours_train.txt', 'ours_val.txt', 'ours_test.txt']:
        print(f'parsing {i} ... ')
        base_name = os.path.basename(i).split('.')[0]
        data = load_data(os.path.join(FILE_DIR, i))
        sg_data = sparse_textual_scene_graph(data, tmp_dir=INPUT_TMP_DIR, out_dir=OUTPUT_DIR,
                                             data_mode=base_name)
        combine(data, sg_data, os.path.join(DIST_DIR, f'{base_name}.json'))
    # Legacy per-split version of the loop above (note: the valid/test calls
    # below re-parsed train_data, which the loop version fixed).
    # print('parsing train data ... ')
    # train_data = load_data(os.path.join(FILE_DIR, 'ours_train.txt'))
    # train_tuple_data = sparse_textual_scene_graph(train_data, tmp_dir=INPUT_TMP_DIR, out_dir=OUTPUT_DIR, data_mode='train')
    # combine(train_data, train_tuple_data, os.path.join(DIST_DIT, 'train.json'))
    #
    # print('parsing valid data ... ')
    # vaild_data = load_data(os.path.join(FILE_DIR, 'ours_val.txt'))
    # valid_tuple_data = sparse_textual_scene_graph(train_data, tmp_dir=INPUT_TMP_DIR, out_dir=OUTPUT_DIR, data_mode='vaild')
    # combine(vaild_data, valid_tuple_data, os.path.join(DIST_DIT, 'val.json'))
    #
    # print('parsing test data ... ')
    # test_data = load_data(os.path.join(FILE_DIR, 'ours_test.txt'))
    # test_tuple_data = sparse_textual_scene_graph(train_data, tmp_dir=INPUT_TMP_DIR, out_dir=OUTPUT_DIR, data_mode='test')
    # combine(test_data, test_tuple_data, os.path.join(DIST_DIT, 'test.json'))
| 5,031 | 32.105263 | 125 | py |
latex2mathml | latex2mathml-master/example.py | from latex2mathml.converter import convert
def convert_to_mathml(latex_input):
    """Convert a LaTeX expression to MathML markup and print it to stdout."""
    print(convert(latex_input))
if __name__ == "__main__":
convert_to_mathml(r"x = {-b \pm \sqrt{b^2-4ac} \over 2a}")
| 239 | 20.818182 | 62 | py |
latex2mathml | latex2mathml-master/tests/test_symbol_parser.py | import pytest
from latex2mathml.symbols_parser import convert_symbol
@pytest.mark.parametrize(
    "latex, expected",
    [pytest.param("+", "0002B", id="operator-plus"), pytest.param(r"\to", "02192", id="alias-command")],
)
def test_convert_symbol(latex: str, expected: str) -> None:
    # convert_symbol maps a LaTeX token (or alias) to its Unicode code point
    # rendered as an uppercase hex string.
    assert convert_symbol(latex) == expected
| 333 | 26.833333 | 104 | py |
latex2mathml | latex2mathml-master/tests/test_converter.py | import pytest
from multidict import MultiDict
from xmljson import BadgerFish
# noinspection PyProtectedMember
from latex2mathml.converter import _convert, convert
@pytest.mark.parametrize(
"latex, json",
[
pytest.param("x", {"mi": "x"}, id="single-identifier"),
pytest.param("xyz", MultiDict([("mi", "x"), ("mi", "y"), ("mi", "z")]), id="multiple-identifier"),
pytest.param("3", {"mn": "3"}, id="single-number"),
pytest.param("333", {"mn": "333"}, id="multiple-numbers"),
pytest.param("12.34", {"mn": "12.34"}, id="decimal-numbers"),
pytest.param("12x", MultiDict([("mn", "12"), ("mi", "x")]), id="numbers-and-identifiers"),
pytest.param("+", {"mo": "+"}, id="single-operator"),
pytest.param("3-2", MultiDict([("mn", "3"), ("mo", "−"), ("mn", "2")]), id="numbers-and-operators"),
pytest.param(
"3x*2",
MultiDict([("mn", "3"), ("mi", "x"), ("mo", "*"), ("mn", "2")]),
id="numbers-identifiers-and-operators",
),
pytest.param("{a}", {"mrow": {"mi": "a"}}, id="single-group"),
pytest.param("{a}{b}", MultiDict([("mrow", {"mi": "a"}), ("mrow", {"mi": "b"})]), id="multiple-groups"),
pytest.param(
"{a+{b}}", {"mrow": MultiDict([("mi", "a"), ("mo", "+"), ("mrow", {"mi": "b"})])}, id="inner-group"
),
pytest.param(r"1 \over 2", {"mfrac": MultiDict([("mn", "1"), ("mn", "2")])}, id="over"),
pytest.param(
r"{1 \over 2}", {"mrow": {"mfrac": MultiDict([("mn", "1"), ("mn", "2")])}}, id="over-inside-braces"
),
pytest.param(
r"\begin{matrix}a_{1} & b_{2} \\ c_{3} & d_{4} \end{matrix}",
{
"mtable": MultiDict(
[
(
"mtr",
MultiDict(
[
("mtd", {"msub": MultiDict([("mi", "a"), ("mrow", {"mn": "1"})])}),
("mtd", {"msub": MultiDict([("mi", "b"), ("mrow", {"mn": "2"})])}),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"msub": MultiDict([("mi", "c"), ("mrow", {"mn": "3"})])}),
("mtd", {"msub": MultiDict([("mi", "d"), ("mrow", {"mn": "4"})])}),
]
),
),
]
)
},
id="complex-matrix",
),
pytest.param(
r"\left\{ \begin{array} { l } { 3x - 5y + 4z = 0} \\ { x - y + 8z = 0} \\ { 2x - 6y + z = 0} \end{array} "
r"\right.",
{
"mrow": MultiDict(
[
("mo", {"@stretchy": "true", "@fence": "true", "@form": "prefix", "$": "{"}),
(
"mtable",
MultiDict(
[
(
"mtr",
{
"mtd": {
"@columnalign": "left",
"mrow": MultiDict(
[
("mn", "3"),
("mi", "x"),
("mo", "−"),
("mn", "5"),
("mi", "y"),
("mo", "+"),
("mn", "4"),
("mi", "z"),
("mo", "="),
("mn", "0"),
]
),
},
},
),
(
"mtr",
{
"mtd": {
"@columnalign": "left",
"mrow": MultiDict(
[
("mi", "x"),
("mo", "−"),
("mi", "y"),
("mo", "+"),
("mn", "8"),
("mi", "z"),
("mo", "="),
("mn", "0"),
]
),
},
},
),
(
"mtr",
{
"mtd": {
"@columnalign": "left",
"mrow": MultiDict(
[
("mn", "2"),
("mi", "x"),
("mo", "−"),
("mn", "6"),
("mi", "y"),
("mo", "+"),
("mi", "z"),
("mo", "="),
("mn", "0"),
]
),
},
},
),
]
),
),
("mo", {"@stretchy": "true", "@fence": "true", "@form": "postfix"}),
]
)
},
id="null-delimiter",
),
pytest.param("a_b", {"msub": MultiDict([("mi", "a"), ("mi", "b")])}, id="subscript"),
pytest.param("a^b", {"msup": MultiDict([("mi", "a"), ("mi", "b")])}, id="superscript"),
pytest.param(
"a_b^c", {"msubsup": MultiDict([("mi", "a"), ("mi", "b"), ("mi", "c")])}, id="subscript-and-superscript"
),
pytest.param(
"a^b_c", {"msubsup": MultiDict([("mi", "a"), ("mi", "c"), ("mi", "b")])}, id="superscript-and-subscript"
),
pytest.param(
"{a_b}", {"mrow": {"msub": MultiDict([("mi", "a"), ("mi", "b")])}}, id="subscript-within-curly-braces"
),
pytest.param(
"{a^b}", {"mrow": {"msup": MultiDict([("mi", "a"), ("mi", "b")])}}, id="superscript-within-curly-braces"
),
pytest.param(
"a^{i+1}_3",
{
"msubsup": MultiDict(
[("mi", "a"), ("mn", "3"), ("mrow", MultiDict([("mi", "i"), ("mo", "+"), ("mn", "1")]))]
)
},
id="superscript-subscript-and-curly-braces",
),
pytest.param(
r"\frac{1}{2}", {"mfrac": MultiDict([("mrow", {"mn": "1"}), ("mrow", {"mn": "2"})])}, id="simple-fraction"
),
pytest.param(r"\sqrt{2}", {"msqrt": {"mrow": {"mn": "2"}}}, id="square-root"),
pytest.param(r"\sqrt[3]{2}", {"mroot": MultiDict([("mrow", {"mn": "2"}), ("mn", "3")])}, id="root"),
pytest.param(
r"\binom{2}{3}",
MultiDict(
[
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "("}),
("mfrac", MultiDict([("@linethickness", "0"), ("mrow", {"mn": "2"}), ("mrow", {"mn": "3"})])),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": ")"}),
]
),
id="binomial",
),
pytest.param(
r"\left(x\right)",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mo", {"@stretchy": "true", "@fence": "true", "@form": "prefix", "$": "("}),
("mi", "x"),
("mo", {"@stretchy": "true", "@fence": "true", "@form": "postfix", "$": ")"}),
]
),
),
]
),
id="left-and-right",
),
pytest.param(r"\,", {"mspace": {"@width": "0.167em"}}, id="space"),
pytest.param(
r"\begin{matrix}a & b \\ c & d \end{matrix}",
{
"mtable": MultiDict(
[
("mtr", MultiDict([("mtd", {"mi": "a"}), ("mtd", {"mi": "b"})])),
("mtr", MultiDict([("mtd", {"mi": "c"}), ("mtd", {"mi": "d"})])),
]
),
},
id="matrix",
),
pytest.param(
r"\matrix{a & b \\ c & d}",
{
"mtable": MultiDict(
[
("mtr", MultiDict([("mtd", {"mi": "a"}), ("mtd", {"mi": "b"})])),
("mtr", MultiDict([("mtd", {"mi": "c"}), ("mtd", {"mi": "d"})])),
]
),
},
id="matrix-without-begin-and-end",
),
pytest.param(
r"\begin{matrix*}[r]a & b \\ c & d \end{matrix*}",
{
"mtable": MultiDict(
[
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "right", "mi": "a"}),
("mtd", {"@columnalign": "right", "mi": "b"}),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "right", "mi": "c"}),
("mtd", {"@columnalign": "right", "mi": "d"}),
]
),
),
]
)
},
id="matrix-with-alignment",
),
pytest.param(
r"\begin{matrix}-a & b \\ c & d \end{matrix}",
{
"mtable": MultiDict(
[
(
"mtr",
MultiDict([("mtd", MultiDict([("mo", "−"), ("mi", "a")])), ("mtd", {"mi": "b"})]),
),
("mtr", MultiDict([("mtd", {"mi": "c"}), ("mtd", {"mi": "d"})])),
]
),
},
id="matrix-with-negative-sign",
),
pytest.param(
r"\begin{pmatrix}a & b \\ c & d \end{pmatrix}",
MultiDict(
[
("mo", "("),
(
"mtable",
MultiDict(
[
("mtr", MultiDict([("mtd", {"mi": "a"}), ("mtd", {"mi": "b"})])),
("mtr", MultiDict([("mtd", {"mi": "c"}), ("mtd", {"mi": "d"})])),
]
),
),
("mo", ")"),
]
),
id="pmatrix",
),
pytest.param(
r"\begin{array}{cr} 1 & 2 \\ 3 & 4 \end{array}",
{
"mtable": MultiDict(
[
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "center", "mn": "1"}),
("mtd", {"@columnalign": "right", "mn": "2"}),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "center", "mn": "3"}),
("mtd", {"@columnalign": "right", "mn": "4"}),
]
),
),
]
)
},
id="simple-array",
),
pytest.param(
r"\begin{array}{c|rl} 1 & 2 & 3 \\ 4 & 5 & 6 \end{array}",
{
"mtable": MultiDict(
[
("@columnlines", "solid none"),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "center", "mn": "1"}),
("mtd", {"@columnalign": "right", "mn": "2"}),
("mtd", {"@columnalign": "left", "mn": "3"}),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "center", "mn": "4"}),
("mtd", {"@columnalign": "right", "mn": "5"}),
("mtd", {"@columnalign": "left", "mn": "6"}),
]
),
),
]
)
},
id="array-with-vertical-bar",
),
pytest.param(
r"\begin{array}{cr} 1 & 2 \\ 3 & 4 \\ \hline 5 & 6 \\ \hdashline 7 & 8 \end{array}",
{
"mtable": MultiDict(
[
("@rowlines", "none solid dashed"),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "center", "mn": "1"}),
("mtd", {"@columnalign": "right", "mn": "2"}),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "center", "mn": "3"}),
("mtd", {"@columnalign": "right", "mn": "4"}),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "center", "mn": "5"}),
("mtd", {"@columnalign": "right", "mn": "6"}),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "center", "mn": "7"}),
("mtd", {"@columnalign": "right", "mn": "8"}),
]
),
),
]
)
},
id="array-with-horizontal-lines",
),
pytest.param(
r"""\begin{bmatrix}
a_{1,1} & a_{1,2} & \cdots & a_{1,n} \\
a_{2,1} & a_{2,2} & \cdots & a_{2,n} \\
\vdots & \vdots & \ddots & \vdots \\
a_{m,1} & a_{m,2} & \cdots & a_{m,n}
\end{bmatrix}""",
MultiDict(
[
("mo", "["),
(
"mtable",
MultiDict(
[
(
"mtr",
MultiDict(
[
(
"mtd",
{
"msub": MultiDict(
[
("mi", "a"),
(
"mrow",
MultiDict(
[("mn", "1"), ("mo", ","), ("mn", "1")]
),
),
]
)
},
),
(
"mtd",
{
"msub": MultiDict(
[
("mi", "a"),
(
"mrow",
MultiDict(
[("mn", "1"), ("mo", ","), ("mn", "2")]
),
),
]
)
},
),
("mtd", {"mo": "⋯"}),
(
"mtd",
{
"msub": MultiDict(
[
("mi", "a"),
(
"mrow",
MultiDict(
[("mn", "1"), ("mo", ","), ("mi", "n")]
),
),
]
)
},
),
]
),
),
(
"mtr",
MultiDict(
[
(
"mtd",
{
"msub": MultiDict(
[
("mi", "a"),
(
"mrow",
MultiDict(
[("mn", "2"), ("mo", ","), ("mn", "1")]
),
),
]
)
},
),
(
"mtd",
{
"msub": MultiDict(
[
("mi", "a"),
(
"mrow",
MultiDict(
[("mn", "2"), ("mo", ","), ("mn", "2")]
),
),
]
)
},
),
("mtd", {"mo": "⋯"}),
(
"mtd",
{
"msub": MultiDict(
[
("mi", "a"),
(
"mrow",
MultiDict(
[("mn", "2"), ("mo", ","), ("mi", "n")]
),
),
]
)
},
),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"mo": "⋮"}),
("mtd", {"mo": "⋮"}),
("mtd", {"mo": "⋱"}),
("mtd", {"mo": "⋮"}),
]
),
),
(
"mtr",
MultiDict(
[
(
"mtd",
{
"msub": MultiDict(
[
("mi", "a"),
(
"mrow",
MultiDict(
[("mi", "m"), ("mo", ","), ("mn", "1")]
),
),
]
)
},
),
(
"mtd",
{
"msub": MultiDict(
[
("mi", "a"),
(
"mrow",
MultiDict(
[("mi", "m"), ("mo", ","), ("mn", "2")]
),
),
]
)
},
),
("mtd", {"mo": "⋯"}),
(
"mtd",
{
"msub": MultiDict(
[
("mi", "a"),
(
"mrow",
MultiDict(
[("mi", "m"), ("mo", ","), ("mi", "n")]
),
),
]
)
},
),
]
),
),
]
),
),
("mo", "]"),
]
),
id="issue-33",
),
pytest.param(
r"\sqrt { ( - 25 ) ^ { 2 } } = \pm 25",
MultiDict(
[
(
"msqrt",
{
"mrow": MultiDict(
[
("mo", {"$": "(", "@stretchy": "false"}),
("mo", "−"),
("mn", "25"),
(
"msup",
MultiDict(
[
("mo", {"$": ")", "@stretchy": "false"}),
("mrow", {"mn": "2"}),
]
),
),
]
)
},
),
("mo", "="),
("mi", "±"),
("mn", "25"),
]
),
id="issue-42",
),
pytest.param("2 < 5", MultiDict([("mn", "2"), ("mo", "<"), ("mn", "5")]), id="issue-45-lt"),
pytest.param("2 > 5", MultiDict([("mn", "2"), ("mo", ">"), ("mn", "5")]), id="issue-45-gt"),
pytest.param(r"\And", MultiDict([("mi", "&")]), id="And"),
pytest.param(
r"\left(- x^{3} + 5\right)^{5}",
MultiDict(
[
(
"msup",
MultiDict(
[
(
"mrow",
MultiDict(
[
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@form": "prefix",
"$": "(",
},
),
("mo", "−"),
("msup", MultiDict([("mi", "x"), ("mrow", {"mn": "3"})])),
("mo", "+"),
("mn", "5"),
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@form": "postfix",
"$": ")",
},
),
]
),
),
("mrow", {"mn": "5"}),
]
),
)
]
),
id="issue-44",
),
pytest.param(r"\mathbb{R}", {"mi": "ℝ"}, id="issue-51"),
pytest.param(
r"\bar{z_1} = z_2",
MultiDict(
[
(
"mover",
MultiDict(
[
("mrow", {"msub": MultiDict([("mi", "z"), ("mn", "1")])}),
("mo", {"@stretchy": "true", "$": "¯"}),
]
),
),
("mo", "="),
("msub", MultiDict([("mi", "z"), ("mn", "2")])),
]
),
id="issue-52",
),
pytest.param(
r"\mathrm{...}",
{"mrow": MultiDict([("mo", "."), ("mo", "."), ("mo", ".")])},
id="issue-60-1",
),
pytest.param(
r"\mathrm{...}+\mathrm{...}",
MultiDict(
[
("mrow", MultiDict([("mo", "."), ("mo", "."), ("mo", ".")])),
("mo", "+"),
("mrow", MultiDict([("mo", "."), ("mo", "."), ("mo", ".")])),
]
),
id="issue-60-2",
),
pytest.param(
r"\frac{x + 4}{x + \frac{123 \left(\sqrt{x} + 5\right)}{x + 4} - 8}",
{
"mfrac": MultiDict(
[
("mrow", MultiDict([("mi", "x"), ("mo", "+"), ("mn", "4")])),
(
"mrow",
MultiDict(
[
("mi", "x"),
("mo", "+"),
(
"mfrac",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mn", "123"),
(
"mrow",
MultiDict(
[
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@form": "prefix",
"$": "(",
},
),
("msqrt", {"mrow": {"mi": "x"}}),
("mo", "+"),
("mn", "5"),
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@form": "postfix",
"$": ")",
},
),
]
),
),
]
),
),
("mrow", MultiDict([("mi", "x"), ("mo", "+"), ("mn", "4")])),
]
),
),
("mo", "−"),
("mn", "8"),
]
),
),
]
)
},
id="issue-61",
),
pytest.param(
r"\sqrt {\sqrt {\left( x^{3}\right) + v}}",
{
"msqrt": {
"mrow": {
"msqrt": {
"mrow": MultiDict(
[
(
"mrow",
MultiDict(
[
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@form": "prefix",
"$": "(",
},
),
("msup", MultiDict([("mi", "x"), ("mrow", {"mn": 3})])),
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@form": "postfix",
"$": ")",
},
),
]
),
),
("mo", "+"),
("mi", "v"),
]
),
}
}
}
},
id="issue-63",
),
pytest.param(r"1_{}", {"msub": MultiDict([("mn", "1"), ("mrow", {})])}, id="empty-subscript"),
pytest.param(
r"\begin{Bmatrix}\end{Bmatrix}",
MultiDict([("mo", "{"), ("mtable", {}), ("mo", "}")]),
id="Bmatrix",
),
pytest.param(
r"\begin{vmatrix}\end{vmatrix}",
MultiDict([("mo", "|"), ("mtable", {}), ("mo", "|")]),
id="vmatrix",
),
pytest.param(
r"\begin{Vmatrix}\end{Vmatrix}",
MultiDict([("mo", "‖"), ("mtable", {}), ("mo", "‖")]),
id=r"Vmatrix",
),
pytest.param(
r"\begin{matrix}1^2\end{matrix}",
{"mtable": {"mtr": {"mtd": {"msup": MultiDict([("mn", "1"), ("mn", "2")])}}}},
id="command-inside-matrix",
),
pytest.param(r"\e", {"mi": r"\e"}, id=r"\e"),
pytest.param(
r"\left[\begin{matrix}1 & 0 & 0 & 0\\0 & 1 & 0 & 0\\0 & 0 & 1 & 0\\0 & 0 & 0 & 1\end{matrix}\right]",
{
"mrow": MultiDict(
[
("mo", {"@stretchy": "true", "@fence": "true", "@form": "prefix", "$": "["}),
(
"mtable",
MultiDict(
[
(
"mtr",
MultiDict(
[
("mtd", {"mn": "1"}),
("mtd", {"mn": "0"}),
("mtd", {"mn": "0"}),
("mtd", {"mn": "0"}),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"mn": "0"}),
("mtd", {"mn": "1"}),
("mtd", {"mn": "0"}),
("mtd", {"mn": "0"}),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"mn": "0"}),
("mtd", {"mn": "0"}),
("mtd", {"mn": "1"}),
("mtd", {"mn": "0"}),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"mn": "0"}),
("mtd", {"mn": "0"}),
("mtd", {"mn": "0"}),
("mtd", {"mn": "1"}),
]
),
),
]
),
),
("mo", {"@stretchy": "true", "@fence": "true", "@form": "postfix", "$": "]"}),
]
)
},
id="issue-77",
),
pytest.param(r"\log{x}", MultiDict([("mi", "log"), ("mrow", {"mi": "x"})]), id="logarithm"),
pytest.param(
r"\log_2{x}",
MultiDict([("msub", MultiDict([("mi", "log"), ("mn", "2")])), ("mrow", {"mi": "x"})]),
id="logarithm-with-base",
),
pytest.param("^3", {"msup": MultiDict([("mi", ""), ("mn", "3")])}, id="exponent-without-base-works"),
pytest.param(
r"\lim_{x \to +\infty} f(x)",
MultiDict(
[
(
"msub",
MultiDict(
[
("mo", "lim"),
(
"mrow",
MultiDict(
[("mi", "x"), ("mo", "→"), ("mo", "+"), ("mo", "∞")]
),
),
]
),
),
("mi", "f"),
("mo", {"$": "(", "@stretchy": "false"}),
("mi", "x"),
("mo", {"$": ")", "@stretchy": "false"}),
]
),
id="limit-at-plus-infinity",
),
pytest.param(
r"\inf_{x > s}f(x)",
MultiDict(
[
(
"msub",
MultiDict(
[("mo", "inf"), ("mrow", MultiDict([("mi", "x"), ("mo", ">"), ("mi", "s")]))]
),
),
("mi", "f"),
("mo", {"$": "(", "@stretchy": "false"}),
("mi", "x"),
("mo", {"$": ")", "@stretchy": "false"}),
]
),
id="inf",
),
pytest.param(
r"\int\limits_{0}^{\pi}",
{"munderover": MultiDict([("mo", "∫"), ("mrow", {"mn": "0"}), ("mrow", {"mi": "π"})])},
id="issue-76",
),
pytest.param(
r"\int\limits^{\pi}_{0}",
{"munderover": MultiDict([("mo", "∫"), ("mrow", {"mn": "0"}), ("mrow", {"mi": "π"})])},
id="issue-301-a",
),
pytest.param(
r"\int\limits_{\pi}",
{"munder": MultiDict([("mo", "∫"), ("mrow", {"mi": "π"})])},
id="issue-301-b",
),
pytest.param(
r"\int\limits^{\pi}",
{"mover": MultiDict([("mo", "∫"), ("mrow", {"mi": "π"})])},
id="issue-301-c",
),
pytest.param(
r"\substack{ \xi{2}=g{\left(x \right)}}",
{
"mstyle": {
"@scriptlevel": "1",
"mtable": {
"mtr": {
"mtd": MultiDict(
[
("mi", "ξ"),
("mrow", {"mn": "2"}),
("mo", "="),
("mi", "g"),
(
"mrow",
{
"mrow": MultiDict(
[
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@form": "prefix",
"$": "(",
},
),
("mi", "x"),
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@form": "postfix",
"$": ")",
},
),
]
),
},
),
]
)
}
},
}
},
id="issue-75-1-row",
),
pytest.param(
r"\sum_{\substack{1\le i\le n\\ i\ne j}}",
{
"msub": MultiDict(
[
("mo", "∑"),
(
"mrow",
{
"mstyle": {
"@scriptlevel": "1",
"mtable": MultiDict(
[
(
"mtr",
{
"mtd": MultiDict(
[
("mn", "1"),
("mo", "≤"),
("mi", "i"),
("mo", "≤"),
("mi", "n"),
]
)
},
),
(
"mtr",
{"mtd": MultiDict([("mi", "i"), ("mo", "≠"), ("mi", "j")])},
),
]
),
}
},
),
]
)
},
id="issue-75-2-rows",
),
pytest.param(
r"\tan x+\sec x+\cos x+\sin x+\cot x+\csc x+\arccos x+\arcsin x+\arctan x +\cosh x+\coth x+\sinh x+\tanh x",
MultiDict(
[
("mi", "tan"),
("mi", "x"),
("mo", "+"),
("mi", "sec"),
("mi", "x"),
("mo", "+"),
("mi", "cos"),
("mi", "x"),
("mo", "+"),
("mi", "sin"),
("mi", "x"),
("mo", "+"),
("mi", "cot"),
("mi", "x"),
("mo", "+"),
("mi", "csc"),
("mi", "x"),
("mo", "+"),
("mi", "arccos"),
("mi", "x"),
("mo", "+"),
("mi", "arcsin"),
("mi", "x"),
("mo", "+"),
("mi", "arctan"),
("mi", "x"),
("mo", "+"),
("mi", "cosh"),
("mi", "x"),
("mo", "+"),
("mi", "coth"),
("mi", "x"),
("mo", "+"),
("mi", "sinh"),
("mi", "x"),
("mo", "+"),
("mi", "tanh"),
("mi", "x"),
]
),
id="issue-91",
),
pytest.param(r"p_{\max}", {"msub": MultiDict([("mi", "p"), ("mrow", {"mo": "max"})])}, id="issue-98"),
pytest.param(
r"\vec{AB}",
{
"mover": MultiDict(
[("mrow", MultiDict([("mi", "A"), ("mi", "B")])), ("mo", {"@stretchy": "true", "$": "→"})]
)
},
id="issue-103",
),
pytest.param(
r"\begin{cases} {x=1} \\ {y=-2}\end{cases}",
{
"mrow": MultiDict(
[
("mo", {"@stretchy": "true", "@fence": "true", "@form": "prefix", "$": "{"}),
(
"mtable",
MultiDict(
[
(
"mtr",
{
"mtd": MultiDict(
[
("@columnalign", "left"),
(
"mrow",
MultiDict([("mi", "x"), ("mo", "="), ("mn", "1")]),
),
]
)
},
),
(
"mtr",
{
"mtd": MultiDict(
[
("@columnalign", "left"),
(
"mrow",
MultiDict(
[
("mi", "y"),
("mo", "="),
("mo", "−"),
("mn", "2"),
]
),
),
]
)
},
),
]
),
),
]
)
},
id="issue-106",
),
pytest.param(r"\max f", MultiDict([("mo", "max"), ("mi", "f")]), id="issue-108-1"),
pytest.param(
r"\max \{a, b, c\}",
MultiDict(
[
("mo", "max"),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", "a"),
("mo", ","),
("mi", "b"),
("mo", ","),
("mi", "c"),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
id="issue-108-2",
),
pytest.param(
r"\min{(x, y)}",
MultiDict(
[
("mo", "min"),
(
"mrow",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "("}),
("mi", "x"),
("mo", ","),
("mi", "y"),
("mo", {"@stretchy": "false", "$": ")"}),
]
),
),
]
),
id="issue-108-3",
),
pytest.param(r"\dot A", {"mover": MultiDict([("mi", "A"), ("mo", "˙")])}, id="issue-112-1"),
pytest.param(
r"\dot{A}",
{"mover": MultiDict([("mrow", MultiDict([("mi", "A")])), ("mo", "˙")])},
id="issue-112-2",
),
pytest.param(r"\operatorname{sn}x", MultiDict([("mo", "sn"), ("mi", "x")]), id="issue-109-1"),
pytest.param(
r"\operatorname{sn}(x+y)",
MultiDict(
[
("mo", "sn"),
("mo", {"@stretchy": "false", "$": "("}),
("mi", "x"),
("mo", "+"),
("mi", "y"),
("mo", {"@stretchy": "false", "$": ")"}),
]
),
id="issue-109-2",
),
pytest.param(
r"\text{Let}\ x=\text{number of cats}.",
MultiDict(
[
("mtext", "Let"),
("mtext", " "),
("mi", "x"),
("mo", "="),
("mtext", "number of cats"),
("mo", "."),
]
),
id="issue-118",
),
pytest.param(
r"F(a,n)=\overset{a-a-a\cdots-a}{}ntext{个}a",
MultiDict(
[
("mi", "F"),
("mo", {"@stretchy": "false", "$": "("}),
("mi", "a"),
("mo", ","),
("mi", "n"),
("mo", {"@stretchy": "false", "$": ")"}),
("mo", "="),
(
"mover",
MultiDict(
[
("mrow", ""),
(
"mrow",
MultiDict(
[
("mi", "a"),
("mo", "−"),
("mi", "a"),
("mo", "−"),
("mi", "a"),
("mo", "⋯"),
("mo", "−"),
("mi", "a"),
]
),
),
]
),
),
("mi", "n"),
("mi", "t"),
("mi", "e"),
("mi", "x"),
("mi", "t"),
("mrow", {"mi": "个"}),
("mi", "a"),
]
),
id="issue-125-1-overset",
),
pytest.param(
r"a\,\overset{?}{=}\,b",
MultiDict(
[
("mi", "a"),
("mspace", {"@width": "0.167em"}),
("mover", MultiDict([("mrow", {"mo": "="}), ("mrow", {"mo": "?"})])),
("mspace", {"@width": "0.167em"}),
("mi", "b"),
]
),
id="issue-125-2-overset",
),
pytest.param(r"\underset ab", {"munder": MultiDict([("mi", "b"), ("mi", "a")])}, id="issue-125-3-underset"),
pytest.param(
r"a\mathop{t}b\mathop{t}c",
MultiDict([("mi", "a"), ("mrow", {"mi": "t"}), ("mi", "b"), ("mrow", {"mi": "t"}), ("mi", "c")]),
id="issue-125-4-mathop",
),
pytest.param(
r"\mathop{x}\limits_0^1",
{"munderover": MultiDict([("mrow", {"mi": "x"}), ("mn", "0"), ("mn", "1")])},
id="issue-125-4-limits",
),
pytest.param(
r"\Bigg[\bigg[\Big[\big[[",
MultiDict(
[
("mo", {"@minsize": "2.470em", "@maxsize": "2.470em", "$": "["}),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "["}),
("mo", {"@minsize": "1.623em", "@maxsize": "1.623em", "$": "["}),
("mo", {"@minsize": "1.2em", "@maxsize": "1.2em", "$": "["}),
("mo", {"@stretchy": "false", "$": "["}),
]
),
id="big",
),
pytest.param(
r"x\rm {\text{var} = 1+\{b\}}\sf \Delta",
MultiDict(
[
("mi", "x"),
(
"mrow",
MultiDict(
[
("mtext", "var"),
("mo", "="),
("mn", "1"),
("mo", "+"),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "normal", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
),
("mi", {"@mathvariant": "sans-serif", "$": "Δ"}),
]
),
id="global-fonts",
),
pytest.param(
"f'(x) = 2x, f''(x) = 2",
MultiDict(
[
("msup", MultiDict([("mi", "f"), ("mi", "′")])),
("mo", {"$": "(", "@stretchy": "false"}),
("mi", "x"),
("mo", {"$": ")", "@stretchy": "false"}),
("mo", "="),
("mn", "2"),
("mi", "x"),
("mo", ","),
("msup", MultiDict([("mi", "f"), ("mi", "″")])),
("mo", {"$": "(", "@stretchy": "false"}),
("mi", "x"),
("mo", {"$": ")", "@stretchy": "false"}),
("mo", "="),
("mn", "2"),
]
),
id="prime",
),
pytest.param(
"'x",
MultiDict([("msup", MultiDict([("mi", ""), ("mi", "′")])), ("mi", "x")]),
id="prime-no-base",
),
pytest.param(
r"""|\,|\:|\>|\;|\\|\!|\quad|\qquad|\hspace1em|\hspace{10ex}|\enspace|\hskip1em|\kern-1.5pt|\mkern10mu|
\mskip18mu|\mspace18mu|\negthinspace|\negmedspace|\negthickspace|\nobreakspace|\space|\thinspace|""",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "0.167em"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "0.222em"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "0.222em"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "0.278em"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@linebreak": "newline"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "negativethinmathspace"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "1em"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "2em"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "1em"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "10ex"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "0.5em"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "1em"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "-1.5pt"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "10mu"}), # TODO: convert to em?
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "18mu"}), # TODO: convert to em?
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "18mu"}), # TODO: convert to em?
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "negativethinmathspace"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "negativemediummathspace"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "negativethickmathspace"}),
("mo", {"@stretchy": "false", "$": "|"}),
("mtext", " "),
("mo", {"@stretchy": "false", "$": "|"}),
("mtext", " "),
("mo", {"@stretchy": "false", "$": "|"}),
("mspace", {"@width": "thinmathspace"}),
("mo", {"@stretchy": "false", "$": "|"}),
]
),
id="spaces",
),
pytest.param(
r"|x|",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "|"}),
("mi", "x"),
("mo", {"@stretchy": "false", "$": "|"}),
]
),
id="pipe",
),
pytest.param(
r"\|x\|",
MultiDict(
[
("mo", {"@fence": "false", "@stretchy": "false", "$": "‖"}),
("mi", "x"),
("mo", {"@fence": "false", "@stretchy": "false", "$": "‖"}),
]
),
id="double-pipe",
),
pytest.param(
"Hello~World",
MultiDict(
[
("mi", "H"),
("mi", "e"),
("mi", "l"),
("mi", "l"),
("mi", "o"),
("mtext", " "),
("mi", "W"),
("mi", "o"),
("mi", "r"),
("mi", "l"),
("mi", "d"),
]
),
id="tilde",
),
pytest.param(r"\text{ Hello~World }", {"mtext": " Hello~World "}, id="tilde-and-space-in-text"),
pytest.param(
r"""% this is hidden
100\%!% this is hidden, too""",
MultiDict([("mn", "100"), ("mi", "%"), ("mo", "!")]),
id="comments",
),
pytest.param(
r"\#\$\%\&\_\{\}",
MultiDict(
[
("mi", "#"),
("mi", "$"),
("mi", "%"),
("mi", "&"),
("mi", "_"),
("mo", {"@stretchy": "false", "$": "{"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
id="escaped-characters",
),
pytest.param(
r"{a \above 1pt b} + {c \above {1.5pt} d}",
MultiDict(
[
("mrow", {"mfrac": MultiDict([("@linethickness", "1pt"), ("mi", "a"), ("mi", "b")])}),
("mo", "+"),
("mrow", {"mfrac": MultiDict([("@linethickness", "1.5pt"), ("mi", "c"), ("mi", "d")])}),
]
),
id="above",
),
pytest.param(
r"\acute a \acute{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "´")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "´"),
]
),
),
]
),
id="acute",
),
pytest.param(
r"a \atop {b \atopwithdelims | \} c}",
{
"mfrac": MultiDict(
[
("@linethickness", "0"),
("mi", "a"),
(
"mrow",
MultiDict(
[
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "|"}),
(
"mfrac",
MultiDict(
[
("@linethickness", "0"),
("mi", "b"),
("mi", "c"),
]
),
),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "}"}),
]
),
),
]
)
},
id="atop-and-atopwithdelims",
),
pytest.param(
r"{a \abovewithdelims [ ] 1pt b} + {c \abovewithdelims . . {1.5pt} d}",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "["}),
(
"mfrac",
MultiDict(
[
("@linethickness", "1pt"),
("mi", "a"),
("mi", "b"),
]
),
),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "]"}),
]
),
),
("mo", "+"),
(
"mrow",
{
"mfrac": MultiDict(
[
("@linethickness", "1.5pt"),
("mi", "c"),
("mi", "d"),
]
),
},
),
]
),
id="abovewithdelims",
),
pytest.param(
r"\Bbb {\text{var} = 1+\{b\}}",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "double-struck", "$": "var"}),
("mo", {"@mathvariant": "double-struck", "$": "="}),
("mn", {"@mathvariant": "double-struck", "$": "1"}),
("mo", {"@mathvariant": "double-struck", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "double-struck", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
)
},
id="blackboard-bold",
),
pytest.param(
r"\Bbb{AB}C",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", {"@mathvariant": "double-struck", "$": "A"}),
("mi", {"@mathvariant": "double-struck", "$": "B"}),
]
),
),
("mi", "C"),
]
),
id="Bbb-group",
),
pytest.param(r"\bigcirc", {"mi": "◯"}, id="bigcirc"),
pytest.param(
r"\boldsymbol {\text{var} = 1+\{b\}}",
{
"mrow": MultiDict(
[
("mtext", "var"),
("mo", {"@mathvariant": "bold", "$": "="}),
("mn", {"@mathvariant": "bold", "$": "1"}),
("mo", {"@mathvariant": "bold", "$": "+"}),
("mo", {"@stretchy": "false", "@mathvariant": "bold", "$": "{"}),
("mi", {"@mathvariant": "bold-italic", "$": "b"}),
("mo", {"@stretchy": "false", "@mathvariant": "bold", "$": "}"}),
]
)
},
id="boldsymbol",
),
pytest.param(r"\boxed \Box", {"menclose": {"@notation": "box", "mi": "◻"}}, id="boxed-box"),
pytest.param(
r"\breve a \breve{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "˘")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "˘"),
]
),
),
]
),
id="breve",
),
pytest.param(
r"{\brace} + {a \brace b}",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "{"}),
("mfrac", MultiDict([("@linethickness", "0"), ("mrow", ""), ("mrow", "")])),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "}"}),
]
),
),
("mo", "+"),
(
"mrow",
MultiDict(
[
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "{"}),
("mfrac", MultiDict([("@linethickness", "0"), ("mi", "a"), ("mi", "b")])),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "}"}),
]
),
),
]
),
id="brace",
),
pytest.param(
    r"{\brack} + {a \brack b}",
    MultiDict(
        [
            (
                "mrow",
                MultiDict(
                    [
                        ("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "["}),
                        ("mfrac", MultiDict([("@linethickness", "0"), ("mrow", ""), ("mrow", "")])),
                        ("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "]"}),
                    ]
                ),
            ),
            ("mo", "+"),
            (
                "mrow",
                MultiDict(
                    [
                        ("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "["}),
                        ("mfrac", MultiDict([("@linethickness", "0"), ("mi", "a"), ("mi", "b")])),
                        ("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "]"}),
                    ]
                ),
            ),
        ]
    ),
    # id was "brace", duplicating the previous \brace case; unique ids keep
    # `pytest -k brack` / node-id selection working and avoid pytest's
    # auto-appended de-duplication suffixes.
    id="brack",
),
pytest.param(
r"{\cal {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "script", "$": "var"}),
("mo", {"@mathvariant": "script", "$": "="}),
("mn", {"@mathvariant": "script", "$": "1"}),
("mo", {"@mathvariant": "script", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "script", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="calligraphic-mode",
),
pytest.param(r"a\centerdot b", MultiDict([("mi", "a"), ("mo", "⬝"), ("mi", "b")]), id="centerdot"),
pytest.param(
r"\cfrac{2}{1+ \cfrac{2}{1}}",
{
"mfrac": MultiDict(
[
("mstyle", {"@displaystyle": "false", "@scriptlevel": "0", "mrow": {"mn": "2"}}),
(
"mstyle",
{
"@displaystyle": "false",
"@scriptlevel": "0",
"mrow": MultiDict(
[
("mn", "1"),
("mo", "+"),
(
"mfrac",
MultiDict(
[
(
"mstyle",
{
"@displaystyle": "false",
"@scriptlevel": "0",
"mrow": {"mn": "2"},
},
),
(
"mstyle",
{
"@displaystyle": "false",
"@scriptlevel": "0",
"mrow": {"mn": "1"},
},
),
]
),
),
]
),
},
),
]
)
},
id="cfrac",
),
pytest.param(
r"\check a \check{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "ˇ")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "ˇ"),
]
),
),
]
),
id="check",
),
pytest.param(
r"a \choose b",
MultiDict(
[
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "("}),
("mfrac", MultiDict([("@linethickness", "0"), ("mi", "a"), ("mi", "b")])),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": ")"}),
]
),
id="choose",
),
pytest.param(r"\circledS", {"mi": "Ⓢ"}, id="circledS"),
pytest.param(
r"{a\color{red}bc}d",
MultiDict(
[
(
"mrow",
MultiDict(
[("mi", "a"), ("mstyle", MultiDict([("@mathcolor", "red"), ("mi", "b"), ("mi", "c")]))]
),
),
("mi", "d"),
]
),
id="color",
),
pytest.param(
r"\color{}ab",
MultiDict([("mstyle", MultiDict([("@mathcolor", ""), ("mi", "a"), ("mi", "b")]))]),
id="empty-color-works",
),
pytest.param(
r"\dbinom a b",
{
"mstyle": MultiDict(
[
("@displaystyle", "true"),
("@scriptlevel", "0"),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "("}),
("mfrac", MultiDict([("@linethickness", "0"), ("mi", "a"), ("mi", "b")])),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": ")"}),
]
)
},
id="binomial-coefficients",
),
pytest.param(
r"\ddot a \dddot b \ddddot c",
MultiDict(
[
("mover", MultiDict([("mi", "a"), ("mo", "¨")])),
("mover", MultiDict([("mi", "b"), ("mo", "⃛")])),
("mover", MultiDict([("mi", "c"), ("mo", "⃜")])),
]
),
id="ddot-dddot-ddddot",
),
pytest.param(
r"\deg(f(x))",
MultiDict(
[
("mi", "deg"),
("mo", {"@stretchy": "false", "$": "("}),
("mi", "f"),
("mo", {"@stretchy": "false", "$": "("}),
("mi", "x"),
("mo", {"@stretchy": "false", "$": ")"}),
("mo", {"@stretchy": "false", "$": ")"}),
]
),
id="degree-polynomial",
),
pytest.param(
r"\det(A)",
MultiDict(
[
("mo", {"@movablelimits": "true", "$": "det"}),
("mo", {"@stretchy": "false", "$": "("}),
("mi", "A"),
("mo", {"@stretchy": "false", "$": ")"}),
]
),
id="determinant",
),
pytest.param(
r"\dim(A)",
MultiDict(
[
("mi", "dim"),
("mo", {"@stretchy": "false", "$": "("}),
("mi", "A"),
("mo", {"@stretchy": "false", "$": ")"}),
]
),
id="dimension-vector-space",
),
pytest.param(
r"\dfrac a b",
{"mstyle": {"@displaystyle": "true", "@scriptlevel": "0", "mfrac": MultiDict([("mi", "a"), ("mi", "b")])}},
id="dfrac",
),
pytest.param(r"\diagdown \diagup", MultiDict([("mi", "╲"), ("mi", "╱")]), id="diagdown-diagup"),
pytest.param(
r"x_1, \dots, x_n",
MultiDict(
[
("msub", MultiDict([("mi", "x"), ("mn", "1")])),
("mo", ","),
("mo", "…"),
("mo", ","),
("msub", MultiDict([("mi", "x"), ("mi", "n")])),
]
),
id="dots",
),
pytest.param(
r"x_1 + \dotsb + x_n",
MultiDict(
[
("msub", MultiDict([("mi", "x"), ("mn", "1")])),
("mo", "+"),
("mo", "⋯"),
("mo", "+"),
("msub", MultiDict([("mi", "x"), ("mi", "n")])),
]
),
id="dotsb",
),
pytest.param(
r"x_1, \dotsc, x_n",
MultiDict(
[
("msub", MultiDict([("mi", "x"), ("mn", "1")])),
("mo", ","),
("mo", "…"),
("mo", ","),
("msub", MultiDict([("mi", "x"), ("mi", "n")])),
]
),
id="dotsc",
),
pytest.param(
r"A_1 \dotsi A_n",
MultiDict(
[
("msub", MultiDict([("mi", "A"), ("mn", "1")])),
("mo", "⋯"),
("msub", MultiDict([("mi", "A"), ("mi", "n")])),
]
),
id="dotsi",
),
pytest.param(
r"x_1 \dotsm x_n",
MultiDict(
[
("msub", MultiDict([("mi", "x"), ("mn", "1")])),
("mo", "⋯"),
("msub", MultiDict([("mi", "x"), ("mi", "n")])),
]
),
id="dotsm",
),
pytest.param(
r"A_1 \dotso A_n",
MultiDict(
[
("msub", MultiDict([("mi", "A"), ("mn", "1")])),
("mo", "…"),
("msub", MultiDict([("mi", "A"), ("mi", "n")])),
]
),
id="dotso",
),
pytest.param(
r"\frac ab + {\displaystyle \frac cd + \frac ef} + \frac gh",
MultiDict(
[
("mfrac", MultiDict([("mi", "a"), ("mi", "b")])),
("mo", "+"),
(
"mrow",
{
"mstyle": MultiDict(
[
("@displaystyle", "true"),
("@scriptlevel", "0"),
("mfrac", MultiDict([("mi", "c"), ("mi", "d")])),
("mo", "+"),
("mfrac", MultiDict([("mi", "e"), ("mi", "f")])),
]
)
},
),
("mo", "+"),
("mfrac", MultiDict([("mi", "g"), ("mi", "h")])),
]
),
id="displaystyle",
),
pytest.param(
r"\frac ab+\displaystyle\frac cd+\textstyle\frac ef+\scriptstyle\frac gh+\scriptscriptstyle\frac ij",
MultiDict(
[
("mfrac", MultiDict([("mi", "a"), ("mi", "b")])),
("mo", "+"),
(
"mstyle",
MultiDict(
[
("@displaystyle", "true"),
("@scriptlevel", "0"),
("mfrac", MultiDict([("mi", "c"), ("mi", "d")])),
("mo", "+"),
(
"mstyle",
MultiDict(
[
("@displaystyle", "false"),
("@scriptlevel", "0"),
("mfrac", MultiDict([("mi", "e"), ("mi", "f")])),
("mo", "+"),
(
"mstyle",
MultiDict(
[
("@displaystyle", "false"),
("@scriptlevel", "1"),
("mfrac", MultiDict([("mi", "g"), ("mi", "h")])),
("mo", "+"),
(
"mstyle",
MultiDict(
[
("@displaystyle", "false"),
("@scriptlevel", "2"),
("mfrac", MultiDict([("mi", "i"), ("mi", "j")])),
]
),
),
]
),
),
]
),
),
]
),
),
]
),
id="styles",
),
pytest.param(
r"""
\displaylines{
a = a\cr
\text{if } a=b \text{ then } b=a\\
\text{if } a=b \text{ and } b=c \text{ then } a=c
}
""",
{
"mtable": MultiDict(
[
("@rowspacing", "0.5em"),
("@columnspacing", "1em"),
("@displaystyle", "true"),
("mtr", {"mtd": MultiDict([("mi", "a"), ("mo", "="), ("mi", "a")])}),
(
"mtr",
{
"mtd": MultiDict(
[
("mtext", "if "),
("mi", "a"),
("mo", "="),
("mi", "b"),
("mtext", " then "),
("mi", "b"),
("mo", "="),
("mi", "a"),
]
)
},
),
(
"mtr",
{
"mtd": MultiDict(
[
("mtext", "if "),
("mi", "a"),
("mo", "="),
("mi", "b"),
("mtext", " and "),
("mi", "b"),
("mo", "="),
("mi", "c"),
("mtext", " then "),
("mi", "a"),
("mo", "="),
("mi", "c"),
]
)
},
),
]
)
},
id="displaylines",
),
pytest.param(r"\emptyset", {"mo": "∅"}, id="emptyset"),
pytest.param(r"\exp x", MultiDict([("mi", "exp"), ("mi", "x")]), id="exponential-function"),
pytest.param(
r"\fbox{ Hello! }", {"menclose": {"@notation": "box", "mtext": " Hello! "}}, id="fbox"
),
pytest.param(
r"{\frak {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "fraktur", "$": "var"}),
("mo", {"@mathvariant": "fraktur", "$": "="}),
("mn", {"@mathvariant": "fraktur", "$": "1"}),
("mo", {"@mathvariant": "fraktur", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "fraktur", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="fraktur",
),
pytest.param(
r"\gcd_{\rm sub}^{\rm sup}",
{
"munderover": MultiDict(
[
("mo", {"@movablelimits": "true", "$": "gcd"}),
(
"mrow",
MultiDict(
[
("mi", {"@mathvariant": "normal", "$": "s"}),
("mi", {"@mathvariant": "normal", "$": "u"}),
("mi", {"@mathvariant": "normal", "$": "b"}),
]
),
),
(
"mrow",
MultiDict(
[
("mi", {"@mathvariant": "normal", "$": "s"}),
("mi", {"@mathvariant": "normal", "$": "u"}),
("mi", {"@mathvariant": "normal", "$": "p"}),
]
),
),
]
),
},
id="greatest-common-divisor",
),
pytest.param(
r"\genfrac\{]{1pt}{0}{a+b}{c+d}",
{
"mstyle": MultiDict(
[
("@displaystyle", "true"),
("@scriptlevel", "0"),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "{"}),
(
"mfrac",
MultiDict(
[
("@linethickness", "1pt"),
("mrow", MultiDict([("mi", "a"), ("mo", "+"), ("mi", "b")])),
("mrow", MultiDict([("mi", "c"), ("mo", "+"), ("mi", "d")])),
]
),
),
("mo", {"@minsize": "2.047em", "@maxsize": "2.047em", "$": "]"}),
]
),
},
id="genfrac-displaystyle",
),
pytest.param(
r"\genfrac(|{1pt}{1}{a+b}{c+d}",
{
"mstyle": MultiDict(
[
("@displaystyle", "false"),
("@scriptlevel", "0"),
("mo", {"@minsize": "1.2em", "@maxsize": "1.2em", "$": "("}),
(
"mfrac",
MultiDict(
[
("@linethickness", "1pt"),
("mrow", MultiDict([("mi", "a"), ("mo", "+"), ("mi", "b")])),
("mrow", MultiDict([("mi", "c"), ("mo", "+"), ("mi", "d")])),
]
),
),
("mo", {"@minsize": "1.2em", "@maxsize": "1.2em", "$": "|"}),
]
),
},
id="genfrac-textstyle",
),
pytest.param(
r"\genfrac(.{1pt}{2}{a+b}{c+d}",
{
"mstyle": MultiDict(
[
("@displaystyle", "false"),
("@scriptlevel", "1"),
("mo", {"@minsize": "1.2em", "@maxsize": "1.2em", "$": "("}),
(
"mfrac",
MultiDict(
[
("@linethickness", "1pt"),
("mrow", MultiDict([("mi", "a"), ("mo", "+"), ("mi", "b")])),
("mrow", MultiDict([("mi", "c"), ("mo", "+"), ("mi", "d")])),
]
),
),
]
),
},
id="genfrac-scriptstyle",
),
pytest.param(
r"\genfrac\{\}{1pt}{3}{a+b}{c+d}",
{
"mstyle": MultiDict(
[
("@displaystyle", "false"),
("@scriptlevel", "2"),
("mo", {"@minsize": "1.2em", "@maxsize": "1.2em", "$": "{"}),
(
"mfrac",
MultiDict(
[
("@linethickness", "1pt"),
("mrow", MultiDict([("mi", "a"), ("mo", "+"), ("mi", "b")])),
("mrow", MultiDict([("mi", "c"), ("mo", "+"), ("mi", "d")])),
]
),
),
("mo", {"@minsize": "1.2em", "@maxsize": "1.2em", "$": "}"}),
]
),
},
id="genfrac-scriptscriptstyle",
),
pytest.param(r"\gggtr", {"mo": "⋙"}, id="gggtr"),
pytest.param(r"\gvertneqq", {"mo": "≩"}, id="gvertneqq"),
pytest.param(r"\gt", {"mo": ">"}, id="gt"),
pytest.param(
r"\grave a \grave{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "`")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "`"),
]
),
),
]
),
id="grave",
),
pytest.param(
r"\hat a \hat{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", {"@stretchy": "false", "$": "^"})]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", {"@stretchy": "false", "$": "^"}),
]
),
),
]
),
id="hat",
),
pytest.param(r"\hom", {"mi": "hom"}, id="hom"),
pytest.param(
r"\href{https://github.com/roniemartinez/latex2mathml}{\text{latex2mathml}}",
{"mtext": {"@href": "https://github.com/roniemartinez/latex2mathml", "mrow": {"mtext": "latex2mathml"}}},
id="href",
),
pytest.param(
r"[{[\Huge[\huge[[}[",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "["}),
(
"mrow",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "["}),
(
"mstyle",
MultiDict(
[
("@mathsize", "2.49em"),
("mo", {"@stretchy": "false", "$": "["}),
(
"mstyle",
MultiDict(
[
("@mathsize", "2.07em"),
("mo", {"@stretchy": "false", "$": "["}),
("mo", {"@stretchy": "false", "$": "["}),
]
),
),
]
),
),
]
),
),
("mo", {"@stretchy": "false", "$": "["}),
]
),
id="huge",
),
pytest.param(
r"\sqrt[abc]{123}",
{
"mroot": MultiDict(
[("mrow", {"mn": "123"}), ("mrow", MultiDict([("mi", "a"), ("mi", "b"), ("mi", "c")]))]
)
},
id="sqrt-with-multiple-root-nodes",
),
pytest.param(
r"\begin{array}{l} \text{Side Angle Side}\\ \text{S}\hphantom{\text{ide }}\text{A}\hphantom{\text{ngle }}"
r"\text{S} \end{array}",
{
"mtable": MultiDict(
[
(
"mtr",
{
"mtd": {"@columnalign": "left", "mtext": "Side Angle Side"},
},
),
(
"mtr",
{
"mtd": MultiDict(
[
("@columnalign", "left"),
("mtext", "S"),
(
"mpadded",
{
"@height": "0",
"@depth": "0",
"mphantom": {"mrow": {"mtext": "ide "}},
},
),
("mtext", "A"),
(
"mpadded",
{
"@height": "0",
"@depth": "0",
"mphantom": {"mrow": {"mtext": "ngle "}},
},
),
("mtext", "S"),
]
)
},
),
]
)
},
id="hphantom",
),
pytest.param(
r"\idotsint",
{"mrow": MultiDict([("mo", "∫"), ("mo", "⋯"), ("mo", "∫")])},
id="idotsint",
),
pytest.param(r"\intop", {"mo": {"@movablelimits": "true", "$": "∫"}}, id="intop"),
pytest.param(r"\injlim", {"mo": {"@movablelimits": "true", "$": "inj lim"}}, id="injlim"),
pytest.param(r"\ker", {"mi": "ker"}, id="ker"),
pytest.param(
r"[{[\LARGE[\Large[\large[[}[",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "["}),
(
"mrow",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "["}),
(
"mstyle",
MultiDict(
[
("@mathsize", "1.73em"),
("mo", {"@stretchy": "false", "$": "["}),
(
"mstyle",
MultiDict(
[
("@mathsize", "1.44em"),
("mo", {"@stretchy": "false", "$": "["}),
(
"mstyle",
MultiDict(
[
("@mathsize", "1.2em"),
("mo", {"@stretchy": "false", "$": "["}),
("mo", {"@stretchy": "false", "$": "["}),
]
),
),
]
),
),
]
),
),
]
),
),
("mo", {"@stretchy": "false", "$": "["}),
]
),
id="large",
),
pytest.param(
r"[{[\normalsize[\scriptsize[[}[",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "["}),
(
"mrow",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "["}),
(
"mstyle",
MultiDict(
[
("@mathsize", "1em"),
("mo", {"@stretchy": "false", "$": "["}),
(
"mstyle",
MultiDict(
[
("@mathsize", "0.7em"),
("mo", {"@stretchy": "false", "$": "["}),
("mo", {"@stretchy": "false", "$": "["}),
]
),
),
]
),
),
]
),
),
("mo", {"@stretchy": "false", "$": "["}),
]
),
id="normalsize-scriptsize",
),
pytest.param(
r"\mathbb{AB}C",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", {"@mathvariant": "double-struck", "$": "A"}),
("mi", {"@mathvariant": "double-struck", "$": "B"}),
]
),
),
("mi", "C"),
]
),
id="mathbb",
),
pytest.param(
r"\mathbf {\text{var} = 1+\{b\}}",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "bold", "$": "var"}),
("mo", {"@mathvariant": "bold", "$": "="}),
("mn", {"@mathvariant": "bold", "$": "1"}),
("mo", {"@mathvariant": "bold", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "bold", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
)
},
id="mathbf",
),
pytest.param(
r"{\mathcal {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "script", "$": "var"}),
("mo", {"@mathvariant": "script", "$": "="}),
("mn", {"@mathvariant": "script", "$": "1"}),
("mo", {"@mathvariant": "script", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "script", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="mathcal",
),
pytest.param(
r"{\mathfrak {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "fraktur", "$": "var"}),
("mo", {"@mathvariant": "fraktur", "$": "="}),
("mn", {"@mathvariant": "fraktur", "$": "1"}),
("mo", {"@mathvariant": "fraktur", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "fraktur", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="mathfrak",
),
pytest.param(
r"{\mathit {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "italic", "$": "var"}),
("mo", {"@mathvariant": "italic", "$": "="}),
("mn", {"@mathvariant": "italic", "$": "1"}),
("mo", {"@mathvariant": "italic", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "italic", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="mathit",
),
pytest.param(
r"{\mathrm {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", "var"),
("mo", "="),
("mn", "1"),
("mo", "+"),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "normal", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="mathrm",
),
# FIXME: no way to distinguish \mathcal and \mathscr for now
pytest.param(
r"{\mathscr {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "script", "$": "var"}),
("mo", {"@mathvariant": "script", "$": "="}),
("mn", {"@mathvariant": "script", "$": "1"}),
("mo", {"@mathvariant": "script", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "script", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="mathscr",
),
pytest.param(
r"{\mathsf {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", "var"),
("mo", "="),
("mn", "1"),
("mo", "+"),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "sans-serif", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="mathsf",
),
pytest.param(
r"{\mathtt {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "monospace", "$": "var"}),
("mo", {"@mathvariant": "monospace", "$": "="}),
("mn", {"@mathvariant": "monospace", "$": "1"}),
("mo", {"@mathvariant": "monospace", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "monospace", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="mathtt",
),
# FIXME: convert with correct spacing
pytest.param(
r"\mathop{a}\mathord{b}\mathpunct{c}\mathbin{d}\mathrel{e}",
MultiDict(
[
("mrow", {"mi": "a"}),
("mrow", {"mi": "b"}),
("mrow", {"mi": "c"}),
("mrow", {"mi": "d"}),
("mrow", {"mi": "e"}),
]
),
id="math-commands-that-currently-does-nothing",
),
pytest.param(
r"\hbox{This is a sentence.}",
{
"mstyle": {
"@displaystyle": "false",
"@scriptlevel": "0",
"mtext": "This is a sentence.",
}
},
id="hbox",
),
pytest.param(
r"\hbox{left $x > 0$ center \$x > 0\$ right}",
{
"mstyle": MultiDict(
[
("@displaystyle", "false"),
("@scriptlevel", "0"),
("mtext", "left "),
("mrow", MultiDict([("mi", "x"), ("mo", ">"), ("mn", "0")])),
("mtext", r" center \$x > 0\$ right"),
]
)
},
id="hbox-with-math-mode",
),
pytest.param(
r"\hbox{\alpha $\alpha$}",
{
"mstyle": MultiDict(
[
("@displaystyle", "false"),
("@scriptlevel", "0"),
("mtext", r"\alpha "),
("mrow", {"mi": "α"}),
]
)
},
id="hbox-with-backslash-in-text",
),
pytest.param(
r"\begin{matrix} xxxxxx & xxxxxx & xxxxxx \cr ab & \hfil ab & ab\hfil\cr \end{matrix}",
{
"mtable": MultiDict(
[
(
"mtr",
MultiDict(
[
(
"mtd",
MultiDict(
[
("mi", "x"),
("mi", "x"),
("mi", "x"),
("mi", "x"),
("mi", "x"),
("mi", "x"),
]
),
),
(
"mtd",
MultiDict(
[
("mi", "x"),
("mi", "x"),
("mi", "x"),
("mi", "x"),
("mi", "x"),
("mi", "x"),
]
),
),
(
"mtd",
MultiDict(
[
("mi", "x"),
("mi", "x"),
("mi", "x"),
("mi", "x"),
("mi", "x"),
("mi", "x"),
]
),
),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", MultiDict([("mi", "a"), ("mi", "b")])),
("mtd", MultiDict([("@columnalign", "right"), ("mi", "a"), ("mi", "b")])),
("mtd", MultiDict([("@columnalign", "left"), ("mi", "a"), ("mi", "b")])),
]
),
),
]
)
},
id="hfil",
),
pytest.param(r"\ldotp", {"mo": "."}, id="ldotp"),
pytest.param(r"\lg", {"mi": "lg"}, id="lg"),
pytest.param(r"\liminf", {"mo": {"@movablelimits": "true", "$": "lim inf"}}, id="liminf"),
pytest.param(r"\limsup", {"mo": {"@movablelimits": "true", "$": "lim sup"}}, id="limsup"),
pytest.param(r"\llless", {"mo": "⋘"}, id="llless"),
pytest.param(r"\lt", {"mo": "<"}, id="lt"),
pytest.param(r"\lvert", {"mo": "|"}, id="lvert"),
pytest.param(r"\lVert", {"mo": "‖"}, id="lVert"),
pytest.param(r"\lvertneqq", {"mo": "≨"}, id="lvertneqq"),
pytest.param(r"\ngeqq", {"mo": "≱"}, id="ngeqq"),
pytest.param(r"\nshortmid", {"mo": "∤"}, id="nshortmid"),
pytest.param(r"\nshortparallel", {"mo": "∦"}, id="nshortparallel"),
pytest.param(r"\nsubseteqq", {"mo": "⊈"}, id="nsubseteqq"),
pytest.param(r"\omicron", {"mo": "ο"}, id="omicron"),
pytest.param(r"\Pr", {"mo": {"@movablelimits": "true", "$": "Pr"}}, id="Pr"),
pytest.param(r"\projlim", {"mo": {"@movablelimits": "true", "$": "proj lim"}}, id="projlim"),
pytest.param(r"\rvert", {"mo": "|"}, id="rvert"),
pytest.param(r"\rVert", {"mo": "‖"}, id="rVert"),
pytest.param(r"\S", {"mo": "§"}, id="S"),
pytest.param(r"\shortmid", {"mo": "∣"}, id="shortmid"),
pytest.param(r"\smallfrown", {"mo": "⌢"}, id="smallfrown"),
pytest.param(r"\smallint", {"mo": {"@largeop": "false", "$": "∫"}}, id="smallint"),
pytest.param(r"\smallsmile", {"mo": "⌣"}, id="smallsmile"),
pytest.param(r"\surd", {"mo": {"@stretchy": "false", "$": "√"}}, id="surd"),
pytest.param(r"\thicksim", {"mo": "∼"}, id="thicksim"),
pytest.param(r"\thickapprox", {"mo": "≈"}, id="thickapprox"),
pytest.param(r"\varsubsetneqq", {"mo": "⫋"}, id="varsubsetneqq"),
pytest.param(r"\varsupsetneq", {"mo": "⊋"}, id="varsupsetneq"),
pytest.param(r"\varsupsetneqq", {"mo": "⫌"}, id="varsupsetneqq"),
pytest.param(
r"[{[\small[\tiny[\Tiny[[}[",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "["}),
(
"mrow",
MultiDict(
[
("mo", {"@stretchy": "false", "$": "["}),
(
"mstyle",
MultiDict(
[
("@mathsize", "0.85em"),
("mo", {"@stretchy": "false", "$": "["}),
(
"mstyle",
MultiDict(
[
("@mathsize", "0.5em"),
("mo", {"@stretchy": "false", "$": "["}),
(
"mstyle",
MultiDict(
[
("@mathsize", "0.6em"),
("mo", {"@stretchy": "false", "$": "["}),
("mo", {"@stretchy": "false", "$": "["}),
]
),
),
]
),
),
]
),
),
]
),
),
("mo", {"@stretchy": "false", "$": "["}),
]
),
id="small-tiny",
),
pytest.param(
r"\mbox{This is a sentence.}",
{
"mstyle": {
"@displaystyle": "false",
"@scriptlevel": "0",
"mtext": "This is a sentence.",
}
},
id="mbox",
),
pytest.param(
r"\frac{\style{color:red}{x+1}}{\style{color:green}y+2}",
{
"mfrac": MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[("@style", "color:red"), ("mi", "x"), ("mo", "+"), ("mn", "1")]
)
},
),
(
"mrow",
MultiDict([("mi", {"@style": "color:green", "$": "y"}), ("mo", "+"), ("mn", "2")]),
),
]
)
},
id="style",
),
pytest.param(
r"\mathring a \mathring{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "˚")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "˚"),
]
),
),
]
),
id="mathring",
),
pytest.param(
r"\overleftarrow a \overleftarrow{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "←")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "←"),
]
),
),
]
),
id="overleftarrow",
),
pytest.param(
r"\overleftrightarrow a \overleftrightarrow{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "↔")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "↔"),
]
),
),
]
),
id="overleftrightarrow",
),
pytest.param(
r"\overline a \overline{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", {"@accent": "true", "$": "―"})]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", {"@accent": "true", "$": "―"}),
]
),
),
]
),
id="overline",
),
pytest.param(
r"\overparen a \overparen{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "⏜")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "⏜"),
]
),
),
]
),
id="overparen",
),
pytest.param(
r"\overrightarrow a \overrightarrow{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "→")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "→"),
]
),
),
]
),
id="overrightarrow",
),
pytest.param(
r"\tilde a \tilde{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", {"@stretchy": "false", "$": "~"})]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", {"@stretchy": "false", "$": "~"}),
]
),
),
]
),
id="tilde",
),
pytest.param(
r"\underleftarrow a \underleftarrow{bc}",
MultiDict(
[
(
"munder",
MultiDict([("mi", "a"), ("mo", "←")]),
),
(
"munder",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "←"),
]
),
),
]
),
id="underleftarrow",
),
pytest.param(
r"\underrightarrow a \underrightarrow{bc}",
MultiDict(
[
(
"munder",
MultiDict([("mi", "a"), ("mo", "→")]),
),
(
"munder",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "→"),
]
),
),
]
),
id="underrightarrow",
),
pytest.param(
r"\underleftrightarrow a \underleftrightarrow{bc}",
MultiDict(
[
(
"munder",
MultiDict([("mi", "a"), ("mo", "↔")]),
),
(
"munder",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "↔"),
]
),
),
]
),
id="underleftrightarrow",
),
pytest.param(
r"\underline a \underline{bc}",
MultiDict(
[
(
"munder",
MultiDict([("mi", "a"), ("mo", {"@accent": "true", "$": "―"})]),
),
(
"munder",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", {"@accent": "true", "$": "―"}),
]
),
),
]
),
id="underline",
),
pytest.param(
r"\underparen a \underparen{bc}",
MultiDict(
[
(
"munder",
MultiDict([("mi", "a"), ("mo", "⏝")]),
),
(
"munder",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "⏝"),
]
),
),
]
),
id="underparen",
),
pytest.param(
r"\widehat a \widehat{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "^")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "^"),
]
),
),
]
),
id="widehat",
),
pytest.param(
r"\widetilde a \widetilde{bc}",
MultiDict(
[
(
"mover",
MultiDict([("mi", "a"), ("mo", "~")]),
),
(
"mover",
MultiDict(
[
(
"mrow",
MultiDict(
[
("mi", "b"),
("mi", "c"),
]
),
),
("mo", "~"),
]
),
),
]
),
id="widetilde",
),
pytest.param(r"\phantom a", {"mphantom": {"mi": "a"}}, id="phantom"),
pytest.param(r"\vphantom a", {"mpadded": {"@width": "0", "mphantom": {"mi": "a"}}}, id="vphantom"),
pytest.param(
r"\sideset{_1^2}{_3^4}\sum",
{
"mrow": MultiDict(
[
(
"msubsup",
MultiDict(
[
(
"mpadded",
{
"@width": "0",
"mphantom": {"mo": {"@movablelimits": "false", "$": "∑"}},
},
),
("mn", "1"),
("mn", "2"),
]
),
),
("mstyle", {"@scriptlevel": "0", "mspace": {"@width": "-0.167em"}}),
(
"msubsup",
MultiDict(
[("mo", {"@movablelimits": "false", "$": "∑"}), ("mn", "3"), ("mn", "4")]
),
),
]
)
},
id="sideset",
),
pytest.param(
r"\sideset{_1^2}{_3^4}{\sum}",
{
"mrow": MultiDict(
[
(
"msubsup",
MultiDict(
[
(
"mpadded",
{
"@width": "0",
"mphantom": {"mrow": {"@movablelimits": "false", "mo": "∑"}},
},
),
("mn", "1"),
("mn", "2"),
]
),
),
("mstyle", {"@scriptlevel": "0", "mspace": {"@width": "-0.167em"}}),
(
"msubsup",
MultiDict(
[("mrow", {"@movablelimits": "false", "mo": "∑"}), ("mn", "3"), ("mn", "4")]
),
),
]
)
},
id="307-a",
),
pytest.param(
r"\sideset{^{°}}{}{C}",
{
"mrow": MultiDict(
[
(
"msup",
MultiDict(
[
(
"mpadded",
{
"@width": "0",
"mphantom": {"mrow": {"@movablelimits": "false", "mi": "C"}},
},
),
("mrow", {"mi": "°"}),
]
),
),
("mstyle", {"@scriptlevel": "0", "mspace": {"@width": "-0.167em"}}),
("mrow", {"mrow": {"@movablelimits": "false", "mi": "C"}}),
]
)
},
id="307-b",
),
pytest.param(
r"\tbinom{2}{3}",
{
"mstyle": MultiDict(
[
("@displaystyle", "false"),
("@scriptlevel", "0"),
("mo", {"@minsize": "1.2em", "@maxsize": "1.2em", "$": "("}),
("mfrac", MultiDict([("@linethickness", "0"), ("mrow", {"mn": "2"}), ("mrow", {"mn": "3"})])),
("mo", {"@minsize": "1.2em", "@maxsize": "1.2em", "$": ")"}),
]
)
},
id="tbinom",
),
pytest.param(
r"\tfrac{1}{2}",
{
"mstyle": {
"@displaystyle": "false",
"@scriptlevel": "0",
"mfrac": MultiDict([("mrow", {"mn": "1"}), ("mrow", {"mn": "2"})]),
}
},
id="tfrac",
),
pytest.param(
r"{\mit {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "italic", "$": "var"}),
("mo", {"@mathvariant": "italic", "$": "="}),
("mn", {"@mathvariant": "italic", "$": "1"}),
("mo", {"@mathvariant": "italic", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", "b"),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="mit",
),
pytest.param(
r"{\oldstyle {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "normal", "$": "var"}),
("mo", {"@mathvariant": "normal", "$": "="}),
("mn", {"@mathvariant": "normal", "$": "1"}),
("mo", {"@mathvariant": "normal", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "normal", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="oldstyle",
),
pytest.param(
r"{\scr {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "script", "$": "var"}),
("mo", {"@mathvariant": "script", "$": "="}),
("mn", {"@mathvariant": "script", "$": "1"}),
("mo", {"@mathvariant": "script", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "script", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="scr",
),
pytest.param(
r"{\tt {\text{var} = 1+\{b\}}} + B",
MultiDict(
[
(
"mrow",
{
"mrow": MultiDict(
[
("mtext", {"@mathvariant": "monospace", "$": "var"}),
("mo", {"@mathvariant": "monospace", "$": "="}),
("mn", {"@mathvariant": "monospace", "$": "1"}),
("mo", {"@mathvariant": "monospace", "$": "+"}),
("mo", {"@stretchy": "false", "$": "{"}),
("mi", {"@mathvariant": "monospace", "$": "b"}),
("mo", {"@stretchy": "false", "$": "}"}),
]
),
},
),
("mo", "+"),
("mi", "B"),
]
),
id="tt",
),
pytest.param(
r"\textbf{ Hello~World }",
{"mtext": {"@mathvariant": "bold", "$": " Hello~World "}},
id="textbf",
),
pytest.param(
r"\textit{ Hello~World }",
{"mtext": {"@mathvariant": "italic", "$": " Hello~World "}},
id="textit",
),
pytest.param(r"\textrm{ Hello~World }", {"mtext": " Hello~World "}, id="textrm"),
pytest.param(
r"\textsf{ Hello~World }",
{"mtext": {"@mathvariant": "sans-serif", "$": " Hello~World "}},
id="textsf",
),
pytest.param(
r"\texttt{ Hello~World }",
{"mtext": {"@mathvariant": "monospace", "$": " Hello~World "}},
id="texttt",
),
pytest.param(
r"\LaTeX",
{
"mrow": MultiDict(
[
("mi", "L"),
("mspace", {"@width": "-.325em"}),
(
"mpadded",
MultiDict(
[
("@height", "+.21ex"),
("@depth", "-.21ex"),
("@voffset", "+.21ex"),
("mstyle", {"@displaystyle": "false", "@scriptlevel": "1", "mrow": {"mi": "A"}}),
]
),
),
("mspace", {"@width": "-.17em"}),
("mi", "T"),
("mspace", {"@width": "-.14em"}),
(
"mpadded",
MultiDict(
[
("@height", "-.5ex"),
("@depth", "+.5ex"),
("@voffset", "-.5ex"),
("mrow", {"mi": "E"}),
]
),
),
("mspace", {"@width": "-.115em"}),
("mi", "X"),
]
)
},
id="LaTeX",
),
pytest.param(
r"\Bbb \LaTeX",
{
"mrow": MultiDict(
[
("mi", {"@mathvariant": "double-struck", "$": "L"}),
("mspace", {"@width": "-.325em"}),
(
"mpadded",
MultiDict(
[
("@height", "+.21ex"),
("@depth", "-.21ex"),
("@voffset", "+.21ex"),
(
"mstyle",
{
"@displaystyle": "false",
"@scriptlevel": "1",
"mrow": {"mi": {"@mathvariant": "double-struck", "$": "A"}},
},
),
]
),
),
("mspace", {"@width": "-.17em"}),
("mi", {"@mathvariant": "double-struck", "$": "T"}),
("mspace", {"@width": "-.14em"}),
(
"mpadded",
MultiDict(
[
("@height", "-.5ex"),
("@depth", "+.5ex"),
("@voffset", "-.5ex"),
("mrow", {"mi": {"@mathvariant": "double-struck", "$": "E"}}),
]
),
),
("mspace", {"@width": "-.115em"}),
("mi", {"@mathvariant": "double-struck", "$": "X"}),
]
)
},
id="LaTeX-with-style",
),
pytest.param(
r"\TeX",
{
"mrow": MultiDict(
[
("mi", "T"),
("mspace", {"@width": "-.14em"}),
(
"mpadded",
MultiDict(
[
("@height", "-.5ex"),
("@depth", "+.5ex"),
("@voffset", "-.5ex"),
("mrow", {"mi": "E"}),
]
),
),
("mspace", {"@width": "-.115em"}),
("mi", "X"),
]
)
},
id="TeX",
),
pytest.param(
r"\rm \TeX",
{
"mrow": MultiDict(
[
("mi", {"@mathvariant": "normal", "$": "T"}),
("mspace", {"@width": "-.14em"}),
(
"mpadded",
MultiDict(
[
("@height", "-.5ex"),
("@depth", "+.5ex"),
("@voffset", "-.5ex"),
("mrow", {"mi": {"@mathvariant": "normal", "$": "E"}}),
]
),
),
("mspace", {"@width": "-.115em"}),
("mi", {"@mathvariant": "normal", "$": "X"}),
]
)
},
id="TeX-with-style",
),
pytest.param(
r"\skew7\hat A",
MultiDict(
[
(
"mrow",
{
"mover": MultiDict(
[
("mrow", MultiDict([("mi", "A"), ("mspace", {"@width": "0.389em"})])),
("mo", {"@stretchy": "false", "$": "^"}),
]
),
},
),
("mspace", {"@width": "-0.389em"}),
]
),
id="skew",
),
pytest.param(
r"\skew{8}\tilde M",
MultiDict(
[
(
"mrow",
{
"mover": MultiDict(
[
("mrow", MultiDict([("mi", "M"), ("mspace", {"@width": "0.444em"})])),
("mo", {"@stretchy": "false", "$": "~"}),
]
),
},
),
("mspace", {"@width": "-0.444em"}),
]
),
id="skew-with-braces",
),
pytest.param(
r"\mod 5",
MultiDict(
[
("mspace", {"@width": "1em"}),
("mi", "mod"),
("mspace", {"@width": "0.333em"}),
("mn", "5"),
]
),
id="mod",
),
pytest.param(
r"\pmod 5",
MultiDict(
[
("mspace", {"@width": "1em"}),
("mo", "("),
("mi", "mod"),
("mspace", {"@width": "0.333em"}),
("mn", "5"),
("mo", ")"),
]
),
id="pmod",
),
pytest.param(
r"\left\{\middle|\right\}",
{
"mrow": MultiDict(
[
("mo", {"@stretchy": "true", "@fence": "true", "@form": "prefix", "$": "{"}),
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@lspace": "0.05em",
"@rspace": "0.05em",
"$": "|",
},
),
("mo", {"@stretchy": "true", "@fence": "true", "@form": "postfix", "$": "}"}),
]
)
},
id="middle",
),
pytest.param(r"9 \bmod2", MultiDict([("mn", "9"), ("mo", "mod"), ("mn", "2")]), id="bmod"),
pytest.param(r"\overbrace3", {"mover": MultiDict([("mn", "3"), ("mo", "⏞")])}, id="overbrace-a"),
pytest.param(
r"\overbrace3^a",
{"mover": MultiDict([("mover", MultiDict([("mn", "3"), ("mo", "⏞")])), ("mi", "a")])},
id="overbrace-b",
),
pytest.param(
r"\overbrace3^a_x",
{
"munderover": MultiDict(
[("mover", MultiDict([("mn", "3"), ("mo", "⏞")])), ("mi", "x"), ("mi", "a")]
)
},
id="overbrace-c",
),
pytest.param(r"\underbrace3", {"munder": MultiDict([("mn", "3"), ("mo", "⏟")])}, id="underbrace-a"),
pytest.param(
r"\underbrace3_a",
{"munder": MultiDict([("munder", MultiDict([("mn", "3"), ("mo", "⏟")])), ("mi", "a")])},
id="underbrace-b",
),
pytest.param(
r"\underbrace3_a^x",
{
"munderover": MultiDict(
[("munder", MultiDict([("mn", "3"), ("mo", "⏟")])), ("mi", "a"), ("mi", "x")]
)
},
id="underbrace-c",
),
pytest.param(
r"\xleftarrow x",
{
"mover": MultiDict(
[
("mstyle", {"@scriptlevel": "0", "mo": "←"}),
(
"mpadded",
MultiDict(
[
("@width", "+0.833em"),
("@lspace", "0.556em"),
("@voffset", "-.2em"),
("@height", "-.2em"),
("mi", "x"),
("mspace", {"@depth": ".25em"}),
]
),
),
]
)
},
id="xleftarrow",
),
pytest.param(
r"\xleftarrow[y] x",
{
"munderover": MultiDict(
[
("mstyle", {"@scriptlevel": "0", "mo": "←"}),
(
"mpadded",
MultiDict(
[
("@width", "+0.833em"),
("@lspace", "0.556em"),
("@voffset", "-.2em"),
("@height", "-.2em"),
("mrow", {"mi": "y"}),
("mspace", {"@depth": ".25em"}),
]
),
),
(
"mpadded",
MultiDict(
[
("@width", "+0.833em"),
("@lspace", "0.556em"),
("@voffset", "-.2em"),
("@height", "-.2em"),
("mi", "x"),
("mspace", {"@depth": ".25em"}),
]
),
),
]
)
},
id="xleftarrow-with-argument",
),
pytest.param(
r"\xrightarrow x",
{
"mover": MultiDict(
[
("mstyle", {"@scriptlevel": "0", "mo": "→"}),
(
"mpadded",
MultiDict(
[
("@width", "+0.833em"),
("@lspace", "0.556em"),
("@voffset", "-.2em"),
("@height", "-.2em"),
("mi", "x"),
("mspace", {"@depth": ".25em"}),
]
),
),
]
)
},
id="xrightarrow",
),
pytest.param(
r"\xrightarrow[y] x",
{
"munderover": MultiDict(
[
("mstyle", {"@scriptlevel": "0", "mo": "→"}),
(
"mpadded",
MultiDict(
[
("@width", "+0.833em"),
("@lspace", "0.556em"),
("@voffset", "-.2em"),
("@height", "-.2em"),
("mrow", {"mi": "y"}),
("mspace", {"@depth": ".25em"}),
]
),
),
(
"mpadded",
MultiDict(
[
("@width", "+0.833em"),
("@lspace", "0.556em"),
("@voffset", "-.2em"),
("@height", "-.2em"),
("mi", "x"),
("mspace", {"@depth": ".25em"}),
]
),
),
]
)
},
id="xrightarrow-with-argument",
),
pytest.param(
r"\bigl(\begin{smallmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{smallmatrix}\bigr)",
MultiDict(
[
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@minsize": "1.2em",
"@maxsize": "1.2em",
"$": "(",
},
),
(
"mstyle",
{
"@scriptlevel": "1",
"mtable": MultiDict(
[
("@rowspacing", "0.1em"),
("@columnspacing", "0.2778em"),
(
"mtr",
MultiDict([("mtd", {"mn": "1"}), ("mtd", {"mn": "2"}), ("mtd", {"mn": "3"})]),
),
(
"mtr",
MultiDict([("mtd", {"mn": "4"}), ("mtd", {"mn": "5"}), ("mtd", {"mn": "6"})]),
),
]
),
},
),
(
"mo",
{
"@stretchy": "true",
"@fence": "true",
"@minsize": "1.2em",
"@maxsize": "1.2em",
"$": ")",
},
),
]
),
id="bigl-smallmatrix-bigr",
),
pytest.param(
r"\not\in\not a\not\operatorname{R}\not",
MultiDict(
[
("mo", "∉"),
("mpadded", MultiDict([("@width", "0"), ("mtext", "⧸")])),
("mi", "a"),
("mpadded", MultiDict([("@width", "0"), ("mtext", "⧸")])),
("mo", "R"),
("mpadded", MultiDict([("@width", "0"), ("mtext", "⧸")])),
]
),
id="not",
),
pytest.param(
r"\begin{split} x &= y \\ &=z \end{split}",
{
"mtable": MultiDict(
[
("@displaystyle", "true"),
("@columnspacing", "0em"),
("@rowspacing", "3pt"),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "right", "mi": "x"}),
(
"mtd",
MultiDict(
[("@columnalign", "left"), ("mi", ""), ("mo", "="), ("mi", "y")]
),
),
]
),
),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "right"}),
(
"mtd",
MultiDict(
[("@columnalign", "left"), ("mi", ""), ("mo", "="), ("mi", "z")]
),
),
]
),
),
]
)
},
id="split",
),
pytest.param(
r"\begin{align*}x &=y & w &=z & a&=b \end{align*}",
{
"mtable": MultiDict(
[
("@displaystyle", "true"),
("@rowspacing", "3pt"),
("@columnspacing", "0em 2em 0em 2em 0em 2em"),
(
"mtr",
MultiDict(
[
("mtd", {"@columnalign": "right", "mi": "x"}),
(
"mtd",
MultiDict(
[("@columnalign", "left"), ("mi", ""), ("mo", "="), ("mi", "y")]
),
),
("mtd", {"@columnalign": "right", "mi": "w"}),
(
"mtd",
MultiDict(
[("@columnalign", "left"), ("mi", ""), ("mo", "="), ("mi", "z")]
),
),
("mtd", {"@columnalign": "right", "mi": "a"}),
(
"mtd",
MultiDict(
[("@columnalign", "left"), ("mi", ""), ("mo", "="), ("mi", "b")]
),
),
]
),
),
]
)
},
id="align",
),
],
)
def test_converter(latex: str, json: MultiDict) -> None:
    """Check one parametrized case: converting *latex* yields the expected MathML.

    The expected output is described as a BadgerFish-style MultiDict (*json*);
    it is wrapped in the standard <math> envelope, turned into an etree, and
    serialized with ``_convert`` to obtain the reference string.
    """
    # Wrap the case's expected <mrow> content in the canonical MathML root.
    document = {
        "math": {
            "@xmlns": "http://www.w3.org/1998/Math/MathML",
            "@display": "block",
            "mrow": json,
        }
    }
    # BadgerFish translates the dict convention into an ElementTree.
    badgerfish = BadgerFish(dict_type=MultiDict)
    expected_tree = badgerfish.etree(document)
    assert convert(latex, display="block") == _convert(expected_tree[0])
def test_attributes() -> None:
    """The display attribute defaults to "inline" and can be set to "block"."""
    expected_inline = (
        '<math xmlns="http://www.w3.org/1998/Math/MathML" display="inline">'
        "<mrow><mn>1</mn></mrow></math>"
    )
    expected_block = (
        '<math xmlns="http://www.w3.org/1998/Math/MathML" display="block">'
        "<mrow><mn>1</mn></mrow></math>"
    )
    assert convert("1") == expected_inline
    assert convert("1", display="block") == expected_block
| 178,944 | 41.44426 | 120 | py |
latex2mathml | latex2mathml-master/tests/test_walker.py | import string
from typing import Any, Tuple, Union
import pytest
from latex2mathml.exceptions import (
DenominatorNotFoundError,
DoubleSubscriptsError,
DoubleSuperscriptsError,
ExtraLeftOrMissingRightError,
InvalidAlignmentError,
InvalidStyleForGenfracError,
InvalidWidthError,
LimitsMustFollowMathOperatorError,
MissingEndError,
MissingSuperScriptOrSubscriptError,
NumeratorNotFoundError,
)
from latex2mathml.walker import Node, walk
@pytest.mark.parametrize(
"latex, expected",
[
pytest.param(string.ascii_letters, [Node(token=c) for c in string.ascii_letters], id="alphabets"),
pytest.param("{{}}", [Node(token="{}", children=(Node(token="{}", children=()),))], id="empty-group"),
pytest.param(string.digits, [Node(token=string.digits)], id="numbers"),
pytest.param("12.56", [Node(token="12.56")], id="decimals"),
pytest.param("5x", [Node(token="5"), Node(token="x")], id="numbers-and-alphabets"),
pytest.param("5.8x", [Node(token="5.8"), Node(token="x")], id="decimals-and-alphabets"),
pytest.param("3 x", [Node(token="3"), Node(token="x")], id="string-with-space"),
pytest.param("+-*/=()[])]([", [Node(token=c) for c in "+-*/=()[])](["], id="operators"),
pytest.param("3 + 5x - 5y = 7", [Node(token=c) for c in "3+5x-5y=7"], id="numbers-alphabets-and-operators"),
pytest.param(r"\alpha\beta", [Node(token=r"\alpha"), Node(token=r"\beta")], id="symbols"),
pytest.param(
r"\frac2x",
[Node(token=r"\frac", children=(Node(token="2"), Node(token="x")))],
id="symbols-appended-with-number",
),
pytest.param("{a}", [Node(token="{}", children=(Node(token="a"),))], id="single-group"),
pytest.param(
"{a}{b}",
[Node(token="{}", children=(Node(token="a"),)), Node(token="{}", children=(Node(token="b"),))],
id="multiple-groups",
),
pytest.param(
"{a+{b}}",
[
Node(
token="{}",
children=(Node(token="a"), Node(token="+"), Node(token="{}", children=(Node(token="b"),))),
)
],
id="inner-group",
),
pytest.param("a_b", [Node(token="_", children=(Node(token="a"), Node(token="b")))], id="subscript-1"),
pytest.param(
"{a_b}",
[Node(token="{}", children=(Node(token="_", children=(Node(token="a"), Node(token="b"))),))],
id="subscript-2",
),
pytest.param("1_2", [Node(token="_", children=(Node(token="1"), Node(token="2")))], id="subscript-3"),
pytest.param("1.2_2", [Node(token="_", children=(Node(token="1.2"), Node(token="2")))], id="subscript-4"),
pytest.param("a^b", [Node(token="^", children=(Node(token="a"), Node(token="b")))], id="superscript-1"),
pytest.param(
"{a^b}",
[Node(token="{}", children=(Node(token="^", children=(Node(token="a"), Node(token="b"))),))],
id="superscript-2",
),
pytest.param(
"a^{i+1}_3",
[
Node(
token="_^",
children=(
Node(token="a"),
Node(token="3"),
Node(token="{}", children=(Node(token="i"), Node(token="+"), Node(token="1"))),
),
)
],
id="superscript-3",
),
pytest.param(
"a_b^c",
[Node(token="_^", children=(Node(token="a"), Node(token="b"), Node(token="c")))],
id="subscript-and-superscript-1",
),
pytest.param(
"a^b_c",
[Node(token="_^", children=(Node(token="a"), Node(token="c"), Node(token="b")))],
id="subscript-and-superscript-2",
),
pytest.param(
r"\sqrt[3]{2}",
[Node(token=r"\root", children=(Node(token="{}", children=(Node(token="2"),)), Node(token="3")))],
id="root",
),
pytest.param(
r"\frac{1}{2}",
[
Node(
token=r"\frac",
children=(
Node(token="{}", children=(Node(token="1"),)),
Node(token="{}", children=(Node(token="2"),)),
),
),
],
id="fraction-1",
),
pytest.param(
r"1 \over 2",
[Node(token=r"\frac", children=(Node(token="1"), Node(token="2")))],
id="fraction-2",
),
pytest.param(
r"{1 \over 2}",
[Node(token="{}", children=(Node(token=r"\frac", children=(Node(token="1"), Node(token="2"))),))],
id="fraction-3",
),
pytest.param(
r"\left\{\right.",
[Node(token=r"\left", children=(Node(token=r"\right", delimiter="."),), delimiter=r"\{")],
id="null-delimiter-1",
),
pytest.param(
r"\matrix{a & b \\ c & d}",
[
Node(
token=r"\matrix",
children=(
Node(token="a"),
Node(token="&"),
Node(token="b"),
Node(token=r"\\"),
Node(token="c"),
Node(token="&"),
Node(token="d"),
),
alignment="",
)
],
id="matrix-1",
),
pytest.param(
r"\begin{matrix}a & b \\ c & d \end{matrix}",
[
Node(
token=r"\matrix",
children=(
Node(token="a"),
Node(token="&"),
Node(token="b"),
Node(token=r"\\"),
Node(token="c"),
Node(token="&"),
Node(token="d"),
),
alignment="",
)
],
id="matrix-2",
),
pytest.param(
r"\left\{ \begin{array} { l } { 3x - 5y + 4z = 0} \\ { x - y + 8z = 0} \\ { 2x - 6y + z = 0} "
r"\end{array}\right.",
[
Node(
token=r"\left",
children=(
Node(
token=r"\array",
alignment="l",
children=(
Node(
token="{}",
children=(
Node(token="3"),
Node(token="x"),
Node(token="-"),
Node(token="5"),
Node(token="y"),
Node(token="+"),
Node(token="4"),
Node(token="z"),
Node(token="="),
Node(token="0"),
),
),
Node(token=r"\\"),
Node(
token="{}",
children=(
Node(token="x"),
Node(token="-"),
Node(token="y"),
Node(token="+"),
Node(token="8"),
Node(token="z"),
Node(token="="),
Node(token="0"),
),
),
Node(token=r"\\"),
Node(
token="{}",
children=(
Node(token="2"),
Node(token="x"),
Node(token="-"),
Node(token="6"),
Node(token="y"),
Node(token="+"),
Node(token="z"),
Node(token="="),
Node(token="0"),
),
),
),
),
Node(token=r"\right", delimiter="."),
),
delimiter=r"\{",
)
],
id="null-delimiter-2",
),
pytest.param(
r"\begin{matrix*}[r]a & b \\ c & d \end{matrix*}",
[
Node(
token=r"\matrix*",
children=(
Node(token="a"),
Node(token="&"),
Node(token="b"),
Node(token=r"\\"),
Node(token="c"),
Node(token="&"),
Node(token="d"),
),
alignment="r",
)
],
id="matrix-with-alignment",
),
pytest.param(
r"\begin{matrix*}[]a & b \\ c & d \end{matrix*}",
[
Node(
token=r"\matrix*",
children=(
Node(token="a"),
Node(token="&"),
Node(token="b"),
Node(token=r"\\"),
Node(token="c"),
Node(token="&"),
Node(token="d"),
),
alignment="",
)
],
id="matrix-with-empty-alignment",
),
pytest.param(
r"\begin{matrix}-a & b \\ c & d \end{matrix}",
[
Node(
token=r"\matrix",
children=(
Node(token="-"),
Node(token="a"),
Node(token="&"),
Node(token="b"),
Node(token=r"\\"),
Node(token="c"),
Node(token="&"),
Node(token="d"),
),
alignment="",
)
],
id="matrix-with-negative-sign",
),
pytest.param(
r"\begin{matrix}-\end{matrix}",
[Node(token=r"\matrix", children=(Node(token="-"),), alignment="")],
id="matrix-with-just-negative-sign-1",
),
pytest.param(
r"\begin{matrix}a_{1} & b_{2} \\ c_{3} & d_{4} \end{matrix}",
[
Node(
token=r"\matrix",
children=(
Node(token="_", children=(Node(token="a"), Node(token="{}", children=(Node(token="1"),)))),
Node(token="&"),
Node(token="_", children=(Node(token="b"), Node(token="{}", children=(Node(token="2"),)))),
Node(token=r"\\"),
Node(token="_", children=(Node(token="c"), Node(token="{}", children=(Node(token="3"),)))),
Node(token="&"),
Node(token="_", children=(Node(token="d"), Node(token="{}", children=(Node(token="4"),)))),
),
alignment="",
),
],
id="complex-matrix",
),
pytest.param(
r"\begin{array}{cc} 1 & 2 \\ 3 & 4 \end{array}",
[
Node(
token=r"\array",
children=(
Node(token="1"),
Node(token="&"),
Node(token="2"),
Node(token=r"\\"),
Node(token="3"),
Node(token="&"),
Node(token="4"),
),
alignment="cc",
)
],
id="simple-array",
),
pytest.param(
r"""\begin{bmatrix}
a_{1,1} & a_{1,2} & \cdots & a_{1,n} \\
a_{2,1} & a_{2,2} & \cdots & a_{2,n} \\
\vdots & \vdots & \ddots & \vdots \\
a_{m,1} & a_{m,2} & \cdots & a_{m,n}
\end{bmatrix}""",
[
Node(
token=r"\bmatrix",
children=(
Node(
token="_",
children=(
Node(token="a"),
Node(token="{}", children=(Node(token="1"), Node(token=","), Node(token="1"))),
),
),
Node(token="&"),
Node(
token="_",
children=(
Node(token="a"),
Node(token="{}", children=(Node(token="1"), Node(token=","), Node(token="2"))),
),
),
Node(token="&"),
Node(token=r"\cdots"),
Node(token="&"),
Node(
token="_",
children=(
Node(token="a"),
Node(token="{}", children=(Node(token="1"), Node(token=","), Node(token="n"))),
),
),
Node(token=r"\\"),
Node(
token="_",
children=(
Node(token="a"),
Node(token="{}", children=(Node(token="2"), Node(token=","), Node(token="1"))),
),
),
Node(token="&"),
Node(
token="_",
children=(
Node(token="a"),
Node(token="{}", children=(Node(token="2"), Node(token=","), Node(token="2"))),
),
),
Node(token="&"),
Node(token=r"\cdots"),
Node(token="&"),
Node(
token="_",
children=(
Node(token="a"),
Node(token="{}", children=(Node(token="2"), Node(token=","), Node(token="n"))),
),
),
Node(token=r"\\"),
Node(token=r"\vdots"),
Node(token="&"),
Node(token=r"\vdots"),
Node(token="&"),
Node(token=r"\ddots"),
Node(token="&"),
Node(token=r"\vdots"),
Node(token=r"\\"),
Node(
token="_",
children=(
Node(token="a"),
Node(token="{}", children=(Node(token="m"), Node(token=","), Node(token="1"))),
),
),
Node(token="&"),
Node(
token="_",
children=(
Node(token="a"),
Node(token="{}", children=(Node(token="m"), Node(token=","), Node(token="2"))),
),
),
Node(token="&"),
Node(token=r"\cdots"),
Node(token="&"),
Node(
token="_",
children=(
Node(token="a"),
Node(token="{}", children=(Node(token="m"), Node(token=","), Node(token="n"))),
),
),
),
alignment="",
)
],
id="issue-33",
),
pytest.param(
r"\sqrt { ( - 25 ) ^ { 2 } } = \pm 25",
[
Node(
token=r"\sqrt",
children=(
Node(
token="{}",
children=(
Node(token="("),
Node(token="-"),
Node(token="25"),
Node(
token="^", children=(Node(token=")"), Node(token="{}", children=(Node(token="2"),)))
),
),
),
),
),
Node(token="="),
Node(token=r"\pm"),
Node(token="25"),
],
id="issue-42",
),
pytest.param(
r"\left(- x^{3} + 5\right)^{5}",
[
Node(
token="^",
children=(
Node(
token=r"\left",
children=(
Node(token="-"),
Node(
token="^", children=(Node(token="x"), Node(token="{}", children=(Node(token="3"),)))
),
Node(token="+"),
Node(token="5"),
Node(token=r"\right", delimiter=")"),
),
delimiter="(",
),
Node(token="{}", children=(Node(token="5"),)),
),
)
],
id="issue-44",
),
pytest.param(
r"\begin{array}{rcl}ABC&=&a\\A&=&abc\end{array}",
[
Node(
token=r"\array",
children=(
Node(token="A"),
Node(token="B"),
Node(token="C"),
Node(token="&"),
Node(token="="),
Node(token="&"),
Node(token="a"),
Node(token=r"\\"),
Node(token="A"),
Node(token="&"),
Node(token="="),
Node(token="&"),
Node(token="a"),
Node(token="b"),
Node(token="c"),
),
alignment="rcl",
)
],
id="issue-55",
),
pytest.param(
r"\begin{array}{cr} 1 & 2 \\ 3 & 4 \\ \hline 5 & 6 \end{array}",
[
Node(
token=r"\array",
children=(
Node(token="1"),
Node(token="&"),
Node(token="2"),
Node(token=r"\\"),
Node(token="3"),
Node(token="&"),
Node(token="4"),
Node(token=r"\\"),
Node(token=r"\hline"),
Node(token="5"),
Node(token="&"),
Node(token="6"),
),
alignment="cr",
)
],
id="array-with-horizontal-line",
),
pytest.param(
r"\begin{array}{cr} 1 & 2 \\ \hline 3 & 4 \\ \hline 5 & 6 \end{array}",
[
Node(
token=r"\array",
children=(
Node(token="1"),
Node(token="&"),
Node(token="2"),
Node(token=r"\\"),
Node(token=r"\hline"),
Node(token="3"),
Node(token="&"),
Node(token="4"),
Node(token=r"\\"),
Node(token=r"\hline"),
Node(token="5"),
Node(token="&"),
Node(token="6"),
),
alignment="cr",
)
],
id="array-with-horizontal-lines",
),
pytest.param(
r"\mathrm{...}",
[
Node(
token=r"\mathrm",
children=(Node(token="{}", children=(Node(token="."), Node(token="."), Node(token="."))),),
)
],
id="issue-60",
),
pytest.param(
r"\frac{x + 4}{x + \frac{123 \left(\sqrt{x} + 5\right)}{x + 4} - 8}",
[
Node(
token=r"\frac",
children=(
Node(token="{}", children=(Node(token="x"), Node(token="+"), Node(token="4"))),
Node(
token="{}",
children=(
Node(token="x"),
Node(token="+"),
Node(
token=r"\frac",
children=(
Node(
token="{}",
children=(
Node(token="123"),
Node(
token=r"\left",
children=(
Node(
token=r"\sqrt",
children=(Node(token="{}", children=(Node(token="x"),)),),
),
Node(token="+"),
Node(token="5"),
Node(token=r"\right", delimiter=")"),
),
delimiter="(",
),
),
),
Node(token="{}", children=(Node(token="x"), Node(token="+"), Node(token="4"))),
),
),
Node(token="-"),
Node(token="8"),
),
),
),
)
],
id="issue-61",
),
pytest.param(
r"\sqrt {\sqrt {\left( x^{3}\right) + v}}",
[
Node(
token=r"\sqrt",
children=(
Node(
token="{}",
children=(
Node(
token=r"\sqrt",
children=(
Node(
token="{}",
children=(
Node(
token=r"\left",
children=(
Node(
token="^",
children=(
Node(token="x"),
Node(token="{}", children=(Node(token="3"),)),
),
),
Node(token=r"\right", delimiter=")"),
),
delimiter="(",
),
Node(token="+"),
Node(token="v"),
),
),
),
),
),
),
),
)
],
id="issue-63",
),
pytest.param(
r"\left(\left(x\right)\right)",
[
Node(
token=r"\left",
children=(
Node(
token=r"\left",
children=(Node(token="x"), Node(token=r"\right", delimiter=")")),
delimiter="(",
),
Node(token=r"\right", delimiter=")"),
),
delimiter="(",
)
],
id=r"nested-left-right",
),
pytest.param(
r"\left(x\right){5}",
[
Node(
token=r"\left",
children=(Node(token="x"), Node(token=r"\right", delimiter=")")),
delimiter="(",
),
Node(token="{}", children=(Node(token="5"),)),
],
id=r"group-after-right",
),
pytest.param(
r"\sqrt[3]{}",
[Node(token=r"\root", children=(Node(token="{}", children=()), Node(token="3")))],
id="empty-nth-root",
),
pytest.param(
r"1_{}", [Node(token="_", children=(Node(token="1"), Node(token="{}", children=())))], id="empty-subscript"
),
pytest.param(
r"\array{}",
[Node(token=r"\array", children=(Node(token="{}", children=()),), alignment="")],
id="empty-array",
),
pytest.param(
r"\array{{}}",
[Node(token=r"\array", children=(Node(token="{}", children=()),), alignment="")],
id="empty-array-with-empty-group",
),
pytest.param(
r"\left[\begin{matrix}1 & 0 & 0 & 0\\0 & 1 & 0 & 0\\0 & 0 & 1 & 0\\0 & 0 & 0 & 1\end{matrix}\right]",
[
Node(
token=r"\left",
children=(
Node(
token=r"\matrix",
children=(
Node(token="1"),
Node(token="&"),
Node(token="0"),
Node(token="&"),
Node(token="0"),
Node(token="&"),
Node(token="0"),
Node(token=r"\\"),
Node(token="0"),
Node(token="&"),
Node(token="1"),
Node(token="&"),
Node(token="0"),
Node(token="&"),
Node(token="0"),
Node(token=r"\\"),
Node(token="0"),
Node(token="&"),
Node(token="0"),
Node(token="&"),
Node(token="1"),
Node(token="&"),
Node(token="0"),
Node(token=r"\\"),
Node(token="0"),
Node(token="&"),
Node(token="0"),
Node(token="&"),
Node(token="0"),
Node(token="&"),
Node(token="1"),
),
alignment="",
),
Node(token=r"\right", delimiter="]"),
),
delimiter="[",
)
],
id="issue-77",
),
pytest.param(
r"\left({x}\right)",
[
Node(
token=r"\left",
children=(
Node(token="{}", children=(Node(token="x"),)),
Node(token=r"\right", delimiter=")"),
),
delimiter="(",
)
],
id="issue-78-1",
),
pytest.param(
r"\left(\frac{x^{x^{x}}}{x}\right)",
[
Node(
token=r"\left",
children=(
Node(
token=r"\frac",
children=(
Node(
token="{}",
children=(
Node(
token="^",
children=(
Node(token="x"),
Node(
token="{}",
children=(
Node(
token="^",
children=(
Node(token="x"),
Node(token="{}", children=(Node(token="x"),)),
),
),
),
),
),
),
),
),
Node(token="{}", children=(Node(token="x"),)),
),
),
Node(token=r"\right", delimiter=")"),
),
delimiter="(",
)
],
id="issue-78-2",
),
pytest.param(
r"x^{x^{x^{x}}} \left(x^{x^{x}} \left(x^{x} \left(\log{\left(x \right)} + 1\right) \log{\left(x \right)} + "
r"\frac{x^{x}}{x}\right) \log{\left(x \right)} + \frac{x^{x^{x}}}{x}\right)",
[
Node(
token="^",
children=(
Node(token="x"),
Node(
token="{}",
children=(
Node(
token="^",
children=(
Node(token="x"),
Node(
token="{}",
children=(
Node(
token="^",
children=(
Node(token="x"),
Node(token="{}", children=(Node(token="x"),)),
),
),
),
),
),
),
),
),
),
),
Node(
token=r"\left",
children=(
Node(
token="^",
children=(
Node(token="x"),
Node(
token="{}",
children=(
Node(
token="^",
children=(Node(token="x"), Node(token="{}", children=(Node(token="x"),))),
),
),
),
),
),
Node(
token=r"\left",
children=(
Node(
token="^", children=(Node(token="x"), Node(token="{}", children=(Node(token="x"),)))
),
Node(
token=r"\left",
children=(
Node(token=r"\log"),
Node(
token="{}",
children=(
Node(
token=r"\left",
children=(Node(token="x"), Node(token=r"\right", delimiter=")")),
delimiter="(",
),
),
),
Node(token="+"),
Node(token="1"),
Node(token=r"\right", delimiter=")"),
),
delimiter="(",
),
Node(token=r"\log"),
Node(
token="{}",
children=(
Node(
token=r"\left",
children=(Node(token="x"), Node(token=r"\right", delimiter=")")),
delimiter="(",
),
),
),
Node(token="+"),
Node(
token=r"\frac",
children=(
Node(
token="{}",
children=(
Node(
token="^",
children=(
Node(token="x"),
Node(token="{}", children=(Node(token="x"),)),
),
),
),
),
Node(token="{}", children=(Node(token="x"),)),
),
),
Node(token=r"\right", delimiter=")"),
),
delimiter="(",
),
Node(token=r"\log"),
Node(
token="{}",
children=(
Node(
token=r"\left",
children=(Node(token="x"), Node(token=r"\right", delimiter=")")),
delimiter="(",
),
),
),
Node(token="+"),
Node(
token=r"\frac",
children=(
Node(
token="{}",
children=(
Node(
token="^",
children=(
Node(token="x"),
Node(
token="{}",
children=(
Node(
token="^",
children=(
Node(token="x"),
Node(token="{}", children=(Node(token="x"),)),
),
),
),
),
),
),
),
),
Node(token="{}", children=(Node(token="x"),)),
),
),
Node(token=r"\right", delimiter=")"),
),
delimiter="(",
),
],
id="issue-78-3",
),
pytest.param(
r"\log_2{x}",
[
Node(token="_", children=(Node(token=r"\log"), Node(token="2"))),
Node(token="{}", children=(Node(token="x"),)),
],
id="logarithm-with-base",
),
pytest.param(
r"\sqrt[]{3}",
[Node(token=r"\sqrt", children=(Node(token="{}", children=(Node(token="3"),)),))],
id="issue-79-empty-root",
),
pytest.param(
r"\frac{3}{\frac{1}{2}{x}^{2}}",
[
Node(
token=r"\frac",
children=(
Node(token="{}", children=(Node(token="3"),)),
Node(
token="{}",
children=(
Node(
token=r"\frac",
children=(
Node(token="{}", children=(Node(token="1"),)),
Node(token="{}", children=(Node(token="2"),)),
),
),
Node(
token="^",
children=(
Node(token="{}", children=(Node(token="x"),)),
Node(token="{}", children=(Node(token="2"),)),
),
),
),
),
),
)
],
id="issue-79-exponent-after-fraction",
),
pytest.param(
r"\frac{3}{\frac{1}{2}{x}^{2}-\frac{3\sqrt[]{3}}{2}x+3}",
[
Node(
token=r"\frac",
children=(
Node(token="{}", children=(Node(token="3"),)),
Node(
token="{}",
children=(
Node(
token=r"\frac",
children=(
Node(token="{}", children=(Node(token="1"),)),
Node(token="{}", children=(Node(token="2"),)),
),
),
Node(
token="^",
children=(
Node(token="{}", children=(Node(token="x"),)),
Node(token="{}", children=(Node(token="2"),)),
),
),
Node(token="-"),
Node(
token=r"\frac",
children=(
Node(
token="{}",
children=(
Node(token="3"),
Node(
token=r"\sqrt",
children=(Node(token="{}", children=(Node(token="3"),)),),
),
),
),
Node(token="{}", children=(Node(token="2"),)),
),
),
Node(token="x"),
Node(token="+"),
Node(token="3"),
),
),
),
)
],
id="issue-79",
),
pytest.param(
"^3", [Node(token="^", children=(Node(token=""), Node(token="3")))], id="superscript-without-base-works"
),
pytest.param(
"_3", [Node(token="_", children=(Node(token=""), Node(token="3")))], id="subscript-without-base-works"
),
pytest.param(
r"\lim_{x \to +\infty} f(x)",
[
Node(
token="_",
children=(
Node(token=r"\lim"),
Node(
token="{}",
children=(Node(token="x"), Node(token=r"\to"), Node(token="+"), Node(token=r"\infty")),
),
),
),
Node(token="f"),
Node(token="("),
Node(token="x"),
Node(token=")"),
],
id="limit-at-plus-infinity",
),
pytest.param(
r"\inf_{x > s}f(x)",
[
Node(
token="_",
children=(
Node(token=r"\inf"),
Node(token="{}", children=(Node(token="x"), Node(token=">"), Node(token="s"))),
),
),
Node(token="f"),
Node(token="("),
Node(token="x"),
Node(token=")"),
],
id="inf",
),
pytest.param(
r"\sup_{x \in \mathbb{R}}f(x)",
[
Node(
token="_",
children=(
Node(token=r"\sup"),
Node(token="{}", children=(Node(token="x"), Node(token=r"\in"), Node(token="ℝ"))),
),
),
Node(token="f"),
Node(token="("),
Node(token="x"),
Node(token=")"),
],
id="sup",
),
pytest.param(
r"\max_{x \in [a,b]}f(x)",
[
Node(
token="_",
children=(
Node(token=r"\max"),
Node(
token="{}",
children=(
Node(token="x"),
Node(token=r"\in"),
Node(token="["),
Node(token="a"),
Node(token=","),
Node(token="b"),
Node(token="]"),
),
),
),
),
Node(token="f"),
Node(token="("),
Node(token="x"),
Node(token=")"),
],
id="max",
),
pytest.param(
r"\min_{x \in [\alpha,\beta]}f(x)",
[
Node(
token="_",
children=(
Node(token=r"\min"),
Node(
token="{}",
children=(
Node(token="x"),
Node(token=r"\in"),
Node(token="["),
Node(token=r"\alpha"),
Node(token=","),
Node(token=r"\beta"),
Node(token="]"),
),
),
),
),
Node(token="f"),
Node(token="("),
Node(token="x"),
Node(token=")"),
],
id="min",
),
pytest.param(
r"\int\limits_{0}^{\pi}",
[
Node(
token="_^",
children=(
Node(token=r"\int"),
Node(token="{}", children=(Node(token="0"),)),
Node(token="{}", children=(Node(token=r"\pi"),)),
),
modifier=r"\limits",
),
],
id="issue-76",
),
pytest.param(
r"\sum_{\substack{1\le i\le n\\ i\ne j}}",
[
Node(
token="_",
children=(
Node(token=r"\sum"),
Node(
token="{}",
children=(
Node(
token=r"\substack",
children=(
Node(token="1"),
Node(token=r"\le"),
Node(token="i"),
Node(token=r"\le"),
Node(token="n"),
Node(token=r"\\"),
Node(token="i"),
Node(token=r"\ne"),
Node(token="j"),
),
alignment="",
),
),
),
),
)
],
id="issue-75",
),
pytest.param(
r"\mathrm{AA}",
[Node(token=r"\mathrm", children=(Node(token="{}", children=(Node(token="A"), Node(token="A"))),))],
id="issue-94",
),
pytest.param(
r"(1+(x-y)^{2})",
[
Node(token="("),
Node(token="1"),
Node(token="+"),
Node(token="("),
Node(token="x"),
Node(token="-"),
Node(token="y"),
Node(token="^", children=(Node(token=")"), Node(token="{}", children=(Node(token="2"),)))),
Node(token=")"),
],
id="issue-96",
),
pytest.param(
r"p_{\max}",
[Node(token="_", children=(Node(token="p"), Node(token="{}", children=(Node(token=r"\max"),))))],
id="issue-98",
),
pytest.param(
r"\vec{AB}",
[Node(token=r"\vec", children=(Node(token="{}", children=(Node(token="A"), Node(token="B"))),))],
id="issue-103",
),
pytest.param(r"\max f", [Node(token=r"\max"), Node(token="f")], id="issue-108-1"),
pytest.param(
r"\max \{a, b, c\}",
[
Node(token=r"\max"),
Node(token=r"\{"),
Node(token="a"),
Node(token=","),
Node(token="b"),
Node(token=","),
Node(token="c"),
Node(token=r"\}"),
],
id="issue-108-2",
),
pytest.param(
r"\min{(x, y)}",
[
Node(token=r"\min"),
Node(
token="{}",
children=(Node(token="("), Node(token="x"), Node(token=","), Node(token="y"), Node(token=")")),
),
],
id="issue-108-3",
),
pytest.param(
r"x = {-b \pm \sqrt{b^2-4ac} \over 2a}",
[
Node(token="x"),
Node(token="="),
Node(
token="{}",
children=(
Node(
token=r"\frac",
children=(
Node(
token="{}",
children=(
Node(token="-"),
Node(token="b"),
Node(token=r"\pm"),
Node(
token=r"\sqrt",
children=(
Node(
token="{}",
children=(
Node(token="^", children=(Node(token="b"), Node(token="2"))),
Node(token="-"),
Node(token="4"),
Node(token="a"),
Node(token="c"),
),
),
),
),
),
),
Node(token="{}", children=(Node(token="2"), Node(token="a"))),
),
),
),
),
],
id="quadratic-equation",
),
pytest.param(
r"\binom{2}{3}",
[
Node(
token=r"\binom",
children=(
Node(token="{}", children=(Node(token="2"),)),
Node(token="{}", children=(Node(token="3"),)),
),
)
],
id="binomial",
),
pytest.param(
r"\overline{a}",
[Node(token=r"\overline", children=(Node(token="{}", children=(Node(token="a"),)),))],
id="overline",
),
pytest.param(
r"\bar{a}",
[Node(token=r"\bar", children=(Node(token="{}", children=(Node(token="a"),)),))],
id="bar",
),
pytest.param(
r"\underline{a}",
[Node(token=r"\underline", children=(Node(token="{}", children=(Node(token="a"),)),))],
id="underline",
),
pytest.param(
r"\overrightarrow{a}",
[Node(token=r"\overrightarrow", children=(Node(token="{}", children=(Node(token="a"),)),))],
id="overrightarrow",
),
pytest.param(r"\text{Let}", [Node(token=r"\text", text="Let")], id="text"),
pytest.param(
r"F(a,n)=\overset{a-a-a\cdots-a}{}ntext{个}a",
[
Node(token="F"),
Node(token="("),
Node(token="a"),
Node(token=","),
Node(token="n"),
Node(token=")"),
Node(token="="),
Node(
token=r"\overset",
children=(
Node(token="{}", children=()),
Node(
token="{}",
children=(
Node(token="a"),
Node(token="-"),
Node(token="a"),
Node(token="-"),
Node(token="a"),
Node(token=r"\cdots"),
Node(token="-"),
Node(token="a"),
),
),
),
),
Node(token="n"),
Node(token="t"),
Node(token="e"),
Node(token="x"),
Node(token="t"),
Node(token="{}", children=(Node(token="个"),)),
Node(token="a"),
],
id="issue-125-overset",
),
pytest.param(
r"|\hspace1em|\hspace{10ex}|",
[
Node(token="|"),
Node(token=r"\hspace", attributes={"width": "1em"}),
Node(token="|"),
Node(token=r"\hspace", attributes={"width": "10ex"}),
Node(token="|"),
],
id="issue-129-hspace",
),
pytest.param(
"f'(x) = 2x, f''(x) = 2",
[
Node(token="^", children=(Node(token="f"), Node(token=r"\prime"))),
Node(token="("),
Node(token="x"),
Node(token=")"),
Node(token="="),
Node(token="2"),
Node(token="x"),
Node(token=","),
Node(token="^", children=(Node(token="f"), Node(token=r"\dprime"))),
Node(token="("),
Node(token="x"),
Node(token=")"),
Node(token="="),
Node(token="2"),
],
id="prime",
),
pytest.param(
r"{a \above 1pt b} + {c \above {1.5pt} d}",
[
Node(
token="{}",
children=(
Node(
token=r"\frac",
children=(Node(token="a"), Node(token="b")),
attributes={"linethickness": "1pt"},
),
),
),
Node(token="+"),
Node(
token="{}",
children=(
Node(
token=r"\frac",
children=(Node(token="c"), Node(token="d")),
attributes={"linethickness": "1.5pt"},
),
),
),
],
id="above",
),
pytest.param(
r"a \atop {b \atopwithdelims \{ \} c}",
[
Node(
token=r"\frac",
children=(
Node(token="a"),
Node(
token="{}",
children=(
Node(
token=r"\frac",
children=(
Node(token="b"),
Node(token="c"),
),
attributes={"linethickness": "0"},
delimiter="{}",
),
),
),
),
attributes={"linethickness": "0"},
),
],
id="atop-and-atopwithdelims",
),
pytest.param(
r"{a \abovewithdelims [ ] 1pt b} + {c \abovewithdelims . . {1.5pt} d}",
[
Node(
token="{}",
children=(
Node(
token=r"\frac",
children=(
Node(token="a"),
Node(token="b"),
),
attributes={"linethickness": "1pt"},
delimiter="[]",
),
),
),
Node(token="+"),
Node(
token="{}",
children=(
Node(
token=r"\frac",
children=(
Node(token="c"),
Node(token="d"),
),
attributes={"linethickness": "1.5pt"},
delimiter="..",
),
),
),
],
id="abovewithdelims",
),
# We don't want \Huge or \huge to make its siblings as children as it breaks groupings on deep-nesting
pytest.param(
r"[{[\Huge[\huge[[}[",
[
Node(token="["),
Node(
token="{}",
children=(
Node(token="["),
Node(token=r"\Huge"),
Node(token="["),
Node(token=r"\huge"),
Node(token="["),
Node(token="["),
),
),
Node(token="["),
],
id="huge",
),
pytest.param(
r"X_\mathrm{min}",
[
Node(
token="_",
children=(
Node(token="X"),
Node(
token=r"\mathrm",
children=(Node(token="{}", children=(Node(token="m"), Node(token="i"), Node(token="n"))),),
),
),
)
],
id="issue-203-1",
),
pytest.param(
r"a\mathop{t}b\mathop{t}c",
[
Node(token="a"),
Node(token=r"\mathop", children=(Node(token="{}", children=(Node(token="t"),)),)),
Node(token="b"),
Node(token=r"\mathop", children=(Node(token="{}", children=(Node(token="t"),)),)),
Node(token="c"),
],
id="issue-203-2",
),
pytest.param(r"\hbox{E=mc^2}", [Node(token=r"\hbox", text="E=mc^2")], id="hbox"),
pytest.param(
r"\style{color:red}{x+1}",
[
Node(
token="{}",
children=(Node(token="x"), Node(token="+"), Node(token="1")),
attributes={"style": "color:red"},
)
],
id="style",
),
pytest.param(
r"\sideset{_1^2}{_3^4}\sum",
[
Node(
token=r"\sideset",
children=(
Node(
token="_^",
children=(
Node(
token=r"\vphantom",
children=(Node(token=r"\sum", attributes={"movablelimits": "false"}),),
),
Node(token="1"),
Node(token="2"),
),
),
Node(
token="_^",
children=(
Node(token=r"\sum", attributes={"movablelimits": "false"}),
Node(token="3"),
Node(token="4"),
),
),
),
)
],
id="sideset",
),
pytest.param(
r"\sideset{^2}{_3}\sum",
[
Node(
token=r"\sideset",
children=(
Node(
token="^",
children=(
Node(
token=r"\vphantom",
children=(Node(token=r"\sum", attributes={"movablelimits": "false"}),),
),
Node(token="2"),
),
),
Node(
token="_",
children=(
Node(token=r"\sum", attributes={"movablelimits": "false"}),
Node(token="3"),
),
),
),
)
],
id="sideset-2",
),
pytest.param(
r"\root 3 \of x", [Node(token=r"\root", children=(Node(token="x"), Node(token="3")))], id="root-of"
),
pytest.param(
r"\root n+1 \of x + 2",
[
Node(
token=r"\root",
children=(
Node(token="x"),
Node(token="{}", children=(Node(token="n"), Node(token="+"), Node(token="1"))),
),
),
Node(token="+"),
Node(token="2"),
],
id="root-of-multiple",
),
pytest.param(
r"\root \of x",
[Node(token=r"\root", children=(Node(token="x"), Node(token="{}", children=())))],
id="root-of-without-root",
),
pytest.param(
r"\skew7\hat a\skew{8}\hat b",
[
Node(
token=r"\skew",
children=(Node(token=r"\hat", children=(Node(token="a"),)),),
attributes={"width": "0.389em"},
),
Node(
token=r"\skew",
children=(Node(token=r"\hat", children=(Node(token="b"),)),),
attributes={"width": "0.444em"},
),
],
id="skew-hat",
),
pytest.param(r"\xleftarrow x", [Node(token=r"\xleftarrow", children=(Node(token="x"),))], id="xleftarrow"),
pytest.param(
r"\xleftarrow[y] x",
[Node(token=r"\xleftarrow", children=(Node(token="{}", children=(Node(token="y"),)), Node(token="x")))],
id="xleftarrow-with-argument",
),
pytest.param(r"\xrightarrow x", [Node(token=r"\xrightarrow", children=(Node(token="x"),))], id="xrightarrow"),
pytest.param(
r"\xrightarrow[y] x",
[Node(token=r"\xrightarrow", children=(Node(token="{}", children=(Node(token="y"),)), Node(token="x")))],
id="xrightarrow-with-argument",
),
pytest.param(
r"\not \in \not\ni \not a \not\equiv \not\operatorname{R}\not",
[
Node(token=r"\nin"),
Node(token=r"\nni"),
Node(token=r"\not"),
Node(token="a"),
Node(token=r"\nequiv"),
Node(token=r"\not"),
Node(token=r"\operatorname{R}"),
Node(token=r"\not"),
],
id="not",
),
pytest.param(
"x_1'",
[
Node(
token="_^",
children=(Node(token="x"), Node(token="1"), Node(token="\\prime")),
),
],
id="issue-392-subscript",
),
pytest.param(
"x'^2",
[
Node(
token="^",
children=(
Node(token="x"),
Node(
token="{}",
children=(Node(token="\\prime"), Node(token="2")),
),
),
)
],
id="issue-392-superscript",
),
],
)
def test_walk(latex: str, expected: list) -> None:
    """Parsing each LaTeX string must yield exactly the expected list of ``Node`` trees."""
    assert walk(latex) == expected
@pytest.mark.parametrize(
    "latex, exception",
    [
        pytest.param(r"\right)", ExtraLeftOrMissingRightError, id=r"missing-\left"),
        pytest.param(r"\left(x", ExtraLeftOrMissingRightError, id=r"missing-\right"),
        pytest.param(r"\middle|", ExtraLeftOrMissingRightError, id=r"missing-\left"),
        pytest.param(r"{ \over 2}", NumeratorNotFoundError, id="fraction-without-numerator"),
        pytest.param(r"{1 \over }", DenominatorNotFoundError, id="fraction-without-denominator"),
        pytest.param(r"1_", MissingSuperScriptOrSubscriptError, id="missing-subscript"),
        pytest.param(r"1^", MissingSuperScriptOrSubscriptError, id="missing-superscript"),
        pytest.param(r"1_2_3", DoubleSubscriptsError, id="double-subscript"),
        pytest.param(r"1^2^3", DoubleSuperscriptsError, id="double-superscript"),
        pytest.param(r"\genfrac(){1pt}4ab", InvalidStyleForGenfracError, id="invalid-style-for-genfrac"),
        pytest.param(r"\begin{array}\end{array1}", MissingEndError, id="missing-end"),
        pytest.param(r"\begin{matrix*}[xxx]\end{matrix*}", InvalidAlignmentError, id="invalid-alignment"),
        pytest.param(r"\skew{}\hat b", InvalidWidthError, id="invalid-width"),
        pytest.param(r"\skew{X}\hat b", InvalidWidthError, id="invalid-width-not-number"),
        pytest.param(r"\limits^{\pi}", LimitsMustFollowMathOperatorError, id="limits-must-follow-math-operator-blank"),
        pytest.param(r"5\limits^{\pi}", LimitsMustFollowMathOperatorError, id="limits-must-follow-math-operator"),
        pytest.param(r"x^2'", DoubleSuperscriptsError, id="issue-392"),
    ],
)
def test_error(latex: str, exception: Union[Tuple[Any, ...], Any]) -> None:
    """Each malformed LaTeX input must make ``walk`` raise the expected exception type."""
    with pytest.raises(exception):
        walk(latex)
| 70,402 | 39.070006 | 120 | py |
latex2mathml | latex2mathml-master/tests/__init__.py | 0 | 0 | 0 | py | |
latex2mathml | latex2mathml-master/tests/test_tokenizer.py | import string
import pytest
from latex2mathml.tokenizer import tokenize
@pytest.mark.parametrize(
"latex, expected",
[
pytest.param("\\", ["\\"], id="single-backslash"),
pytest.param(string.ascii_letters, list(string.ascii_letters), id="alphabets"),
pytest.param(string.digits, [string.digits], id="numbers"),
pytest.param("123\\", ["123", "\\"], id="backslash-after-number"),
pytest.param(r"123\\", ["123", r"\\"], id="double-backslash-after-number"),
pytest.param("12.56", ["12.56"], id="decimal"),
pytest.param(r"12.\\", ["12", ".", r"\\"], id="incomplete-decimal"),
pytest.param("5x", list("5x"), id="numbers-and-alphabets"),
pytest.param("5.8x", ["5.8", "x"], id="decimals-and-alphabets"),
pytest.param("3 x", ["3", "x"], id="string-with-spaces"),
pytest.param("+-*/=()[]_^{}", list("+-*/=()[]_^{}"), id="operators"),
pytest.param(
"3 + 5x - 5y = 7", ["3", "+", "5", "x", "-", "5", "y", "=", "7"], id="numbers-alphabets-and-operators"
),
pytest.param(r"\alpha\beta", [r"\alpha", r"\beta"], id="symbols"),
pytest.param(r"\frac2x", [r"\frac", "2", "x"], id="symbols-appended-with-number"),
pytest.param(
r"\begin{matrix}a & b \\ c & d \end{matrix}",
[r"\begin{matrix}", "a", "&", "b", r"\\", "c", "&", "d", r"\end{matrix}"],
id="matrix",
),
pytest.param(
r"\begin{matrix*}[r]a & b \\ c & d \end{matrix*}",
[
r"\begin{matrix*}",
"[",
"r",
"]",
"a",
"&",
"b",
r"\\",
"c",
"&",
"d",
r"\end{matrix*}",
],
id="matrix-with-alignment",
),
pytest.param(
r"\begin{matrix}-a & b \\ c & d \end{matrix}",
[r"\begin{matrix}", "-", "a", "&", "b", r"\\", "c", "&", "d", r"\end{matrix}"],
id="matrix-with-negative-sign",
),
pytest.param(
r"\begin{array}{cc} 1 & 2 \\ 3 & 4 \end{array}",
[
r"\begin{array}",
"{",
"c",
"c",
"}",
"1",
"&",
"2",
r"\\",
"3",
"&",
"4",
r"\end{array}",
],
id="simple-array",
),
pytest.param("a_{2,n}", ["a", "_", "{", "2", ",", "n", "}"], id="subscript"),
pytest.param("a^{i+1}_3", ["a", "^", "{", "i", "+", "1", "}", "_", "3"], id="superscript-with-curly-braces"),
pytest.param(
r"""\begin{bmatrix}
a_{1,1} & a_{1,2} & \cdots & a_{1,n} \\
a_{2,1} & a_{2,2} & \cdots & a_{2,n} \\
\vdots & \vdots & \ddots & \vdots \\
a_{m,1} & a_{m,2} & \cdots & a_{m,n}
\end{bmatrix}""",
[
r"\begin{bmatrix}",
"a",
"_",
"{",
"1",
",",
"1",
"}",
"&",
"a",
"_",
"{",
"1",
",",
"2",
"}",
"&",
r"\cdots",
"&",
"a",
"_",
"{",
"1",
",",
"n",
"}",
r"\\",
"a",
"_",
"{",
"2",
",",
"1",
"}",
"&",
"a",
"_",
"{",
"2",
",",
"2",
"}",
"&",
r"\cdots",
"&",
"a",
"_",
"{",
"2",
",",
"n",
"}",
r"\\",
r"\vdots",
"&",
r"\vdots",
"&",
r"\ddots",
"&",
r"\vdots",
r"\\",
"a",
"_",
"{",
"m",
",",
"1",
"}",
"&",
"a",
"_",
"{",
"m",
",",
"2",
"}",
"&",
r"\cdots",
"&",
"a",
"_",
"{",
"m",
",",
"n",
"}",
r"\end{bmatrix}",
],
id="issue-33",
),
pytest.param(r"\mathbb{R}", ["ℝ"], id="issue-51"),
pytest.param(
r"\begin{array}{rcl}ABC&=&a\\A&=&abc\end{array}",
[
r"\begin{array}",
"{",
"r",
"c",
"l",
"}",
"A",
"B",
"C",
"&",
"=",
"&",
"a",
r"\\",
"A",
"&",
"=",
"&",
"a",
"b",
"c",
r"\end{array}",
],
id="issue-55",
),
pytest.param(r"\mathrm{...}", [r"\mathrm", "{", ".", ".", ".", "}"], id="issue-60"),
pytest.param(
r"\frac{x + 4}{x + \frac{123 \left(\sqrt{x} + 5\right)}{x + 4} - 8}",
[
r"\frac",
"{",
"x",
"+",
"4",
"}",
"{",
"x",
"+",
r"\frac",
"{",
"123",
r"\left",
"(",
r"\sqrt",
"{",
"x",
"}",
"+",
"5",
r"\right",
")",
"}",
"{",
"x",
"+",
"4",
"}",
"-",
"8",
"}",
],
id="issue-61",
),
pytest.param(
r"\sqrt {\sqrt {\left( x^{3}\right) + v}}",
[
r"\sqrt",
"{",
r"\sqrt",
"{",
r"\left",
"(",
"x",
"^",
"{",
"3",
"}",
r"\right",
")",
"+",
"v",
"}",
"}",
],
id="issue-63",
),
pytest.param(
r"\left[\begin{matrix}1 & 0 & 0 & 0\\0 & 1 & 0 & 0\\0 & 0 & 1 & 0\\0 & 0 & 0 & 1\end{matrix}\right]",
[
r"\left",
"[",
r"\begin{matrix}",
"1",
"&",
"0",
"&",
"0",
"&",
"0",
r"\\",
"0",
"&",
"1",
"&",
"0",
"&",
"0",
r"\\",
"0",
"&",
"0",
"&",
"1",
"&",
"0",
r"\\",
"0",
"&",
"0",
"&",
"0",
"&",
"1",
r"\end{matrix}",
r"\right",
"]",
],
id="issue-77",
),
pytest.param(
r"x^{x^{x^{x}}} \left(x^{x^{x}} \left(x^{x} \left(\log{\left(x \right)} + 1\right) \log{\left(x \right)} + "
r"\frac{x^{x}}{x}\right) \log{\left(x \right)} + \frac{x^{x^{x}}}{x}\right)",
[
"x",
"^",
"{",
"x",
"^",
"{",
"x",
"^",
"{",
"x",
"}",
"}",
"}",
r"\left",
"(",
"x",
"^",
"{",
"x",
"^",
"{",
"x",
"}",
"}",
r"\left",
"(",
"x",
"^",
"{",
"x",
"}",
r"\left",
"(",
r"\log",
"{",
r"\left",
"(",
"x",
r"\right",
")",
"}",
"+",
"1",
r"\right",
")",
r"\log",
"{",
r"\left",
"(",
"x",
r"\right",
")",
"}",
"+",
r"\frac",
"{",
"x",
"^",
"{",
"x",
"}",
"}",
"{",
"x",
"}",
r"\right",
")",
r"\log",
"{",
r"\left",
"(",
"x",
r"\right",
")",
"}",
"+",
r"\frac",
"{",
"x",
"^",
"{",
"x",
"^",
"{",
"x",
"}",
"}",
"}",
"{",
"x",
"}",
r"\right",
")",
],
id="issue-78",
),
pytest.param(
r"\max_{x \in \[a,b\]}f(x)",
[
r"\max",
"_",
"{",
"x",
r"\in",
r"\[",
"a",
",",
"b",
r"\]",
"}",
"f",
"(",
"x",
")",
],
id="max",
),
pytest.param(r"\max \{a, b, c\}", [r"\max", r"\{", "a", ",", "b", ",", "c", r"\}"], id="issue-108-1"),
pytest.param(r"\operatorname{sn}x", [r"\operatorname{sn}", "x"], id="issue-109"),
pytest.param(
r"\text{Let}\ x=\text{number of cats}.",
[r"\text", "Let", r"\ ", "x", "=", r"\text", "number of cats", "."],
id="issue-109",
),
pytest.param(
r"x = {-b \pm \sqrt{b^2-4ac} \over 2a}",
[
"x",
"=",
"{",
"-",
"b",
r"\pm",
r"\sqrt",
"{",
"b",
"^",
"2",
"-",
"4",
"a",
"c",
"}",
r"\over",
"2",
"a",
"}",
],
id="quadratic-equation",
),
pytest.param(
r"a\,\overset{?}{=}\,b", ["a", "\\,", r"\overset", "{", "?", "}", "{", "=", "}", "\\,", "b"], id="issue-125"
),
pytest.param(
r"|\hspace1em|\hspace 1.2em|\hspace{1.5ex}|\hspace {2ex}|",
[
"|",
r"\hspace",
"1em",
"|",
r"\hspace",
"1.2em",
"|",
r"\hspace",
"{",
"1.5ex",
"}",
"|",
r"\hspace",
"{",
"2ex",
"}",
"|",
],
id="issue-129",
),
pytest.param(r"\text{Hello~World}", [r"\text", "Hello~World"], id="tilde-in-text"),
pytest.param(
r"""% this is hidden
100\%! 100% this is hidden, too
\test% this is another hidden line""",
["100", r"\%", "!", "100", r"\test"],
id="comments",
),
pytest.param(
r"{a \above 1pt b} + {c \above { 1.5 pt } d}",
["{", "a", r"\above", "1pt", "b", "}", "+", "{", "c", r"\above", "{", "1.5pt", "}", "d", "}"],
id="above",
),
pytest.param(
r"\mathop{x}\limits_0^1",
[r"\mathop", "{", "x", "}", r"\limits", "_", "0", "^", "1"],
id="issue-125",
),
pytest.param(r"\fbox{E=mc^2}", [r"\fbox", "E=mc^2"], id="fbox"),
pytest.param("X_123", ["X", "_", "1", "23"], id="issue-203-1"),
pytest.param("X_1.23", ["X", "_", "1", ".23"], id="issue-203-2"),
pytest.param("X^123", ["X", "^", "1", "23"], id="issue-203-3"),
pytest.param("X^1.23", ["X", "^", "1", ".23"], id="issue-203-4"),
pytest.param(r"X_\mathrm{min}", ["X", "_", r"\mathrm", "{", "m", "i", "n", "}"], id="issue-203-5"),
pytest.param(r"\hbox{E=mc^2}", [r"\hbox", "E=mc^2"], id="hbox"),
pytest.param(r"\style{color:red}", [r"\style", "color:red"], id="style"),
pytest.param(r"\frac12", [r"\frac", "1", "2"], id="issue-245-1"),
pytest.param(r"\frac1.", [r"\frac", "1", "."], id="issue-245-2"),
pytest.param(r"\frac.2", [r"\frac", ".", "2"], id="issue-245-3"),
pytest.param(r"\frac123", [r"\frac", "1", "2", "3"], id="issue-245-4"),
pytest.param(r"\color{}ab", [r"\color", "", "a", "b"], id="empty-color"),
pytest.param(
r"\frac 1 2 3 + \frac 123", [r"\frac", "1", "2", "3", "+", r"\frac", "1", "2", "3"], id="issue-386"
),
pytest.param(r"\begin {cases} \end {cases}", [r"\begin{cases}", r"\end{cases}"], id="issue-391"),
pytest.param(r"\operatorname { s n } x", [r"\operatorname{sn}", "x"], id="issue-391-operatorname"),
],
)
def test_tokenize(latex: str, expected: list) -> None:
    """The tokenizer must split each LaTeX string into exactly the expected token list."""
    assert list(tokenize(latex)) == expected
| 14,921 | 27.314991 | 120 | py |
latex2mathml | latex2mathml-master/latex2mathml/exceptions.py | class NumeratorNotFoundError(Exception):
    # NOTE(review): the class header above is fused with dataset metadata in this dump.
    r"""Raised when a fraction command (e.g. ``{ \over 2}``) has no numerator before it."""

    pass
class DenominatorNotFoundError(Exception):
    r"""Raised when a fraction command (e.g. ``{1 \over }``) has no denominator after it."""

    pass
class ExtraLeftOrMissingRightError(Exception):
    r"""Raised when ``\left``/``\right`` (or ``\middle``) delimiters are unbalanced."""

    pass
class MissingSuperScriptOrSubscriptError(Exception):
    """Raised when ``_`` or ``^`` appears with no operand after it (e.g. ``1_`` or ``1^``)."""

    pass
class DoubleSubscriptsError(Exception):
    """Raised when a base receives two subscripts (e.g. ``1_2_3``)."""

    pass
class DoubleSuperscriptsError(Exception):
    """Raised when a base receives two superscripts (e.g. ``1^2^3`` or ``x^2'``)."""

    pass
class NoAvailableTokensError(Exception):
    """Raised when the parser needs a token but the stream is empty.

    NOTE(review): the raise site is not visible in this chunk — presumably raised
    when ``_walk`` consumes a group that yields no tokens; confirm in the walker.
    """

    pass
class InvalidStyleForGenfracError(Exception):
    r"""Raised when ``\genfrac`` is given an invalid style argument (e.g. style ``4``)."""

    pass
class MissingEndError(Exception):
    r"""Raised when a ``\begin{...}`` environment has no matching ``\end{...}``."""

    pass
class InvalidAlignmentError(Exception):
    r"""Raised when an alignment spec is invalid (e.g. ``\begin{matrix*}[xxx]``)."""

    pass
class InvalidWidthError(Exception):
    r"""Raised when a width argument is empty or not a number (e.g. ``\skew{}``)."""

    pass
class LimitsMustFollowMathOperatorError(Exception):
    r"""Raised when ``\limits`` does not directly follow a math operator command."""

    pass
| 645 | 12.744681 | 52 | py |
latex2mathml | latex2mathml-master/latex2mathml/walker.py | from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Tuple
from latex2mathml import commands
from latex2mathml.exceptions import (
DenominatorNotFoundError,
DoubleSubscriptsError,
DoubleSuperscriptsError,
ExtraLeftOrMissingRightError,
InvalidAlignmentError,
InvalidStyleForGenfracError,
InvalidWidthError,
LimitsMustFollowMathOperatorError,
MissingEndError,
MissingSuperScriptOrSubscriptError,
NoAvailableTokensError,
NumeratorNotFoundError,
)
from latex2mathml.symbols_parser import convert_symbol
from latex2mathml.tokenizer import tokenize
class Node(NamedTuple):
    """A node in the tree built by :func:`walk` from a LaTeX token stream."""

    token: str  # the LaTeX command or character this node represents (e.g. r"\frac", "x", "{}")
    children: Optional[Tuple[Any, ...]] = None  # child nodes, or None for a leaf
    delimiter: Optional[str] = None  # delimiter character for \left/\right (and \atopwithdelims-style pairs)
    alignment: Optional[str] = None  # column alignment string for array-like environments (e.g. "rcl")
    text: Optional[str] = None  # raw text payload for text-mode commands such as \text and \hbox
    attributes: Optional[Dict[str, str]] = None  # extra attributes, e.g. {"width": "1em"} or {"linethickness": "1pt"}
    modifier: Optional[str] = None  # script-placement modifier, e.g. r"\limits"
def walk(data: str) -> List[Node]:
    """Tokenize *data* and parse the resulting token stream into a list of ``Node`` trees."""
    return _walk(tokenize(data))
def _walk(tokens: Iterator[str], terminator: Optional[str] = None, limit: int = 0) -> List[Node]:
    """Parse *tokens* into a list of Nodes until *terminator*, *limit* or exhaustion.

    :param tokens: token iterator; consumed in place (also by recursive calls).
    :param terminator: token that closes this group; when encountered it is
        appended to the result (with its delimiter for ``\\right``) before
        returning, so callers can detect that the group was properly closed.
    :param limit: when non-zero, return as soon as this many nodes were collected.
    :raises NoAvailableTokensError: if the iterator yielded nothing at all.
    :raises ExtraLeftOrMissingRightError: on unbalanced ``\\left``/``\\right``.
    """
    group: List[Node] = []
    token: str
    has_available_tokens = False
    for token in tokens:
        has_available_tokens = True
        if token == terminator:
            delimiter = None
            if terminator == commands.RIGHT:
                delimiter = next(tokens)
            group.append(Node(token=token, delimiter=delimiter))
            break
        elif (token == commands.RIGHT != terminator) or (token == commands.MIDDLE and terminator != commands.RIGHT):
            raise ExtraLeftOrMissingRightError
        elif token == commands.LEFT:
            delimiter = next(tokens)
            children = tuple(_walk(tokens, terminator=commands.RIGHT))  # make \right as a child of \left
            if len(children) == 0 or children[-1].token != commands.RIGHT:
                raise ExtraLeftOrMissingRightError
            node = Node(token=token, children=children if len(children) else None, delimiter=delimiter)
        elif token == commands.OPENING_BRACE:
            children = tuple(_walk(tokens, terminator=commands.CLOSING_BRACE))
            if len(children) and children[-1].token == commands.CLOSING_BRACE:
                children = children[:-1]
            node = Node(token=commands.BRACES, children=children)
        # _ and ^ combine with the previously parsed node (possibly merging into _^).
        elif token in (commands.SUBSCRIPT, commands.SUPERSCRIPT):
            try:
                previous = group.pop()
            except IndexError:
                previous = Node(token="")  # left operand can be empty if not present
            if token == previous.token == commands.SUBSCRIPT:
                raise DoubleSubscriptsError
            if (token == previous.token == commands.SUPERSCRIPT) and (
                previous.children is not None
                and len(previous.children) >= 2
                and previous.children[1].token != commands.PRIME
            ):
                raise DoubleSuperscriptsError
            modifier = None
            if previous.token == commands.LIMITS:
                modifier = commands.LIMITS
                try:
                    previous = group.pop()
                    if not previous.token.startswith("\\"):  # TODO: Complete list of operators
                        raise LimitsMustFollowMathOperatorError
                except IndexError:
                    raise LimitsMustFollowMathOperatorError
            if token == commands.SUBSCRIPT and previous.token == commands.SUPERSCRIPT and previous.children is not None:
                children = tuple(_walk(tokens, terminator=terminator, limit=1))
                node = Node(
                    token=commands.SUBSUP,
                    children=(previous.children[0], *children, previous.children[1]),
                    modifier=previous.modifier,
                )
            elif (
                token == commands.SUPERSCRIPT and previous.token == commands.SUBSCRIPT and previous.children is not None
            ):
                children = tuple(_walk(tokens, terminator=terminator, limit=1))
                node = Node(token=commands.SUBSUP, children=(*previous.children, *children), modifier=previous.modifier)
            elif (
                token == commands.SUPERSCRIPT
                and previous.token == commands.SUPERSCRIPT
                and previous.children is not None
                and previous.children[1].token == commands.PRIME
            ):
                children = tuple(_walk(tokens, terminator=terminator, limit=1))
                node = Node(
                    token=commands.SUPERSCRIPT,
                    children=(
                        previous.children[0],
                        Node(token=commands.BRACES, children=(previous.children[1], *children)),
                    ),
                    modifier=previous.modifier,
                )
            else:
                try:
                    children = tuple(_walk(tokens, terminator=terminator, limit=1))
                except NoAvailableTokensError:
                    raise MissingSuperScriptOrSubscriptError
                if previous.token in (commands.OVERBRACE, commands.UNDERBRACE):
                    modifier = previous.token
                node = Node(token=token, children=(previous, *children), modifier=modifier)
        # ' becomes ^\prime; a second ' upgrades it to \dprime.
        elif token == commands.APOSTROPHE:
            try:
                previous = group.pop()
            except IndexError:
                previous = Node(token="")  # left operand can be empty if not present
            if (
                previous.token == commands.SUPERSCRIPT
                and previous.children is not None
                and len(previous.children) >= 2
                and previous.children[1].token != commands.PRIME
            ):
                raise DoubleSuperscriptsError
            if (
                previous.token == commands.SUPERSCRIPT
                and previous.children is not None
                and len(previous.children) >= 2
                and previous.children[1].token == commands.PRIME
            ):
                node = Node(token=commands.SUPERSCRIPT, children=(previous.children[0], Node(token=commands.DPRIME)))
            elif previous.token == commands.SUBSCRIPT and previous.children is not None:
                node = Node(
                    token=commands.SUBSUP,
                    children=(*previous.children, Node(token=commands.PRIME)),
                    modifier=previous.modifier,
                )
            else:
                node = Node(token=commands.SUPERSCRIPT, children=(previous, Node(token=commands.PRIME)))
        elif token in commands.COMMANDS_WITH_TWO_PARAMETERS:
            attributes = None
            children = tuple(_walk(tokens, terminator=terminator, limit=2))
            if token in (commands.OVERSET, commands.UNDERSET):
                children = children[::-1]
            node = Node(token=token, children=children, attributes=attributes)
        elif token in commands.COMMANDS_WITH_ONE_PARAMETER or token.startswith(commands.MATH):
            children = tuple(_walk(tokens, terminator=terminator, limit=1))
            node = Node(token=token, children=children)
        elif token == commands.NOT:
            try:
                next_node = tuple(_walk(tokens, terminator=terminator, limit=1))[0]
                if next_node.token.startswith("\\"):
                    # \not\cmd may have a dedicated negated symbol (\ncmd).
                    negated_symbol = r"\n" + next_node.token[1:]
                    symbol = convert_symbol(negated_symbol)
                    if symbol:
                        node = Node(token=negated_symbol)
                        group.append(node)
                        continue
                node = Node(token=token)
                group.extend((node, next_node))
                continue
            except NoAvailableTokensError:
                node = Node(token=token)
        elif token in (commands.XLEFTARROW, commands.XRIGHTARROW):
            children = tuple(_walk(tokens, terminator=terminator, limit=1))
            if children[0].token == commands.OPENING_BRACKET:
                # Optional [below] argument precedes the {above} argument.
                children = (
                    Node(
                        token=commands.BRACES, children=tuple(_walk(tokens, terminator=commands.CLOSING_BRACKET))[:-1]
                    ),
                    *tuple(_walk(tokens, terminator=terminator, limit=1)),
                )
            node = Node(token=token, children=children)
        elif token in (commands.HSKIP, commands.HSPACE, commands.KERN, commands.MKERN, commands.MSKIP, commands.MSPACE):
            children = tuple(_walk(tokens, terminator=terminator, limit=1))
            if children[0].token == commands.BRACES and children[0].children is not None:
                children = children[0].children
            node = Node(token=token, attributes={"width": children[0].token})
        elif token == commands.COLOR:
            # \color applies to everything up to the terminator.
            attributes = {"mathcolor": next(tokens)}
            children = tuple(_walk(tokens, terminator=terminator))
            sibling = None
            if len(children) and children[-1].token == terminator:
                children, sibling = children[:-1], children[-1]
            group.append(Node(token=token, children=children, attributes=attributes))
            if sibling:
                group.append(sibling)
            break
        elif token == commands.STYLE:
            attributes = {"style": next(tokens)}
            next_node = tuple(_walk(tokens, terminator=terminator, limit=1))[0]
            node = next_node._replace(attributes=attributes)
        elif token in (
            *commands.BIG.keys(),
            *commands.BIG_OPEN_CLOSE.keys(),
            commands.FBOX,
            commands.HBOX,
            commands.MBOX,
            commands.MIDDLE,
            commands.TEXT,
            commands.TEXTBF,
            commands.TEXTIT,
            commands.TEXTRM,
            commands.TEXTSF,
            commands.TEXTTT,
        ):
            node = Node(token=token, text=next(tokens))
        elif token == commands.HREF:
            attributes = {"href": next(tokens)}
            children = tuple(_walk(tokens, terminator=terminator, limit=1))
            node = Node(token=token, children=children, attributes=attributes)
        # Infix fraction-like commands: numerator is everything parsed so far.
        elif token in (
            commands.ABOVE,
            commands.ATOP,
            commands.ABOVEWITHDELIMS,
            commands.ATOPWITHDELIMS,
            commands.BRACE,
            commands.BRACK,
            commands.CHOOSE,
            commands.OVER,
        ):
            attributes = None
            delimiter = None
            if token == commands.ABOVEWITHDELIMS:
                delimiter = next(tokens).lstrip("\\") + next(tokens).lstrip("\\")
            elif token == commands.ATOPWITHDELIMS:
                attributes = {"linethickness": "0"}
                delimiter = next(tokens).lstrip("\\") + next(tokens).lstrip("\\")
            elif token == commands.BRACE:
                delimiter = "{}"
            elif token == commands.BRACK:
                delimiter = "[]"
            elif token == commands.CHOOSE:
                delimiter = "()"
            if token in (commands.ABOVE, commands.ABOVEWITHDELIMS):
                dimension_node = tuple(_walk(tokens, terminator=terminator, limit=1))[0]
                dimension = _get_dimension(dimension_node)
                attributes = {"linethickness": dimension}
            elif token in (commands.ATOP, commands.BRACE, commands.BRACK, commands.CHOOSE):
                attributes = {"linethickness": "0"}
            denominator = tuple(_walk(tokens, terminator=terminator))
            sibling = None
            if len(denominator) and denominator[-1].token == terminator:
                denominator, sibling = denominator[:-1], denominator[-1]
            if len(denominator) == 0:
                if token in (commands.BRACE, commands.BRACK):
                    denominator = (Node(token=commands.BRACES, children=()),)
                else:
                    raise DenominatorNotFoundError
            if len(group) == 0:
                if token in (commands.BRACE, commands.BRACK):
                    group = [Node(token=commands.BRACES, children=())]
                else:
                    raise NumeratorNotFoundError
            if len(denominator) > 1:
                denominator = (Node(token=commands.BRACES, children=denominator),)
            if len(group) == 1:
                children = (*group, *denominator)
            else:
                children = (Node(token=commands.BRACES, children=tuple(group)), *denominator)
            group = [Node(token=commands.FRAC, children=children, attributes=attributes, delimiter=delimiter)]
            if sibling is not None:
                group.append(sibling)
            break
        elif token == commands.SQRT:
            root_nodes = None
            next_node = tuple(_walk(tokens, limit=1))[0]
            if next_node.token == commands.OPENING_BRACKET:
                # \sqrt[index]{radicand} becomes a \root node.
                root_nodes = tuple(_walk(tokens, terminator=commands.CLOSING_BRACKET))[:-1]
                next_node = tuple(_walk(tokens, limit=1))[0]
                if len(root_nodes) > 1:
                    root_nodes = (Node(token=commands.BRACES, children=root_nodes),)
            if root_nodes:
                node = Node(token=commands.ROOT, children=(next_node, *root_nodes))
            else:
                node = Node(token=token, children=(next_node,))
        elif token == commands.ROOT:
            root_nodes = tuple(_walk(tokens, terminator=r"\of"))[:-1]
            next_node = tuple(_walk(tokens, limit=1))[0]
            if len(root_nodes) > 1:
                root_nodes = (Node(token=commands.BRACES, children=root_nodes),)
            if root_nodes:
                node = Node(token=token, children=(next_node, *root_nodes))
            else:
                node = Node(token=token, children=(next_node, Node(token=commands.BRACES, children=())))
        elif token in commands.MATRICES:
            children = tuple(_walk(tokens, terminator=terminator))
            sibling = None
            if len(children) and children[-1].token == terminator:
                children, sibling = children[:-1], children[-1]
            if len(children) == 1 and children[0].token == commands.BRACES and children[0].children:
                children = children[0].children
            if sibling is not None:
                group.extend([Node(token=token, children=children, alignment=""), sibling])
                break
            else:
                node = Node(token=token, children=children, alignment="")
        elif token == commands.GENFRAC:
            delimiter = next(tokens).lstrip("\\") + next(tokens).lstrip("\\")
            dimension_node, style_node = tuple(_walk(tokens, terminator=terminator, limit=2))
            dimension = _get_dimension(dimension_node)
            style = _get_style(style_node)
            attributes = {"linethickness": dimension}
            children = tuple(_walk(tokens, terminator=terminator, limit=2))
            group.extend(
                [Node(token=style), Node(token=token, children=children, delimiter=delimiter, attributes=attributes)]
            )
            break
        # \sideset{_a^b}{_c^d}\op — attach scripts on both sides of the operator.
        elif token == commands.SIDESET:
            left, right, operator = tuple(_walk(tokens, terminator=terminator, limit=3))
            left_token, left_children = _make_subsup(left)
            right_token, right_children = _make_subsup(right)
            attributes = {"movablelimits": "false"}
            node = Node(
                token=token,
                children=(
                    Node(
                        token=left_token,
                        children=(
                            Node(
                                token=commands.VPHANTOM,
                                children=(
                                    Node(token=operator.token, children=operator.children, attributes=attributes),
                                ),
                            ),
                            *left_children,
                        ),
                    ),
                    Node(
                        token=right_token,
                        children=(
                            Node(token=operator.token, children=operator.children, attributes=attributes),
                            *right_children,
                        ),
                    ),
                ),
            )
        elif token == commands.SKEW:
            width_node, child = tuple(_walk(tokens, terminator=terminator, limit=2))
            width = width_node.token
            if width == commands.BRACES:
                if width_node.children is None or len(width_node.children) == 0:
                    raise InvalidWidthError
                width = width_node.children[0].token
            if not width.isdigit():
                raise InvalidWidthError
            node = Node(token=token, children=(child,), attributes={"width": f"{0.0555 * int(width):.3f}em"})
        elif token.startswith(commands.BEGIN):
            node = _get_environment_node(token, tokens)
        else:
            node = Node(token=token)
        group.append(node)
        if limit and len(group) >= limit:
            break
    if not has_available_tokens:
        raise NoAvailableTokensError
    return group
def _make_subsup(node: Node) -> Tuple[str, Tuple[Node, ...]]:
    """Unwrap a braced ``{_...^...}`` group into (script token, operand children).

    Returns ``("", ())`` when the braced group is empty (IndexError path).
    NOTE(review): if the first child's ``children`` is None, ``len(...)``
    raises TypeError, which — unlike IndexError — is not caught here;
    confirm callers never pass such a node.
    """
    # TODO: raise error instead of assertion
    assert node.token == commands.BRACES
    try:
        assert (
            node.children is not None
            and 2 <= len(node.children[0].children) <= 3
            and node.children[0].token
            in (
                commands.SUBSUP,
                commands.SUBSCRIPT,
                commands.SUPERSCRIPT,
            )
        )
        token = node.children[0].token
        children = node.children[0].children[1:]
        return token, children
    except IndexError:
        return "", ()
def _get_dimension(node: Node) -> str:
    """Return the dimension literal carried by *node*, unwrapping a braced group."""
    if node.token == commands.BRACES and node.children is not None:
        return node.children[0].token
    return node.token
def _get_style(node: Node) -> str:
    """Map a \\genfrac style digit ("0"-"3") to the matching style command.

    :raises InvalidStyleForGenfracError: for any other value.
    """
    style = node.token
    if node.token == commands.BRACES and node.children is not None:
        style = node.children[0].token
    styles = {
        "0": commands.DISPLAYSTYLE,
        "1": commands.TEXTSTYLE,
        "2": commands.SCRIPTSTYLE,
        "3": commands.SCRIPTSCRIPTSTYLE,
    }
    mapped = styles.get(style)
    if mapped is None:
        raise InvalidStyleForGenfracError
    return mapped
def _get_environment_node(token: str, tokens: Iterator[str]) -> Node:
    """Parse the body of ``\\begin{env} ... \\end{env}`` into a matrix-like Node.

    :param token: the full ``\\begin{env}`` token.
    :param tokens: remaining token stream, consumed up to the matching end.
    :raises MissingEndError: when the matching ``\\end{env}`` is absent.
    :raises InvalidAlignmentError: on a column spec outside ``l c r |``.
    """
    # TODO: support non-matrix environments
    start_index = token.index("{") + 1
    environment = token[start_index:-1]
    terminator = rf"{commands.END}{{{environment}}}"
    children = tuple(_walk(tokens, terminator=terminator))
    if len(children) and children[-1].token != terminator:
        raise MissingEndError
    children = children[:-1]
    alignment = ""
    # Bracketed alignment: \begin{env}[lcr] ...
    if len(children) and children[0].token == commands.OPENING_BRACKET:
        children_iter = iter(children)
        next(children_iter)  # remove BRACKET
        for c in children_iter:
            if c.token == commands.CLOSING_BRACKET:
                break
            elif c.token not in "lcr|":
                raise InvalidAlignmentError
            alignment += c.token
        children = tuple(children_iter)
    # Braced alignment: \begin{env}{lcr} ... (brackets accepted for starred envs)
    elif (
        len(children)
        and children[0].children is not None
        and (
            children[0].token == commands.BRACES
            or (environment.endswith("*") and children[0].token == commands.BRACKETS)
        )
        and all(c.token in "lcr|" for c in children[0].children)
    ):
        alignment = "".join(c.token for c in children[0].children)
        children = children[1:]
    return Node(token=rf"\{environment}", children=children, alignment=alignment)
| 19,969 | 42.60262 | 120 | py |
latex2mathml | latex2mathml-master/latex2mathml/symbols_parser.py | import codecs
import os
import re
from typing import Dict, Optional, Union
SYMBOLS_FILE: str = os.path.join(os.path.dirname(os.path.realpath(__file__)), "unimathsymbols.txt")
SYMBOLS: Optional[Dict[str, str]] = None
def convert_symbol(symbol: str) -> Union[str, None]:
    """Return the Unicode code point string for a LaTeX *symbol*, if known.

    The symbol table is loaded lazily on first use and cached in the
    module-level ``SYMBOLS`` mapping.
    """
    global SYMBOLS
    table = SYMBOLS
    if not table:
        table = SYMBOLS = parse_symbols()
    return table.get(symbol)
def parse_symbols() -> Dict[str, str]:
    """Parse ``unimathsymbols.txt`` into a {latex-command: codepoint} mapping.

    Each non-comment row is '^'-separated: codepoint, ..., latex command,
    unicode-math command, ..., comments. Equivalent commands referenced in
    the comment column (written "= \\cmd" or "# \\cmd") are registered too.
    First definition of a command wins. A handful of aliases missing from
    the data file are added explicitly afterwards.
    """
    _symbols: Dict[str, str] = {}
    with codecs.open(SYMBOLS_FILE, encoding="utf-8") as f:
        for line in f:
            if line.startswith("#"):
                continue
            columns = line.strip().split("^")
            _unicode = columns[0]
            latex = columns[2]
            unicode_math = columns[3]
            if latex and latex not in _symbols:
                _symbols[latex] = _unicode
            if unicode_math and unicode_math not in _symbols:
                _symbols[unicode_math] = _unicode
            for equivalent in re.findall(r"[=#]\s*(\\[^,^ ]+),?", columns[-1]):
                if equivalent not in _symbols:
                    _symbols[equivalent] = _unicode
    # Aliases not present (or named differently) in unimathsymbols.txt.
    _symbols.update(
        {
            r"\And": _symbols[r"\ampersand"],
            r"\bigcirc": _symbols[r"\lgwhtcircle"],
            r"\Box": _symbols[r"\square"],
            r"\circledS": "024C8",
            r"\diagdown": "02572",
            r"\diagup": "02571",
            r"\dots": "02026",
            r"\dotsb": _symbols[r"\cdots"],
            r"\dotsc": "02026",
            r"\dotsi": _symbols[r"\cdots"],
            r"\dotsm": _symbols[r"\cdots"],
            r"\dotso": "02026",
            r"\emptyset": "02205",
            r"\gggtr": "022D9",
            r"\gvertneqq": "02269",
            r"\gt": _symbols[r"\greater"],
            r"\ldotp": _symbols[r"\period"],
            r"\llless": _symbols[r"\lll"],
            r"\lt": _symbols[r"\less"],
            r"\lvert": _symbols[r"\vert"],
            r"\lVert": _symbols[r"\Vert"],
            r"\lvertneqq": _symbols[r"\lneqq"],
            r"\ngeqq": _symbols[r"\ngeq"],
            r"\nshortmid": _symbols[r"\nmid"],
            r"\nshortparallel": _symbols[r"\nparallel"],
            r"\nsubseteqq": _symbols[r"\nsubseteq"],
            r"\omicron": _symbols[r"\upomicron"],
            r"\rvert": _symbols[r"\vert"],
            r"\rVert": _symbols[r"\Vert"],
            r"\shortmid": _symbols[r"\mid"],
            r"\smallfrown": _symbols[r"\frown"],
            r"\smallint": "0222B",
            r"\smallsmile": _symbols[r"\smile"],
            r"\surd": _symbols[r"\sqrt"],
            r"\thicksim": "0223C",
            r"\thickapprox": _symbols[r"\approx"],
            r"\varsubsetneqq": _symbols[r"\subsetneqq"],
            r"\varsupsetneq": "0228B",
            r"\varsupsetneqq": _symbols[r"\supsetneqq"],
        }
    )
    del _symbols[r"\mathring"]  # FIXME: improve tokenizer without removing this
    return _symbols
| 2,954 | 36.405063 | 99 | py |
latex2mathml | latex2mathml-master/latex2mathml/commands.py | from collections import OrderedDict, defaultdict
from typing import DefaultDict, Dict, Optional, Tuple
OPENING_BRACE = "{"
CLOSING_BRACE = "}"
BRACES = "{}"
OPENING_BRACKET = "["
CLOSING_BRACKET = "]"
BRACKETS = "[]"
OPENING_PARENTHESIS = "("
CLOSING_PARENTHESIS = ")"
PARENTHESES = "()"
SUBSUP = "_^"
SUBSCRIPT = "_"
SUPERSCRIPT = "^"
APOSTROPHE = "'"
PRIME = r"\prime"
DPRIME = r"\dprime"
LEFT = r"\left"
MIDDLE = r"\middle"
RIGHT = r"\right"
ABOVE = r"\above"
ABOVEWITHDELIMS = r"\abovewithdelims"
ATOP = r"\atop"
ATOPWITHDELIMS = r"\atopwithdelims"
BINOM = r"\binom"
BRACE = r"\brace"
BRACK = r"\brack"
CFRAC = r"\cfrac"
CHOOSE = r"\choose"
DBINOM = r"\dbinom"
DFRAC = r"\dfrac"
FRAC = r"\frac"
GENFRAC = r"\genfrac"
OVER = r"\over"
TBINOM = r"\tbinom"
TFRAC = r"\tfrac"
ROOT = r"\root"
SQRT = r"\sqrt"
OVERSET = r"\overset"
UNDERSET = r"\underset"
ACUTE = r"\acute"
BAR = r"\bar"
BREVE = r"\breve"
CHECK = r"\check"
DOT = r"\dot"
DDOT = r"\ddot"
DDDOT = r"\dddot"
DDDDOT = r"\ddddot"
GRAVE = r"\grave"
HAT = r"\hat"
MATHRING = r"\mathring"
OVERBRACE = r"\overbrace"
OVERLEFTARROW = r"\overleftarrow"
OVERLEFTRIGHTARROW = r"\overleftrightarrow"
OVERLINE = r"\overline"
OVERPAREN = r"\overparen"
OVERRIGHTARROW = r"\overrightarrow"
TILDE = r"\tilde"
UNDERBRACE = r"\underbrace"
UNDERLEFTARROW = r"\underleftarrow"
UNDERLINE = r"\underline"
UNDERPAREN = r"\underparen"
UNDERRIGHTARROW = r"\underrightarrow"
UNDERLEFTRIGHTARROW = r"\underleftrightarrow"
VEC = r"\vec"
WIDEHAT = r"\widehat"
WIDETILDE = r"\widetilde"
XLEFTARROW = r"\xleftarrow"
XRIGHTARROW = r"\xrightarrow"
HREF = r"\href"
TEXT = r"\text"
TEXTBF = r"\textbf"
TEXTIT = r"\textit"
TEXTRM = r"\textrm"
TEXTSF = r"\textsf"
TEXTTT = r"\texttt"
BEGIN = r"\begin"
END = r"\end"
LIMITS = r"\limits"
INTEGRAL = r"\int"
SUMMATION = r"\sum"
LIMIT = (r"\lim", r"\sup", r"\inf", r"\max", r"\min")
OPERATORNAME = r"\operatorname"
LBRACE = r"\{"
FUNCTIONS = (
r"\arccos",
r"\arcsin",
r"\arctan",
r"\cos",
r"\cosh",
r"\cot",
r"\coth",
r"\csc",
r"\deg",
r"\dim",
r"\exp",
r"\hom",
r"\ker",
r"\ln",
r"\lg",
r"\log",
r"\sec",
r"\sin",
r"\sinh",
r"\tan",
r"\tanh",
)
DETERMINANT = r"\det"
GCD = r"\gcd"
INTOP = r"\intop"
INJLIM = r"\injlim"
LIMINF = r"\liminf"
LIMSUP = r"\limsup"
PR = r"\Pr"
PROJLIM = r"\projlim"
MOD = r"\mod"
PMOD = r"\pmod"
BMOD = r"\bmod"
HDASHLINE = r"\hdashline"
HLINE = r"\hline"
HFIL = r"\hfil"
CASES = r"\cases"
DISPLAYLINES = r"\displaylines"
SMALLMATRIX = r"\smallmatrix"
SUBSTACK = r"\substack"
SPLIT = r"\split"
ALIGN = r"\align*"
MATRICES = (
r"\matrix",
r"\matrix*",
r"\pmatrix",
r"\pmatrix*",
r"\bmatrix",
r"\bmatrix*",
r"\Bmatrix",
r"\Bmatrix*",
r"\vmatrix",
r"\vmatrix*",
r"\Vmatrix",
r"\Vmatrix*",
r"\array",
SUBSTACK,
CASES,
DISPLAYLINES,
SMALLMATRIX,
SPLIT,
ALIGN,
)
BACKSLASH = "\\"
CARRIAGE_RETURN = r"\cr"
COLON = r"\:"
COMMA = r"\,"
DOUBLEBACKSLASH = r"\\"
ENSPACE = r"\enspace"
EXCLAMATION = r"\!"
GREATER_THAN = r"\>"
HSKIP = r"\hskip"
HSPACE = r"\hspace"
KERN = r"\kern"
MKERN = r"\mkern"
MSKIP = r"\mskip"
MSPACE = r"\mspace"
NEGTHINSPACE = r"\negthinspace"
NEGMEDSPACE = r"\negmedspace"
NEGTHICKSPACE = r"\negthickspace"
NOBREAKSPACE = r"\nobreakspace"
SPACE = r"\space"
THINSPACE = r"\thinspace"
QQUAD = r"\qquad"
QUAD = r"\quad"
SEMICOLON = r"\;"
BLACKBOARD_BOLD = r"\Bbb"
BOLD_SYMBOL = r"\boldsymbol"
MIT = r"\mit"
OLDSTYLE = r"\oldstyle"
SCR = r"\scr"
TT = r"\tt"
MATH = r"\math"
MATHBB = r"\mathbb"
MATHBF = r"\mathbf"
MATHCAL = r"\mathcal"
MATHFRAK = r"\mathfrak"
MATHIT = r"\mathit"
MATHRM = r"\mathrm"
MATHSCR = r"\mathscr"
MATHSF = r"\mathsf"
MATHTT = r"\mathtt"
BOXED = r"\boxed"
FBOX = r"\fbox"
HBOX = r"\hbox"
MBOX = r"\mbox"
COLOR = r"\color"
DISPLAYSTYLE = r"\displaystyle"
TEXTSTYLE = r"\textstyle"
SCRIPTSTYLE = r"\scriptstyle"
SCRIPTSCRIPTSTYLE = r"\scriptscriptstyle"
STYLE = r"\style"
HPHANTOM = r"\hphantom"
PHANTOM = r"\phantom"
VPHANTOM = r"\vphantom"
IDOTSINT = r"\idotsint"
LATEX = r"\LaTeX"
TEX = r"\TeX"
SIDESET = r"\sideset"
SKEW = r"\skew"
NOT = r"\not"
def font_factory(default: Optional[str], replacement: Dict[str, Optional[str]]) -> DefaultDict[str, Optional[str]]:
    """Build a font-variant lookup table.

    Keys listed in *replacement* map to their explicit value (possibly None);
    every other key falls back to *default*.
    """

    def _fallback() -> Optional[str]:
        return default

    return defaultdict(_fallback, replacement)
LOCAL_FONTS: Dict[str, DefaultDict[str, Optional[str]]] = {
BLACKBOARD_BOLD: font_factory("double-struck", {"fence": None}),
BOLD_SYMBOL: font_factory("bold", {"mi": "bold-italic", "mtext": None}),
MATHBB: font_factory("double-struck", {"fence": None}),
MATHBF: font_factory("bold", {"fence": None}),
MATHCAL: font_factory("script", {"fence": None}),
MATHFRAK: font_factory("fraktur", {"fence": None}),
MATHIT: font_factory("italic", {"fence": None}),
MATHRM: font_factory(None, {"mi": "normal"}),
MATHSCR: font_factory("script", {"fence": None}),
MATHSF: font_factory(None, {"mi": "sans-serif"}),
MATHTT: font_factory("monospace", {"fence": None}),
MIT: font_factory("italic", {"fence": None, "mi": None}),
OLDSTYLE: font_factory("normal", {"fence": None}),
SCR: font_factory("script", {"fence": None}),
TT: font_factory("monospace", {"fence": None}),
}
OLD_STYLE_FONTS: Dict[str, DefaultDict[str, Optional[str]]] = {
r"\rm": font_factory(None, {"mi": "normal"}),
r"\bf": font_factory(None, {"mi": "bold"}),
r"\it": font_factory(None, {"mi": "italic"}),
r"\sf": font_factory(None, {"mi": "sans-serif"}),
r"\tt": font_factory(None, {"mi": "monospace"}),
}
GLOBAL_FONTS = {
**OLD_STYLE_FONTS,
r"\cal": font_factory("script", {"fence": None}),
r"\frak": font_factory("fraktur", {"fence": None}),
}
COMMANDS_WITH_ONE_PARAMETER = (
ACUTE,
BAR,
BLACKBOARD_BOLD,
BOLD_SYMBOL,
BOXED,
BREVE,
CHECK,
DOT,
DDOT,
DDDOT,
DDDDOT,
GRAVE,
HAT,
HPHANTOM,
MATHRING,
MIT,
MOD,
OLDSTYLE,
OVERBRACE,
OVERLEFTARROW,
OVERLEFTRIGHTARROW,
OVERLINE,
OVERPAREN,
OVERRIGHTARROW,
PHANTOM,
PMOD,
SCR,
TILDE,
TT,
UNDERBRACE,
UNDERLEFTARROW,
UNDERLINE,
UNDERPAREN,
UNDERRIGHTARROW,
UNDERLEFTRIGHTARROW,
VEC,
VPHANTOM,
WIDEHAT,
WIDETILDE,
)
COMMANDS_WITH_TWO_PARAMETERS = (
BINOM,
CFRAC,
DBINOM,
DFRAC,
FRAC,
OVERSET,
TBINOM,
TFRAC,
UNDERSET,
)
BIG: Dict[str, Tuple[str, dict]] = {
# command: (mathml_equivalent, attributes)
r"\Bigg": ("mo", OrderedDict([("minsize", "2.470em"), ("maxsize", "2.470em")])),
r"\bigg": ("mo", OrderedDict([("minsize", "2.047em"), ("maxsize", "2.047em")])),
r"\Big": ("mo", OrderedDict([("minsize", "1.623em"), ("maxsize", "1.623em")])),
r"\big": ("mo", OrderedDict([("minsize", "1.2em"), ("maxsize", "1.2em")])),
}
BIG_OPEN_CLOSE = {
command + postfix: (tag, OrderedDict([("stretchy", "true"), ("fence", "true"), *attrib.items()]))
for command, (tag, attrib) in BIG.items()
for postfix in "lmr"
}
MSTYLE_SIZES: Dict[str, Tuple[str, dict]] = {
# command: (mathml_equivalent, attributes)
r"\Huge": ("mstyle", {"mathsize": "2.49em"}),
r"\huge": ("mstyle", {"mathsize": "2.07em"}),
r"\LARGE": ("mstyle", {"mathsize": "1.73em"}),
r"\Large": ("mstyle", {"mathsize": "1.44em"}),
r"\large": ("mstyle", {"mathsize": "1.2em"}),
r"\normalsize": ("mstyle", {"mathsize": "1em"}),
r"\scriptsize": ("mstyle", {"mathsize": "0.7em"}),
r"\small": ("mstyle", {"mathsize": "0.85em"}),
r"\tiny": ("mstyle", {"mathsize": "0.5em"}),
r"\Tiny": ("mstyle", {"mathsize": "0.6em"}),
}
STYLES: Dict[str, Tuple[str, dict]] = {
DISPLAYSTYLE: ("mstyle", {"displaystyle": "true", "scriptlevel": "0"}),
TEXTSTYLE: ("mstyle", {"displaystyle": "false", "scriptlevel": "0"}),
SCRIPTSTYLE: ("mstyle", {"displaystyle": "false", "scriptlevel": "1"}),
SCRIPTSCRIPTSTYLE: ("mstyle", {"displaystyle": "false", "scriptlevel": "2"}),
}
CONVERSION_MAP: Dict[str, Tuple[str, dict]] = {
# command: (mathml_equivalent, attributes)
# tables
**{matrix: ("mtable", {}) for matrix in MATRICES},
DISPLAYLINES: ("mtable", {"rowspacing": "0.5em", "columnspacing": "1em", "displaystyle": "true"}),
SMALLMATRIX: ("mtable", {"rowspacing": "0.1em", "columnspacing": "0.2778em"}),
SPLIT: (
"mtable",
{"displaystyle": "true", "columnspacing": "0em", "rowspacing": "3pt"},
),
ALIGN: (
"mtable",
{"displaystyle": "true", "rowspacing": "3pt"},
),
# subscripts/superscripts
SUBSCRIPT: ("msub", {}),
SUPERSCRIPT: ("msup", {}),
SUBSUP: ("msubsup", {}),
# fractions
BINOM: ("mfrac", {"linethickness": "0"}),
CFRAC: ("mfrac", {}),
DBINOM: ("mfrac", {"linethickness": "0"}),
DFRAC: ("mfrac", {}),
FRAC: ("mfrac", {}),
GENFRAC: ("mfrac", {}),
TBINOM: ("mfrac", {"linethickness": "0"}),
TFRAC: ("mfrac", {}),
# over/under
ACUTE: ("mover", {}),
BAR: ("mover", {}),
BREVE: ("mover", {}),
CHECK: ("mover", {}),
DOT: ("mover", {}),
DDOT: ("mover", {}),
DDDOT: ("mover", {}),
DDDDOT: ("mover", {}),
GRAVE: ("mover", {}),
HAT: ("mover", {}),
LIMITS: ("munderover", {}),
MATHRING: ("mover", {}),
OVERBRACE: ("mover", {}),
OVERLEFTARROW: ("mover", {}),
OVERLEFTRIGHTARROW: ("mover", {}),
OVERLINE: ("mover", {}),
OVERPAREN: ("mover", {}),
OVERRIGHTARROW: ("mover", {}),
TILDE: ("mover", {}),
OVERSET: ("mover", {}),
UNDERBRACE: ("munder", {}),
UNDERLEFTARROW: ("munder", {}),
UNDERLINE: ("munder", {}),
UNDERPAREN: ("munder", {}),
UNDERRIGHTARROW: ("munder", {}),
UNDERLEFTRIGHTARROW: ("munder", {}),
UNDERSET: ("munder", {}),
VEC: ("mover", {}),
WIDEHAT: ("mover", {}),
WIDETILDE: ("mover", {}),
# spaces
COLON: ("mspace", {"width": "0.222em"}),
COMMA: ("mspace", {"width": "0.167em"}),
DOUBLEBACKSLASH: ("mspace", {"linebreak": "newline"}),
ENSPACE: ("mspace", {"width": "0.5em"}),
EXCLAMATION: ("mspace", {"width": "negativethinmathspace"}),
GREATER_THAN: ("mspace", {"width": "0.222em"}),
HSKIP: ("mspace", {}),
HSPACE: ("mspace", {}),
KERN: ("mspace", {}),
MKERN: ("mspace", {}),
MSKIP: ("mspace", {}),
MSPACE: ("mspace", {}),
NEGTHINSPACE: ("mspace", {"width": "negativethinmathspace"}),
NEGMEDSPACE: ("mspace", {"width": "negativemediummathspace"}),
NEGTHICKSPACE: ("mspace", {"width": "negativethickmathspace"}),
THINSPACE: ("mspace", {"width": "thinmathspace"}),
QQUAD: ("mspace", {"width": "2em"}),
QUAD: ("mspace", {"width": "1em"}),
SEMICOLON: ("mspace", {"width": "0.278em"}),
# enclose
BOXED: ("menclose", {"notation": "box"}),
FBOX: ("menclose", {"notation": "box"}),
# operators
**BIG,
**BIG_OPEN_CLOSE,
**MSTYLE_SIZES,
**{limit: ("mo", {}) for limit in LIMIT},
LEFT: ("mo", OrderedDict([("stretchy", "true"), ("fence", "true"), ("form", "prefix")])),
MIDDLE: ("mo", OrderedDict([("stretchy", "true"), ("fence", "true"), ("lspace", "0.05em"), ("rspace", "0.05em")])),
RIGHT: ("mo", OrderedDict([("stretchy", "true"), ("fence", "true"), ("form", "postfix")])),
# styles
COLOR: ("mstyle", {}),
**STYLES,
# others
SQRT: ("msqrt", {}),
ROOT: ("mroot", {}),
HREF: ("mtext", {}),
TEXT: ("mtext", {}),
TEXTBF: ("mtext", {"mathvariant": "bold"}),
TEXTIT: ("mtext", {"mathvariant": "italic"}),
TEXTRM: ("mtext", {}),
TEXTSF: ("mtext", {"mathvariant": "sans-serif"}),
TEXTTT: ("mtext", {"mathvariant": "monospace"}),
HBOX: ("mtext", {}),
MBOX: ("mtext", {}),
HPHANTOM: ("mphantom", {}),
PHANTOM: ("mphantom", {}),
VPHANTOM: ("mphantom", {}),
SIDESET: ("mrow", {}),
SKEW: ("mrow", {}),
MOD: ("mi", {}),
PMOD: ("mi", {}),
BMOD: ("mo", {}),
XLEFTARROW: ("mover", {}),
XRIGHTARROW: ("mover", {}),
}
DIACRITICS: Dict[str, Tuple[str, Dict[str, str]]] = {
ACUTE: ("´", {}),
BAR: ("¯", {"stretchy": "true"}),
BREVE: ("˘", {}),
CHECK: ("ˇ", {}),
DOT: ("˙", {}),
DDOT: ("¨", {}),
DDDOT: ("⃛", {}),
DDDDOT: ("⃜", {}),
GRAVE: ("`", {}),
HAT: ("^", {"stretchy": "false"}),
MATHRING: ("˚", {}),
OVERBRACE: ("⏞", {}),
OVERLEFTARROW: ("←", {}),
OVERLEFTRIGHTARROW: ("↔", {}),
OVERLINE: ("―", {"accent": "true"}),
OVERPAREN: ("⏜", {}),
OVERRIGHTARROW: ("→", {}),
TILDE: ("~", {"stretchy": "false"}),
UNDERBRACE: ("⏟", {}),
UNDERLEFTARROW: ("←", {}),
UNDERLEFTRIGHTARROW: ("↔", {}),
UNDERLINE: ("―", {"accent": "true"}),
UNDERPAREN: ("⏝", {}),
UNDERRIGHTARROW: ("→", {}),
VEC: ("→", {"stretchy": "true"}),
WIDEHAT: ("^", {}),
WIDETILDE: ("~", {}),
}
| 13,087 | 24.814596 | 119 | py |
latex2mathml | latex2mathml-master/latex2mathml/tokenizer.py | import re
from typing import Iterator
from latex2mathml import commands
from latex2mathml.symbols_parser import convert_symbol
UNITS = ("in", "mm", "cm", "pt", "em", "ex", "pc", "bp", "dd", "cc", "sp", "mu")

# Master tokenizer pattern. Alternatives are tried in order, so specific
# constructs (comments, dimensions, \begin/\end, named commands) must come
# before the catch-all single-character branch at the end.
#
# BUGFIX: the "letter" alternative was written as `(a-zA-Z)`, which matches
# the *literal* substring "a-zA-Z" (with a literal '-') rather than a single
# letter; single letters were only tokenized by the trailing `(\S)` branch,
# and an input containing "a-zA-Z" produced one bogus token. The character
# class `([a-zA-Z])` matches one letter as the comment intends.
PATTERN = re.compile(
    rf"""
    (%[^\n]+) | # comment
    ([a-zA-Z]) | # letter
    ([_^])(\d) | # number succeeding an underscore or a caret
    (-?\d+(?:\.\d+)?\s*(?:{'|'.join(UNITS)})) | # dimension
    (\d+(?:\.\d+)?) | # integer/decimal
    (\.\d*) | # dot (.) or decimal can start with just a dot
    (\\[\\\[\]{{}}\s!,:>;|_%#$&]) | # escaped characters
    (\\(?:begin|end)\s*{{[a-zA-Z]+\*?}}) | # begin or end
    (\\operatorname\s*{{[a-zA-Z\s*]+\*?\s*}}) | # operatorname
    # color, fbox, href, hbox, mbox, style, text, textbf, textit, textrm, textsf, texttt
    (\\(?:color|fbox|hbox|href|mbox|style|text|textbf|textit|textrm|textsf|texttt))\s*{{([^}}]*)}} |
    (\\[cdt]?frac)\s*([.\d])\s*([.\d])? | # fractions
    (\\math[a-z]+)({{)([a-zA-Z])(}}) | # commands starting with math
    (\\[a-zA-Z]+) | # other commands
    (\S) # non-space character
    """,
    re.VERBOSE,
)
def tokenize(latex_string: str, skip_comments: bool = True) -> Iterator[str]:
    """
    Converts a LaTeX string into tokens.

    :param latex_string: LaTeX string.
    :param skip_comments: Flag to skip comments (default=True).
    :return: iterator over token strings.
    """
    for match in PATTERN.finditer(latex_string):
        # Every PATTERN alternative contains at least one capturing group,
        # so `tokens` is non-empty for any successful match.
        tokens = tuple(filter(lambda x: x is not None, match.groups()))
        if tokens[0].startswith(commands.MATH):
            # \math...{x} may map to a single Unicode symbol (e.g. \mathbb{R});
            # if so, emit the numeric character reference instead of the parts.
            full_math = "".join(tokens)
            symbol = convert_symbol(full_math)
            if symbol:
                yield f"&#x{symbol};"
                continue
        for captured in tokens:
            if skip_comments and captured.startswith("%"):
                break
            if captured.endswith(UNITS):
                # Dimension token: drop internal spaces ("1 cm" -> "1cm").
                yield captured.replace(" ", "")
                continue
            if captured.startswith((commands.BEGIN, commands.END, commands.OPERATORNAME)):
                # Normalize spacing: "\begin {x}" -> "\begin{x}", etc.
                yield "".join(captured.split(" "))
                continue
            yield captured
| 2,360 | 41.160714 | 100 | py |
latex2mathml | latex2mathml-master/latex2mathml/converter.py | import copy
import enum
import re
from collections import OrderedDict
from typing import Dict, Iterable, Iterator, List, Optional, Tuple
from xml.etree.cElementTree import Element, SubElement, tostring
from xml.sax.saxutils import unescape
from latex2mathml import commands
from latex2mathml.symbols_parser import convert_symbol
from latex2mathml.walker import Node, walk
COLUMN_ALIGNMENT_MAP = {"r": "right", "l": "left", "c": "center"}
OPERATORS = (
"+",
"-",
"*",
"/",
"(",
")",
"=",
",",
"?",
"[",
"]",
"|",
r"\|",
"!",
r"\{",
r"\}",
r">",
r"<",
r".",
r"\bigotimes",
r"\centerdot",
r"\dots",
r"\dotsc",
r"\dotso",
r"\gt",
r"\ldotp",
r"\lt",
r"\lvert",
r"\lVert",
r"\lvertneqq",
r"\ngeqq",
r"\omicron",
r"\rvert",
r"\rVert",
r"\S",
r"\smallfrown",
r"\smallint",
r"\smallsmile",
r"\surd",
r"\varsubsetneqq",
r"\varsupsetneqq",
)
MATH_MODE_PATTERN = re.compile(r"\\\$|\$|\\?[^\\$]+")
class Mode(enum.Enum):
    """Lexing mode for $-delimited segments.

    NOTE(review): presumably used with MATH_MODE_PATTERN to alternate between
    literal text and inline math — confirm at the (out-of-view) call sites.
    """

    TEXT = enum.auto()
    MATH = enum.auto()
def convert(latex: str, xmlns: str = "http://www.w3.org/1998/Math/MathML", display: str = "inline") -> str:
    """Convert a LaTeX string into its MathML representation.

    :param latex: LaTeX source.
    :param xmlns: XML namespace placed on the root <math> element.
    :param display: value of the <math> ``display`` attribute.
    :return: serialized MathML markup.
    """
    root = Element("math", xmlns=xmlns, display=display)
    _convert_group(iter(walk(latex)), SubElement(root, "mrow"))
    return _convert(root)
def _convert(tree: Element) -> str:
return unescape(tostring(tree, encoding="unicode"))
def _convert_matrix(nodes: Iterator[Node], parent: Element, command: str, alignment: Optional[str] = None) -> None:
    """Convert the cell tokens of a matrix-like environment into <mtr>/<mtd> rows.

    :param nodes: iterator over the environment's child nodes ("&" separates
        cells, \\ or carriage return separates rows).
    :param parent: the <mtable>-like element rows are appended to.
    :param command: the environment command (e.g. \\align, \\cases); selects
        special per-environment handling.
    :param alignment: column-specifier string (e.g. "rl"), applied cyclically.
    """
    row = None
    cell = None
    col_index = 0
    col_alignment = None
    max_col_size = 0
    row_index = 0
    row_lines = []
    hfil_indexes: List[bool] = []
    for node in nodes:
        # Lazily open the first row/cell so an empty environment adds nothing.
        if row is None:
            row = SubElement(parent, "mtr")
        if cell is None:
            col_alignment, col_index = _get_column_alignment(alignment, col_alignment, col_index)
            cell = _make_matrix_cell(row, col_alignment)
        if node.token == commands.BRACES:
            _convert_group(iter([node]), cell)
        elif node.token == "&":
            # Column separator: close the current cell and open the next one.
            _set_cell_alignment(cell, hfil_indexes)
            hfil_indexes = []
            col_alignment, col_index = _get_column_alignment(alignment, col_alignment, col_index)
            cell = _make_matrix_cell(row, col_alignment)
            # split/align environments pad every second column with an empty <mi>.
            if command in (commands.SPLIT, commands.ALIGN) and col_index % 2 == 0:
                SubElement(cell, "mi")
        elif node.token in (commands.DOUBLEBACKSLASH, commands.CARRIAGE_RETURN):
            # Row separator: finish the row and start a fresh one.
            _set_cell_alignment(cell, hfil_indexes)
            hfil_indexes = []
            row_index += 1
            if col_index > max_col_size:
                max_col_size = col_index
            col_index = 0
            col_alignment, col_index = _get_column_alignment(alignment, col_alignment, col_index)
            row = SubElement(parent, "mtr")
            cell = _make_matrix_cell(row, col_alignment)
        elif node.token == commands.HLINE:
            row_lines.append("solid")
        elif node.token == commands.HDASHLINE:
            row_lines.append("dashed")
        elif node.token == commands.HFIL:
            hfil_indexes.append(True)
        else:
            # Ordinary cell content; record that this row has no rule above it.
            if row_index > len(row_lines):
                row_lines.append("none")
            hfil_indexes.append(False)
            _convert_group(iter([node]), cell)
    if col_index > max_col_size:
        max_col_size = col_index
    if any(r == "solid" for r in row_lines):
        parent.set("rowlines", " ".join(row_lines))
    if row is not None and cell is not None and len(cell) == 0:
        # Remove last row if it does not contain anything
        parent.remove(row)
    if max_col_size and command == commands.ALIGN:
        spacing = ("0em", "2em")
        multiplier = max_col_size // len(spacing)
        parent.set("columnspacing", " ".join(spacing * multiplier))
def _set_cell_alignment(cell: Element, hfil_indexes: List[bool]) -> None:
    """Set columnalign on *cell* from the positions of \\hfil fills.

    A leading-only \\hfil pushes content right; a trailing-only \\hfil pushes
    it left. Anything else (none, both, or a single token) leaves the cell's
    default alignment untouched.
    """
    if cell is None or len(hfil_indexes) < 2 or not any(hfil_indexes):
        return
    leading, trailing = hfil_indexes[0], hfil_indexes[-1]
    if leading and not trailing:
        cell.set("columnalign", "right")
    elif trailing and not leading:
        cell.set("columnalign", "left")
def _get_column_alignment(
    alignment: Optional[str], column_alignment: Optional[str], column_index: int
) -> Tuple[Optional[str], int]:
    """Return the alignment for the column at *column_index* plus the next index.

    The specifier string (e.g. "rl") repeats cyclically when a row has more
    columns than the specifier names. When *alignment* is falsy, the previous
    *column_alignment* is passed through unchanged.

    :param alignment: column-specifier letters, or None/"".
    :param column_alignment: alignment carried over from the previous column.
    :param column_index: zero-based index of the current column.
    :return: (alignment for this column or None, incremented column index).
    """
    if alignment:
        # Indexing modulo the specifier length is exact for in-range indexes
        # too, so it subsumes the former try/except-IndexError fallback.
        column_alignment = COLUMN_ALIGNMENT_MAP.get(alignment[column_index % len(alignment)])
    column_index += 1
    return column_alignment, column_index
def _make_matrix_cell(row: Element, column_alignment: Optional[str]) -> Element:
    """Append a new <mtd> cell to *row*, aligned when an alignment is given."""
    attributes = {"columnalign": column_alignment} if column_alignment else {}
    return SubElement(row, "mtd", attributes)
def _convert_group(nodes: Iterable[Node], parent: Element, font: Optional[Dict[str, Optional[str]]] = None) -> None:
    """Convert a sequence of parse-tree nodes, appending MathML to *parent*.

    :param nodes: iterator of nodes to convert (consumed, possibly greedily).
    :param parent: element converted output is appended to.
    :param font: current font mapping (tag name -> mathvariant), or None.
    """
    _font = font
    for node in nodes:
        token = node.token
        if token in (*commands.MSTYLE_SIZES, *commands.STYLES):
            # Size/style commands apply to everything that follows: consume
            # the rest of the iterator into this command's children.
            node = Node(token=token, children=tuple(n for n in nodes))
            _convert_command(node, parent, _font)
        elif token in commands.CONVERSION_MAP or token in (commands.MOD, commands.PMOD):
            _convert_command(node, parent, _font)
        elif token in commands.LOCAL_FONTS and node.children is not None:
            # Local font (e.g. \mathbb{...}) applies only within its group.
            _convert_group(iter(node.children), parent, commands.LOCAL_FONTS[token])
        elif token.startswith(commands.MATH) and node.children is not None:
            _convert_group(iter(node.children), parent, _font)
        elif token in commands.GLOBAL_FONTS.keys():
            # Global font (e.g. \rm) changes the font for subsequent siblings.
            _font = commands.GLOBAL_FONTS.get(token)
        elif node.children is None:
            _convert_symbol(node, parent, _font)
        elif node.children is not None:
            # Plain group: wrap children in an <mrow>.
            attributes = node.attributes or {}
            _row = SubElement(parent, "mrow", attrib=attributes)
            _convert_group(iter(node.children), _row, _font)
def _get_alignment_and_column_lines(alignment: Optional[str] = None) -> Tuple[Optional[str], Optional[str]]:
    """Split a column specifier into alignment letters and column rules.

    Vertical bars in the specifier (e.g. "c|c") become "solid" entries in the
    MathML ``columnlines`` value; gaps between columns without a bar become
    "none". Returns (letters-only specifier, columnlines string or None).
    """
    if alignment is None:
        return None, None
    if "|" not in alignment:
        return alignment, None
    letters: List[str] = []
    lines: List[str] = []
    for ch in alignment:
        if ch == "|":
            lines.append("solid")
        else:
            letters.append(ch)
        # Once we are two columns ahead of the recorded rules, the previous
        # column boundary had no bar.
        if len(letters) - len(lines) == 2:
            lines.append("none")
    return "".join(letters), " ".join(lines)
def separate_by_mode(text: str) -> Iterator[Tuple[str, Mode]]:
    """Split *text* into runs tagged TEXT or MATH, toggling the mode on '$'.

    Escaped dollars (\\$) are kept as literal text by MATH_MODE_PATTERN and do
    not toggle the mode. Yields (run, mode) pairs; empty runs are possible at
    mode boundaries except for the trailing run, which is only yielded when
    non-empty.
    """
    string = ""
    is_math_mode = False
    for match in MATH_MODE_PATTERN.findall(text):
        if match == "$":  # should match both $ and $$
            yield string, Mode.MATH if is_math_mode else Mode.TEXT
            string = ""
            is_math_mode = not is_math_mode
        else:
            string += match
    if len(string):
        yield string, Mode.MATH if is_math_mode else Mode.TEXT
    # TODO: if stays in math mode, means not terminated properly, raise error
def _convert_command(node: Node, parent: Element, font: Optional[Dict[str, Optional[str]]] = None) -> None:
    """Convert a single command node (a token in CONVERSION_MAP, \\mod or
    \\pmod) into MathML, appending the produced elements to *parent*.

    :param node: parsed command node, with optional children/text/delimiters.
    :param parent: element the converted output is appended to.
    :param font: current font mapping (tag name -> mathvariant), or None.
    """
    command = node.token
    modifier = node.modifier
    # Some commands need a wrapper element that becomes the effective parent.
    if command in (commands.SUBSTACK, commands.SMALLMATRIX):
        parent = SubElement(parent, "mstyle", scriptlevel="1")
    elif command == commands.CASES:
        parent = SubElement(parent, "mrow")
        lbrace = SubElement(parent, "mo", OrderedDict([("stretchy", "true"), ("fence", "true"), ("form", "prefix")]))
        lbrace.text = "&#x{};".format(convert_symbol(commands.LBRACE))
    elif command in (commands.DBINOM, commands.DFRAC):
        parent = SubElement(parent, "mstyle", displaystyle="true", scriptlevel="0")
    elif command == commands.HPHANTOM:
        parent = SubElement(parent, "mpadded", height="0", depth="0")
    elif command == commands.VPHANTOM:
        parent = SubElement(parent, "mpadded", width="0")
    elif command in (commands.TBINOM, commands.HBOX, commands.MBOX, commands.TFRAC):
        parent = SubElement(parent, "mstyle", displaystyle="false", scriptlevel="0")
    elif command in (commands.MOD, commands.PMOD):
        SubElement(parent, "mspace", width="1em")
    tag, attributes = copy.deepcopy(commands.CONVERSION_MAP[command])
    if node.attributes is not None and node.token != commands.SKEW:
        attributes.update(node.attributes)
    if command == commands.LEFT:
        parent = SubElement(parent, "mrow")
        _append_prefix_element(node, parent)
    alignment, column_lines = _get_alignment_and_column_lines(node.alignment)
    if column_lines:
        attributes["columnlines"] = column_lines
    # Scripts attached with \limits (or brace modifiers) render above/below
    # instead of as super/subscripts.
    if command == commands.SUBSUP and node.children is not None and node.children[0].token == commands.GCD:
        tag = "munderover"
    elif command == commands.SUPERSCRIPT and modifier in (commands.LIMITS, commands.OVERBRACE):
        tag = "mover"
    elif command == commands.SUBSCRIPT and modifier in (commands.LIMITS, commands.UNDERBRACE):
        tag = "munder"
    elif command == commands.SUBSUP and modifier in (commands.LIMITS, commands.OVERBRACE, commands.UNDERBRACE):
        tag = "munderover"
    elif (
        command in (commands.XLEFTARROW, commands.XRIGHTARROW) and node.children is not None and len(node.children) == 2
    ):
        tag = "munderover"
    element = SubElement(parent, tag, attributes)
    if command in commands.LIMIT:
        element.text = command[1:]
    elif command in (commands.MOD, commands.PMOD):
        element.text = "mod"
        SubElement(parent, "mspace", width="0.333em")
    elif command == commands.BMOD:
        element.text = "mod"
    elif command in (commands.XLEFTARROW, commands.XRIGHTARROW):
        style = SubElement(element, "mstyle", scriptlevel="0")
        arrow = SubElement(style, "mo")
        if command == commands.XLEFTARROW:
            arrow.text = "←"
        elif command == commands.XRIGHTARROW:
            arrow.text = "→"
    elif node.text is not None:
        if command == commands.MIDDLE:
            element.text = "&#x{};".format(convert_symbol(node.text))
        elif command == commands.HBOX:
            # \hbox content may mix text and $math$ runs; text runs go into
            # <mtext>, math runs are parsed and converted recursively.
            mtext: Optional[Element] = element
            for text, mode in separate_by_mode(node.text):
                if mode == Mode.TEXT:
                    if mtext is None:
                        mtext = SubElement(parent, tag, attributes)
                    mtext.text = text.replace(" ", "\u00a0")  # non-breaking space
                    _set_font(mtext, "mtext", font)
                    mtext = None
                else:
                    _row = SubElement(parent, "mrow")
                    _convert_group(iter(walk(text)), _row)
        else:
            if command == commands.FBOX:
                element = SubElement(element, "mtext")
            element.text = node.text.replace(" ", "\u00a0")  # non-breaking space
            _set_font(element, "mtext", font)
    elif node.delimiter is not None and command not in (commands.FRAC, commands.GENFRAC):
        # "." is LaTeX's invisible delimiter; emit no text for it.
        if node.delimiter != ".":
            symbol = convert_symbol(node.delimiter)
            element.text = node.delimiter if symbol is None else "&#x{};".format(symbol)
    if node.children is not None:
        _parent = element
        if command in (commands.LEFT, commands.MOD, commands.PMOD):
            _parent = parent
        if command in commands.MATRICES:
            if command == commands.CASES:
                alignment = "l"
            elif command in (commands.SPLIT, commands.ALIGN):
                alignment = "rl"
            _convert_matrix(iter(node.children), _parent, command, alignment=alignment)
        elif command == commands.CFRAC:
            for child in node.children:
                p = SubElement(_parent, "mstyle", displaystyle="false", scriptlevel="0")
                _convert_group(iter([child]), p, font)
        elif command == commands.SIDESET:
            # NOTE: a discarded Node(r"\style", ...) tuple literal (dead code)
            # was removed here; it had no effect on the output.
            left, right = node.children
            _convert_group(iter([left]), _parent, font)
            fill = SubElement(_parent, "mstyle", scriptlevel="0")
            SubElement(fill, "mspace", width="-0.167em")
            _convert_group(iter([right]), _parent, font)
        elif command == commands.SKEW:
            # Re-wrap the child so the kern is applied inside its braces.
            child = node.children[0]
            new_node = Node(
                token=child.token,
                children=(
                    Node(
                        token=commands.BRACES,
                        children=(*child.children, Node(token=commands.MKERN, attributes=node.attributes)),
                    ),
                ),
            )
            _convert_group(iter([new_node]), _parent, font)
        elif command in (commands.XLEFTARROW, commands.XRIGHTARROW):
            for child in node.children:
                padded = SubElement(
                    _parent,
                    "mpadded",
                    OrderedDict(
                        [("width", "+0.833em"), ("lspace", "0.556em"), ("voffset", "-.2em"), ("height", "-.2em")]
                    ),
                )
                _convert_group(iter([child]), padded, font)
                SubElement(padded, "mspace", depth=".25em")
        else:
            _convert_group(iter(node.children), _parent, font)
    _add_diacritic(command, element)
    _append_postfix_element(node, parent)
def _add_diacritic(command: str, parent: Element) -> None:
    """Append the accent <mo> for *command* if it is a known diacritic."""
    if command not in commands.DIACRITICS:
        return
    text, attributes = copy.deepcopy(commands.DIACRITICS[command])
    mo = SubElement(parent, "mo", attributes)
    mo.text = text
def _convert_and_append_command(command: str, parent: Element, attributes: Optional[Dict[str, str]] = None) -> None:
    """Append an <mo> for *command*, using its Unicode code point when known."""
    code_point = convert_symbol(command)
    mo = SubElement(parent, "mo", {} if attributes is None else attributes)
    mo.text = "&#x{};".format(code_point) if code_point else command
def _append_prefix_element(node: Node, parent: Element) -> None:
    """Append the opening fence (paren/bracket/brace/bar) for *node*'s command.

    Binomials and sized fractions get fixed min/max sizes so the fence height
    matches display vs. inline style.
    """
    size = "2.047em"
    if parent.attrib.get("displaystyle") == "false" or node.token == commands.TBINOM:
        size = "1.2em"
    if node.token in (r"\pmatrix", commands.PMOD):
        _convert_and_append_command(r"\lparen", parent)
    elif node.token in (commands.BINOM, commands.DBINOM, commands.TBINOM):
        _convert_and_append_command(r"\lparen", parent, {"minsize": size, "maxsize": size})
    elif node.token == r"\bmatrix":
        _convert_and_append_command(r"\lbrack", parent)
    elif node.token == r"\Bmatrix":
        _convert_and_append_command(r"\lbrace", parent)
    elif node.token == r"\vmatrix":
        _convert_and_append_command(r"\vert", parent)
    elif node.token == r"\Vmatrix":
        _convert_and_append_command(r"\Vert", parent)
    elif node.token in (commands.FRAC, commands.GENFRAC) and node.delimiter is not None and node.delimiter[0] != ".":
        # TODO: use 1.2em if inline
        _convert_and_append_command(node.delimiter[0], parent, {"minsize": size, "maxsize": size})
def _append_postfix_element(node: Node, parent: Element) -> None:
    """Append the closing fence for *node*'s command (mirror of the prefix).

    For \\skew, appends a negative <mspace> that cancels the kern inserted
    inside the accent group.
    """
    size = "2.047em"
    if parent.attrib.get("displaystyle") == "false" or node.token == commands.TBINOM:
        size = "1.2em"
    if node.token in (r"\pmatrix", commands.PMOD):
        _convert_and_append_command(r"\rparen", parent)
    elif node.token in (commands.BINOM, commands.DBINOM, commands.TBINOM):
        _convert_and_append_command(r"\rparen", parent, {"minsize": size, "maxsize": size})
    elif node.token == r"\bmatrix":
        _convert_and_append_command(r"\rbrack", parent)
    elif node.token == r"\Bmatrix":
        _convert_and_append_command(r"\rbrace", parent)
    elif node.token == r"\vmatrix":
        _convert_and_append_command(r"\vert", parent)
    elif node.token == r"\Vmatrix":
        _convert_and_append_command(r"\Vert", parent)
    elif node.token in (commands.FRAC, commands.GENFRAC) and node.delimiter is not None and node.delimiter[1] != ".":
        # TODO: use 1.2em if inline
        _convert_and_append_command(node.delimiter[1], parent, {"minsize": size, "maxsize": size})
    elif node.token == commands.SKEW and node.attributes is not None:
        SubElement(parent, "mspace", width="-" + node.attributes["width"])
def _convert_symbol(node: Node, parent: Element, font: Optional[Dict[str, Optional[str]]] = None) -> None:
    """Convert a leaf token into the appropriate MathML element.

    Numbers become <mn>, operators/fences <mo>, spaces <mtext>, and most other
    tokens <mi>; a few commands (\\not, \\idotsint, \\LaTeX/\\TeX logos, named
    operators) get dedicated structures.

    :param node: leaf node (no children) to convert.
    :param parent: element the output is appended to.
    :param font: current font mapping (tag name -> mathvariant), or None.
    """
    token = node.token
    attributes = node.attributes or {}
    symbol = convert_symbol(token)
    # Tokens starting with a digit are numbers. The dot is now escaped
    # (was r"\d+(.\d+)?"); since the group is optional, the set of matching
    # tokens is unchanged, but the intent — optional decimal part — is correct.
    if re.match(r"\d+(\.\d+)?", token):
        element = SubElement(parent, "mn", attrib=attributes)
        element.text = token
        _set_font(element, element.tag, font)
    elif token in OPERATORS:
        element = SubElement(parent, "mo", attrib=attributes)
        element.text = token if symbol is None else "&#x{};".format(symbol)
        if token == r"\|":
            element.attrib["fence"] = "false"
        if token == r"\smallint":
            element.attrib["largeop"] = "false"
        if token in ("(", ")", "[", "]", "|", r"\|", r"\{", r"\}", r"\surd"):
            element.attrib["stretchy"] = "false"
            _set_font(element, "fence", font)
        else:
            _set_font(element, element.tag, font)
    elif (
        symbol
        and (
            # U+2200–U+22FF mathematical operators, U+2190–U+21FF arrows.
            int(symbol, 16) in range(int("2200", 16), int("22FF", 16) + 1)
            or int(symbol, 16) in range(int("2190", 16), int("21FF", 16) + 1)
        )
        or symbol == "."
    ):
        element = SubElement(parent, "mo", attrib=attributes)
        element.text = "&#x{};".format(symbol)
        _set_font(element, element.tag, font)
    elif token in (r"\ ", "~", commands.NOBREAKSPACE, commands.SPACE):
        element = SubElement(parent, "mtext", attrib=attributes)
        element.text = "\u00a0"  # non-breaking space
        _set_font(element, "mtext", font)
    elif token == commands.NOT:
        mpadded = SubElement(parent, "mpadded", width="0")
        element = SubElement(mpadded, "mtext")
        element.text = "⧸"
    elif token in (
        commands.DETERMINANT,
        commands.GCD,
        commands.INTOP,
        commands.INJLIM,
        commands.LIMINF,
        commands.LIMSUP,
        commands.PR,
        commands.PROJLIM,
    ):
        # Named big operators whose limits may move above/below.
        element = SubElement(parent, "mo", attrib={"movablelimits": "true", **attributes})
        texts = {
            commands.INJLIM: "inj lim",
            commands.INTOP: "∫",
            commands.LIMINF: "lim inf",
            commands.LIMSUP: "lim sup",
            commands.PROJLIM: "proj lim",
        }
        element.text = texts.get(token, token[1:])
        _set_font(element, element.tag, font)
    elif token == commands.IDOTSINT:
        _parent = SubElement(parent, "mrow", attrib=attributes)
        for s in ("∫", "⋯", "∫"):
            element = SubElement(_parent, "mo")
            element.text = s
    elif token in (commands.LATEX, commands.TEX):
        # Typeset the LaTeX/TeX logo with manual kerning and letter offsets.
        _parent = SubElement(parent, "mrow", attrib=attributes)
        if token == commands.LATEX:
            mi_l = SubElement(_parent, "mi")
            mi_l.text = "L"
            SubElement(_parent, "mspace", width="-.325em")
            mpadded = SubElement(_parent, "mpadded", height="+.21ex", depth="-.21ex", voffset="+.21ex")
            mstyle = SubElement(mpadded, "mstyle", displaystyle="false", scriptlevel="1")
            mrow = SubElement(mstyle, "mrow")
            mi_a = SubElement(mrow, "mi")
            mi_a.text = "A"
            SubElement(_parent, "mspace", width="-.17em")
            _set_font(mi_l, mi_l.tag, font)
            _set_font(mi_a, mi_a.tag, font)
        mi_t = SubElement(_parent, "mi")
        mi_t.text = "T"
        SubElement(_parent, "mspace", width="-.14em")
        mpadded = SubElement(_parent, "mpadded", height="-.5ex", depth="+.5ex", voffset="-.5ex")
        mrow = SubElement(mpadded, "mrow")
        mi_e = SubElement(mrow, "mi")
        mi_e.text = "E"
        SubElement(_parent, "mspace", width="-.115em")
        mi_x = SubElement(_parent, "mi")
        mi_x.text = "X"
        _set_font(mi_t, mi_t.tag, font)
        _set_font(mi_e, mi_e.tag, font)
        _set_font(mi_x, mi_x.tag, font)
    elif token.startswith(commands.OPERATORNAME):
        # Strip the leading '\operatorname{' (14 chars) and trailing '}'.
        element = SubElement(parent, "mo", attrib=attributes)
        element.text = token[14:-1]
    elif token.startswith(commands.BACKSLASH):
        element = SubElement(parent, "mi", attrib=attributes)
        if symbol:
            element.text = "&#x{};".format(symbol)
        elif token in commands.FUNCTIONS:
            element.text = token[1:]
        else:
            element.text = token
        _set_font(element, element.tag, font)
    else:
        element = SubElement(parent, "mi", attrib=attributes)
        element.text = token
        _set_font(element, element.tag, font)
def _set_font(element: Element, key: str, font: Optional[Dict[str, Optional[str]]]) -> None:
    """Apply the mathvariant for *key* from the font mapping, if one is set.

    Raises KeyError if *font* is given but has no entry for *key*.
    """
    if font is None:
        return
    variant = font[key]
    if variant is not None:
        element.set("mathvariant", variant)
def main() -> None:  # pragma: no cover
    """Command-line entry point: convert LaTeX (from -t, -f, or stdin) to MathML."""
    import argparse
    import sys
    parser = argparse.ArgumentParser(description="Pure Python library for LaTeX to MathML conversion")
    parser.add_argument("-V", "--version", dest="version", action="store_true", required=False, help="Show version")
    parser.add_argument("-b", "--block", dest="block", action="store_true", required=False, help="Display block")
    # Exactly one input source may be given; with none, nothing is printed.
    required = parser.add_argument_group("required arguments")
    group = required.add_mutually_exclusive_group(required=False)
    group.add_argument("-t", "--text", dest="text", type=str, required=False, help="Text")
    group.add_argument("-f", "--file", dest="file", type=str, required=False, help="File")
    group.add_argument("-s", "--stdin", dest="stdin", action="store_true", required=False, help="Stdin")
    arguments = parser.parse_args()
    display = "block" if arguments.block else "inline"
    if arguments.version:
        import latex2mathml
        print("latex2mathml", latex2mathml.__version__)
    elif arguments.text:
        print(convert(arguments.text, display=display))
    elif arguments.file:
        with open(arguments.file) as f:
            print(convert(f.read(), display=display))
    elif arguments.stdin:
        print(convert(sys.stdin.read(), display=display))
if __name__ == "__main__":  # pragma: no cover
    main()
| 22,814 | 38.404145 | 120 | py |
latex2mathml | latex2mathml-master/latex2mathml/__init__.py | from importlib import metadata
# Package version, resolved at import time from the installed distribution metadata.
__version__ = metadata.version("latex2mathml")
| 79 | 19 | 46 | py |
zorba | zorba-master/swig/python/tests/test04.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
def test(zorba):
  # Compile the XQuery program in test04.xq and print its serialized result.
  # NOTE: Python 2 script (print statements), driven by the zorba_api SWIG binding.
  print 'Executing: test04.xq'
  f = open('test04.xq', 'r')
  lines = f.read()
  f.close()
  xquery = zorba.compileQuery(lines)
  result = xquery.execute()
  print result
  return
# Driver: boot an in-memory store, run the test, then shut everything down.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: XQuery execute"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
| 1,076 | 24.046512 | 74 | py |
zorba | zorba-master/swig/python/tests/test05.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
def test(zorba):
  # Compile test05.xq and print its query plan as XML (not the query result).
  # NOTE: Python 2 script, driven by the zorba_api SWIG binding.
  print 'Executing: test05.xq'
  f = open('test05.xq', 'r')
  lines = f.read()
  f.close()
  xquery = zorba.compileQuery(lines)
  result = xquery.printPlanAsXML()
  print result
  return
# Driver: boot an in-memory store, run the test, then shut everything down.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: XQuery execute - printPlanAsXML"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
| 1,100 | 24.604651 | 74 | py |
zorba | zorba-master/swig/python/tests/test02.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
def test(zorba):
  # Compile the literal sequence (1,2,3,4,5) and walk it with a result
  # iterator, printing each item's string value.
  # NOTE: Python 2 script, driven by the zorba_api SWIG binding.
  xquery = zorba.compileQuery("(1,2,3,4,5)")
  iter = xquery.iterator()
  iter.open()
  item = zorba_api.Item_createEmptyItem()
  while iter.next(item):
    print item.getStringValue()
  # The iterator must be closed and destroyed explicitly to release C++ resources.
  iter.close()
  iter.destroy()
  return
# Driver: boot an in-memory store, run the test, then shut everything down.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: Compile query string"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
| 1,102 | 25.261905 | 74 | py |
zorba | zorba-master/swig/python/tests/test14.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
class MyDiagnosticHandler(zorba_api.DiagnosticHandler):
  # Callback object passed to compileQuery; Zorba calls error() with
  # diagnostic details instead of raising a Python exception.
  def error(self, *args):
    print "Error args: ", args
def test(zorba):
  # Compile compilerHints.xq with an explicit diagnostic handler and
  # compiler hints (lib-module flag, optimization level 1), then execute it.
  # NOTE: Python 2 script, driven by the zorba_api SWIG binding.
  print 'Executing: compilerHints.xq'
  f = open('compilerHints.xq', 'r')
  lines = f.read()
  f.close()
  diagnosticHandler = MyDiagnosticHandler()
  compilerHints = zorba_api.CompilerHints()
  compilerHints.setLibModule(True)
  compilerHints.setOptimizationLevel(1)
  xquery = zorba.compileQuery(lines, compilerHints, diagnosticHandler)
  result = xquery.execute()
  print result
  return
# Driver: boot an in-memory store, run the test, then shut everything down.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: CompileQuery string + Dignostinc handler + CompilerHint - with optimization 1 - setLibModule(True)"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
| 1,488 | 28.196078 | 115 | py |
zorba | zorba-master/swig/python/tests/test12.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
def test(zorba):
  # Run a JSONiq query that loads satellites.json and partitions satellite
  # keys into 'visible'/'invisible' arrays; errors are caught and printed.
  # NOTE: Python 2 script, driven by the zorba_api SWIG binding.
  try:
    query = (
    " let $sats := jn:json-doc('@pythonPath@/tests/satellites.json')('satellites') "
    " return { "
    " 'visible' : [ "
    " for $sat in jn:keys($sats) "
    " where $sats($sat)('visible') "
    " return $sat "
    " ], "
    " 'invisible' : [ "
    " for $sat in jn:keys($sats) "
    " where not($sats($sat)('visible')) "
    " return $sat "
    " ] "
    " } " )
    xquery = zorba.compileQuery(query)
    print xquery.execute()
    print "Success"
  except Exception, e:
    print "Caught error: ", e
  return
# Driver: boot an in-memory store, run the test, then shut everything down.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: Compile query string using JSONiq"
test(zorba)
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
| 1,579 | 29.384615 | 94 | py |
zorba | zorba-master/swig/python/tests/test01.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
# Smoke test: acquire a Zorba instance over an in-memory store, then shut
# both down cleanly. NOTE: Python 2 script using the zorba_api SWIG binding.
print "Running: Get zorba instance and shutdown"
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
print "Success"
| 854 | 30.666667 | 74 | py |
zorba | zorba-master/swig/python/tests/test10.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
def test(zorba):
  # Parse books.xml into a document item, set it as the context item of the
  # query ".", and print the result (i.e. echo the document).
  # NOTE: Python 2 script, driven by the zorba_api SWIG binding.
  f = open('books.xml', 'r')
  lines = f.read()
  f.close()
  dataManager = zorba.getXmlDataManager()
  docIter = dataManager.parseXML(lines)
  docIter.open();
  doc = zorba_api.Item_createEmptyItem()
  docIter.next(doc)
  docIter.close()
  docIter.destroy()
  xquery = zorba.compileQuery(".")
  dynCtx = xquery.getDynamicContext();
  dynCtx.setContextItem(doc);
  print xquery.execute()
  return
# Driver: boot an in-memory store, run the test, then shut everything down.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: XQuery execute - Get Iterator and print info from its items"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
| 1,318 | 24.365385 | 76 | py |
zorba | zorba-master/swig/python/tests/test08.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
def test(zorba):
  # Execute an inline query mixing atomics and constructed XML, print the
  # serialized result, dump the item tree via printIterator, and write the
  # result to test8_result.xml.
  # NOTE: Python 2 script, driven by the zorba_api SWIG binding.
  print 'Executing: test08.xq'
  lines = '(1, xs:int(2),"bla", <a><b att="{(3, xs:int(4),"foo", "bar")}"/>text<!--a comment--><?pi?></a>)'
  xquery = zorba.compileQuery(lines)
  result = xquery.execute()
  print result
  print "Iterator:"
  iterator = xquery.iterator()
  printIterator(iterator, "")
  f = open('test8_result.xml', 'w')
  f.write(result)
  f.close()
  return
def printIterator(iterator, pre):
  # Walk *iterator*, printing every item via printItem; *pre* is the current
  # indentation prefix for nested output.
  item = zorba_api.Item()
  iterator.open()
  while iterator.next(item):
    printItem(item, pre)
  iterator.close()
def printItem(item, pre):
  # Print a human-readable description of *item* (atomic value or node),
  # recursing into attributes/children. Node kind codes come from the
  # zorba_api binding: 0=anyNode 1=document 2=element 3=attribute 4=text
  # 5=processing-instruction 6=comment.
  if item.isAtomic():
    typeItem = item.getType()
    print pre + "Leaf Atomic: '" + item.getStringValue() + "' \ttype:", typeItem.getStringValue()
    return
  if item.isNode():
    kind = item.getNodeKind()
    if kind == 0: # anyNode
      print pre + 'Any node'
      print pre + " Children:"
      printIterator(item.getChildren(), pre+" ")
    if kind == 1: # doc
      print pre + 'Doc'
      print pre + " Children:"
      printIterator(item.getChildren(), pre+" ")
    if kind == 2: # element
      nodeName = zorba_api.Item()
      item.getNodeName(nodeName)
      typeItem = item.getType()
      print pre + "Start Element: ", nodeName.getStringValue(), " \ttype:", typeItem.getStringValue()
      print pre + " Attributes:"
      printIterator(item.getAttributes(), pre+" ")
      print pre + " Children:"
      printIterator(item.getChildren(), pre+" ")
      print pre + "End Element: ", nodeName.getStringValue()
    if kind == 3: # attribute
      nodeName = zorba_api.Item()
      item.getNodeName(nodeName)
      typeItem = item.getType()
      print pre + "Attribute: ", nodeName.getStringValue(), "= '" + item.getStringValue() + "'"" \ttype:", typeItem.getStringValue()
      print pre+" Atomization value:"
      printIterator(item.getAtomizationValue(), pre+" ")
    if kind == 4: # text
      typeItem = item.getType()
      print pre + "Text: ", item.getStringValue(), " \ttype:", typeItem.getStringValue()
      print pre+" Atomization value:"
      printIterator(item.getAtomizationValue(), pre+" ")
    if kind == 5: # pi
      nodeName = zorba_api.Item()
      item.getNodeName(nodeName)
      print pre + "Pi: ", nodeName.getStringValue()
    if kind == 6: # comment
      print pre + "Comment: ", item.getStringValue()
  else:
    print pre+"Item not Node, not Atomic"
# Driver: boot an in-memory store, run the test, then shut the engine down.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: XQuery execute - Get Iterator and print info from its items"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store) | 3,432 | 29.651786 | 132 | py |
zorba | zorba-master/swig/python/tests/test11.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
# Collection-manager test: create a collection, verify it is available, and
# insert a parsed XML document into it.
# NOTE: Python 2 script, driven by the zorba_api SWIG binding.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: CompileQuery CollectionManager"
xmlManager = zorba.getXmlDataManager()
collectionManager = xmlManager.getCollectionManager()
itemFactory = zorba.getItemFactory()
name = itemFactory.createQName("http://www.zorba-xquery.com/", "aaa")
collectionManager.createCollection(name)
isAdded = collectionManager.isAvailableCollection(name)
if isAdded :
  collection = collectionManager.getCollection(name);
  data = xmlManager.parseXMLtoItem("<books><book>Book 1</book><book>Book 2</book></books>");
  itemSequence = zorba_api.ItemSequence(data)
  collection.insertNodesLast(itemSequence)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
| 1,404 | 31.674419 | 92 | py |
zorba | zorba-master/swig/python/tests/test07.1.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
def test(zorba):
  """Parse books.xml, register it under a fake URI, then resolve it via doc()."""
  # Read the sample document's text from disk.
  f = open('books.xml', 'r')
  lines = f.read()
  f.close()
  dataManager = zorba.getXmlDataManager()
  # Parse the text; the iterator yields exactly one document item here.
  docIter = dataManager.parseXML(lines)
  docIter.open();
  doc = zorba_api.Item_createEmptyItem()
  docIter.next(doc)
  docIter.close()
  docIter.destroy()
  # Publish the parsed document under an arbitrary URI so the query below
  # can fetch it with doc().
  docManager = dataManager.getDocumentManager()
  docManager.put("my_fake_books.xml", doc)
  xquery = zorba.compileQuery("doc('my_fake_books.xml')")
  result = xquery.execute()
  print result
  return
# Script entry: create store + engine, run the test, clean up.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: XQuery execute - parsing XML"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
zorba_api.InMemoryStore_shutdown(store)
| 1,377 | 24.054545 | 74 | py |
zorba | zorba-master/swig/python/tests/test07.2.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
def test(zorba):
  """Compile a small element constructor and stream its result through SAX2."""
  xquery = zorba.compileQuery("<a>text-a1<b at1='att1' at2='{1+2}'>text-b1</b>text-a2</a>")
  # Wire every SAX2 event to the Python callbacks defined below.
  saxHandler = zorba_api.SAX2ContentHandlerProxy()
  saxHandler.setStartDocumentHandler(startDocumentHandler);
  saxHandler.setEndDocumentHandler(endDocumentHandler);
  saxHandler.setStartElementHandler(startElementHandler);
  saxHandler.setEndElementHandler(endElementHandler);
  saxHandler.setCharactersHandler(charactersHandler);
  saxHandler.setProcessingInstructionHandler(processingInstructionHandler);
  saxHandler.setIgnorableWhitespaceHandler(ignorableWhitespaceHandler);
  saxHandler.setStartPrefixMappingHandler(startPrefixMappingHandler);
  saxHandler.setEndPrefixMappingHandler(endPrefixMappingHandler);
  saxHandler.setSkippedEntityHandler(skippedEntityHandler);
  xquery.executeSAX(saxHandler)
  return
# Each callback below just logs its event, so stdout documents the SAX
# stream produced by the query above.
def startDocumentHandler():
  print "Start Document"
  return
def endDocumentHandler():
  print "End Document"
  return
def startElementHandler(URI, localName, QName, SAXAttributes):
  print "Start Element - ", QName
  return
def endElementHandler(URI, localName, QName):
  print "End Element - ", QName
  return
def charactersHandler(text):
  print "Characters - ", text
  return
def processingInstructionHandler(target, data):
  print "Processing Instruction"
  return
def ignorableWhitespaceHandler(text):
  print "Ignorable Whitespace - ", text
  return
def startPrefixMappingHandler(prefix, URI):
  print "Start Prefix Mapping - ", prefix
  return
def endPrefixMappingHandler(prefix):
  print "End Prefix Mapping - ", prefix
  return
def skippedEntityHandler(name):
  print "Skipped Entity - ", name
  return
# Script entry: create store + engine, run the test, clean up.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: XQuery execute - executeSAX"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
zorba_api.InMemoryStore_shutdown(store)
| 2,516 | 29.695122 | 92 | py |
zorba | zorba-master/swig/python/tests/test03.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
class MyDiagnosticHandler(zorba_api.DiagnosticHandler):
  """Callback object Zorba invokes with compile/runtime diagnostics."""
  def error(self, *args):
    print "Error args: ", args
def test(zorba):
  """Compile '1 div 0' with a diagnostic handler attached and show the error."""
  diagnosticHandler = MyDiagnosticHandler()
  try:
    print "Compiling 1 div 0"
    xquery = zorba.compileQuery("1 div 0", diagnosticHandler)
    print xquery.execute()
  except Exception, e:
    # Division by zero is expected to raise; report and continue.
    print "Caught error: ", e
  return
# Script entry: create store + engine, run the test, clean up.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: Compile query string using Diagnostic Handler"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
zorba_api.InMemoryStore_shutdown(store)
| 1,247 | 27.363636 | 74 | py |
zorba | zorba-master/swig/python/tests/test06.py | # Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
def test(zorba):
  """Compile test06.xq and print its compiled plan rendered as DOT."""
  # Read the query source and print its plan.
  print 'Executing: test06.xq'
  f = open('test06.xq', 'r')
  lines = f.read()
  f.close()
  xquery = zorba.compileQuery(lines)
  result = xquery.printPlanAsDOT()
  print result
  return
# Script entry: create store + engine, run the test, clean up.
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
# NOTE(review): the banner says printPlanAsXML but the call above is
# printPlanAsDOT -- the banner text appears stale.
print "Running: XQuery execute - printPlanAsXML"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
zorba_api.InMemoryStore_shutdown(store)
| 1,100 | 24.604651 | 74 | py |
zorba | zorba-master/scripts/cmake.py | """
# Copyright 2006-2016 zorba.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Script to run cmake in the build directory of zorba in a MS Windows
platform. You may need to change the paths to the external libraries
for this script to work. Ths paths shown here are the default install
paths for the respective projects.
From the build directory, you can run this script like this
build> python ..\cmake.py
author: Paul F. Kunz <paulfkunz@gmail.com>
"""
# Each entry below is one verbatim fragment of the cmake command line;
# joined in order they reproduce the exact invocation, including the
# external-library locations for a default MS Windows install.
_fragments = (
    "cmake ",
    '-G "Visual Studio 8 2005" ',
    r' -D ICU_LIBRARY="C:\icu\lib\icuuc.lib" ',
    r' -D ICU_DATA_LIBRARY="C:\icu\lib\icudt.lib" ',
    r' -D ICU_I18N_LIBRARY="C:\icu\lib\icuin.lib" ',
    r' -D ICU_INCLUDE="C:\icu\include" ',
    r' -D ICONV_INCLUDE_DIR="C:\iconv-1.9.2.win32\include" ',
    r' -D ICONV_LIBRARY="C:\iconv-1.9.2.win32\lib\iconv.lib" ',
    r' -D LIBXML2_INCLUDE_DIR="C:\libxml2-2.6.32+.win32\include" ',
    r' -D LIBXML2_LIBRARIES="C:\libxml2-2.6.32+.win32\lib\libxml2.lib" ',
    r' -D BOOST_ROOT="C:\boost" ',
    r' -D XERCESC_INCLUDE="C:\xerces-c_2_8_0-x86-windows-vc_8_0\include" ',
    r' -D XERCESC_LIBRARY="C:\xerces-c_2_8_0-x86-windows-vc_8_0\lib\xerces-c_2.lib" ',
    r' -D LIBTIDY_INCLUDE_DIR="C:\Tidy\include"',
    r' -D LIBTIDY_LIBRARIES="C:\Tidy\build\msvc\releaseDLL\libtidy.lib"',
    r' -D LIBCURL_INCLUDE_DIR="C:\libcurl-7.15.1-msvc-win32-ssl-0.9.8a-zlib-1.2.3\include"',
    r' -D CURL_LIBRARY="C:\libcurl-7.15.1-msvc-win32-ssl-0.9.8a-zlib-1.2.3\libcurl_imp.lib"',
    r" ..\ ",
)
cmd = "".join(_fragments)

# Run cmake from the build directory against the parent source tree.
import os
os.system(cmd)
| 2,105 | 38 | 94 | py |
DEAT | DEAT-main/preactresnet.py | '''Pre-activation ResNet in PyTorch.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
# Normalization-layer configuration shared by every block in this file.
# The commented alternative switches the whole network to InstanceNorm2d
# without tracked statistics.
track_running_stats=True
affine=True
normal_func = nn.BatchNorm2d
# track_running_stats=False
# affine=True
# normal_func = nn.InstanceNorm2d
if not track_running_stats:
    # Loud reminder that running statistics are disabled.
    print('BN track False')
class PreActBlock(nn.Module):
    '''Pre-activation BasicBlock: norm -> activation -> conv, twice, plus a
    residual shortcut (1x1 projection when shape changes).'''
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, activation='ReLU', softplus_beta=1):
        super(PreActBlock, self).__init__()
        self.bn1 = normal_func(in_planes, track_running_stats=track_running_stats, affine=affine)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = normal_func(planes, track_running_stats=track_running_stats, affine=affine)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        # A 1x1 projection is only needed when the spatial size or channel
        # count changes along the residual path.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            )

        # Map the requested activation name to a constructor and echo the
        # chosen name to stdout (same output as the former if/elif chain).
        factories = {
            'ReLU': lambda: nn.ReLU(inplace=True),
            'Softplus': lambda: nn.Softplus(beta=softplus_beta, threshold=20),
            'GELU': lambda: nn.GELU(),
            'ELU': lambda: nn.ELU(alpha=1.0, inplace=True),
            'LeakyReLU': lambda: nn.LeakyReLU(negative_slope=0.1, inplace=True),
            'SELU': lambda: nn.SELU(inplace=True),
            'CELU': lambda: nn.CELU(alpha=1.2, inplace=True),
            'Tanh': lambda: nn.Tanh(),
        }
        if activation in factories:
            self.relu = factories[activation]()
            print(activation)

    def forward(self, x):
        pre = self.relu(self.bn1(x))
        # The shortcut branches off the pre-activated tensor when projected,
        # otherwise it reuses the raw input.
        residual = self.shortcut(pre) if hasattr(self, 'shortcut') else x
        out = self.conv1(pre)
        out = self.conv2(self.relu(self.bn2(out)))
        return out + residual
class PreActBottleneck(nn.Module):
    '''Pre-activation Bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, each
    preceded by norm + ReLU, plus a residual shortcut.

    Note: the ``activation``/``softplus_beta`` arguments are accepted for
    signature parity with PreActBlock but this variant always uses ReLU.'''
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, activation='ReLU', softplus_beta=1):
        super(PreActBottleneck, self).__init__()
        width_out = self.expansion * planes
        self.bn1 = normal_func(in_planes, track_running_stats=track_running_stats, affine=affine)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = normal_func(planes, track_running_stats=track_running_stats, affine=affine)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn3 = normal_func(planes, track_running_stats=track_running_stats, affine=affine)
        self.conv3 = nn.Conv2d(planes, width_out, kernel_size=1, bias=False)
        # Project the shortcut only when shape changes along the main path.
        if stride != 1 or in_planes != width_out:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, width_out, kernel_size=1, stride=stride, bias=False)
            )

    def forward(self, x):
        pre = F.relu(self.bn1(x))
        residual = self.shortcut(pre) if hasattr(self, 'shortcut') else x
        out = self.conv1(pre)
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.conv3(F.relu(self.bn3(out)))
        return out + residual
class PreActResNet(nn.Module):
    """Pre-activation ResNet backbone with optional feature/weight
    normalization (HE-style) on the final linear classifier.

    Args:
        block: residual block class (PreActBlock or PreActBottleneck).
        num_blocks: list of four ints, blocks per stage.
        num_classes: output dimension of the classifier.
        normalize: if True, L2-normalize features AND classifier rows, and
            scale features by ``scale`` (the linear layer then has no bias).
        normalize_only_FN: if True, only L2-normalize the features.
        scale: multiplier applied to normalized features when ``normalize``.
        activation / softplus_beta: forwarded to every residual block.
    """
    def __init__(self, block, num_blocks, num_classes=10, normalize = False, normalize_only_FN = False, scale = 15, activation='ReLU', softplus_beta=1):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        self.normalize = normalize
        self.normalize_only_FN = normalize_only_FN
        self.scale = scale
        self.activation = activation
        self.softplus_beta = softplus_beta
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.bn = normal_func(512 * block.expansion, track_running_stats=track_running_stats, affine=affine)
        # With weight normalization the bias is dropped so logits are pure
        # cosine-style scores.
        if self.normalize:
            self.linear = nn.Linear(512*block.expansion, num_classes, bias=False)
        else:
            self.linear = nn.Linear(512*block.expansion, num_classes)
        # Final activation used after the last BN; an unknown name leaves
        # self.relu undefined and forward() would raise AttributeError.
        if activation == 'ReLU':
            self.relu = nn.ReLU(inplace=True)
            print('ReLU')
        elif activation == 'Softplus':
            self.relu = nn.Softplus(beta=softplus_beta, threshold=20)
            print('Softplus')
        elif activation == 'GELU':
            self.relu = nn.GELU()
            print('GELU')
        elif activation == 'ELU':
            self.relu = nn.ELU(alpha=1.0, inplace=True)
            print('ELU')
        elif activation == 'LeakyReLU':
            self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
            print('LeakyReLU')
        elif activation == 'SELU':
            self.relu = nn.SELU(inplace=True)
            print('SELU')
        elif activation == 'CELU':
            self.relu = nn.CELU(alpha=1.2, inplace=True)
            print('CELU')
        elif activation == 'Tanh':
            self.relu = nn.Tanh()
            print('Tanh')
        print('Use activation of ' + activation)
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first uses ``stride``."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride,
                activation=self.activation, softplus_beta=self.softplus_beta))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.relu(self.bn(out))
        # Global 4x4 average pool, then flatten to (N, 512*expansion).
        # Assumes 32x32 inputs so the feature map is 4x4 here -- TODO confirm.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        if self.normalize_only_FN:
            out = F.normalize(out, p=2, dim=1)
        if self.normalize:
            out = F.normalize(out, p=2, dim=1) * self.scale
            # Re-normalize classifier rows IN-PLACE on every forward pass so
            # logits stay scaled cosine similarities.
            for _, module in self.linear.named_modules():
                if isinstance(module, nn.Linear):
                    module.weight.data = F.normalize(module.weight, p=2, dim=1)
        return self.linear(out)
def PreActResNet18(num_classes=10, normalize = False, normalize_only_FN = False, scale = 15, activation='ReLU', softplus_beta=1):
    """Build the 18-layer variant (2-2-2-2 BasicBlocks), forwarding all
    normalization/activation options to PreActResNet."""
    return PreActResNet(
        PreActBlock, [2, 2, 2, 2],
        num_classes=num_classes,
        normalize=normalize,
        normalize_only_FN=normalize_only_FN,
        scale=scale,
        activation=activation,
        softplus_beta=softplus_beta,
    )
def PreActResNet34():
    """34-layer variant: 3-4-6-3 BasicBlocks, default options."""
    return PreActResNet(PreActBlock, [3, 4, 6, 3])

def PreActResNet50():
    """50-layer variant: 3-4-6-3 Bottlenecks, default options."""
    return PreActResNet(PreActBottleneck, [3, 4, 6, 3])

def PreActResNet101():
    """101-layer variant: 3-4-23-3 Bottlenecks, default options."""
    return PreActResNet(PreActBottleneck, [3, 4, 23, 3])

def PreActResNet152():
    """152-layer variant: 3-8-36-3 Bottlenecks, default options."""
    return PreActResNet(PreActBottleneck, [3, 8, 36, 3])
def test():
    """Smoke-test: push one random CIFAR-sized batch through PreActResNet18
    and print the logits' shape."""
    model = PreActResNet18()
    logits = model(torch.randn(1, 3, 32, 32))
    print(logits.size())
# test()
| 7,760 | 37.044118 | 152 | py |
DEAT | DEAT-main/utils.py | import numpy as np
from collections import namedtuple
import torch
from torch import nn
import torchvision
from torch.optim.optimizer import Optimizer, required
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
################################################################
## Components from https://github.com/davidcpage/cifar10-fast ##
################################################################
#####################
## data preprocessing
#####################
# Per-channel CIFAR-10 training-set statistics (inputs scaled to [0, 1]),
# used to standardize images before they enter the network.
cifar10_mean = (0.4914, 0.4822, 0.4465) # equals np.mean(train_set.train_data, axis=(0,1,2))/255
cifar10_std = (0.2471, 0.2435, 0.2616) # equals np.std(train_set.train_data, axis=(0,1,2))/255
def pad(x, border=4):
    """Reflect-pad the H and W axes of an NHWC image batch by ``border`` pixels."""
    widths = [(0, 0), (border, border), (border, border), (0, 0)]
    return np.pad(x, widths, mode='reflect')
def transpose(x, source='NHWC', target='NCHW'):
    """Reorder array axes from the ``source`` layout string to ``target``."""
    order = [source.index(axis) for axis in target]
    return x.transpose(order)
#####################
## data augmentation
#####################
class Crop(namedtuple('Crop', ('h', 'w'))):
    """Fixed h-by-w crop of a CHW array; the corner (x0, y0) is sampled per image."""

    def __call__(self, x, x0, y0):
        return x[:, y0:y0 + self.h, x0:x0 + self.w]

    def options(self, x_shape):
        # Every corner position that keeps the crop fully inside the image.
        _, height, width = x_shape
        return {'x0': range(width + 1 - self.w), 'y0': range(height + 1 - self.h)}

    def output_shape(self, x_shape):
        channels, _, _ = x_shape
        return (channels, self.h, self.w)
class FlipLR(namedtuple('FlipLR', ())):
    """Horizontal flip of a CHW array, applied according to a per-image choice."""

    def __call__(self, x, choice):
        if not choice:
            return x
        return x[:, :, ::-1].copy()

    def options(self, x_shape):
        return {'choice': [True, False]}
class Cutout(namedtuple('Cutout', ('h', 'w'))):
    """Zero an h-by-w patch (corner sampled per image) in a copy of the input."""

    def __call__(self, x, x0, y0):
        patched = x.copy()
        patched[:, y0:y0 + self.h, x0:x0 + self.w].fill(0.0)
        return patched

    def options(self, x_shape):
        _, height, width = x_shape
        return {'x0': range(width + 1 - self.w), 'y0': range(height + 1 - self.h)}
class Transform():
    """Dataset wrapper that applies a transform pipeline using random choices
    pre-drawn for the whole epoch by set_random_choices()."""

    def __init__(self, dataset, transforms):
        self.dataset, self.transforms = dataset, transforms
        self.choices = None

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        data, labels = self.dataset[index]
        # Apply each transform with the choices pre-drawn for this index.
        for choices, transform in zip(self.choices, self.transforms):
            kwargs = {name: values[index] for (name, values) in choices.items()}
            data = transform(data, **kwargs)
        return data, labels

    def set_random_choices(self):
        self.choices = []
        shape = self.dataset[0][0].shape
        count = len(self)
        for transform in self.transforms:
            options = transform.options(shape)
            # Track the evolving sample shape so later transforms see it.
            if hasattr(transform, 'output_shape'):
                shape = transform.output_shape(shape)
            self.choices.append({name: np.random.choice(values, size=count)
                                 for (name, values) in options.items()})
#####################
## dataset
#####################
def cifar10(root):
    """Download CIFAR-10 (if absent) under ``root`` and return the raw
    uint8 image arrays and label lists for both splits."""
    splits = {}
    for split, is_train in (('train', True), ('test', False)):
        ds = torchvision.datasets.CIFAR10(root=root, train=is_train, download=True)
        splits[split] = {'data': ds.data, 'labels': ds.targets}
    return splits
#####################
## data loading
#####################
class Batches():
    """DataLoader wrapper yielding {'input', 'target'} dicts with
    half-precision inputs and long targets moved to the global ``device``."""

    def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers,
            pin_memory=True, shuffle=shuffle, drop_last=drop_last
        )

    def __iter__(self):
        # Draw fresh augmentation choices eagerly, before iteration starts.
        if self.set_random_choices:
            self.dataset.set_random_choices()
        return self._batches()

    def _batches(self):
        for x, y in self.dataloader:
            yield {'input': x.to(device).half(), 'target': y.to(device).long()}

    def __len__(self):
        return len(self.dataloader)
#####################
## new optimizer
#####################
class SGD_GCC(Optimizer):
    """SGD with momentum plus Gradient Centralization on Conv layers only.

    Identical to ``torch.optim.SGD`` except that, for parameters with more
    than three dimensions (convolution kernels), the gradient is centered by
    subtracting its per-filter mean before the update (the GC-C variant of
    Gradient Centralization). Fix over the original: the deprecated/removed
    ``Tensor.add_(Number, Tensor)`` overload is replaced by the modern
    ``add_(tensor, alpha=...)`` form, so the optimizer works on current
    PyTorch releases; numerics are unchanged.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD_GCC, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGD_GCC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    # In-place, mutating p.grad like the original did.
                    d_p.add_(p.data, alpha=weight_decay)

                # GC operation for Conv layers only: >3 dims means a conv
                # kernel; subtract the mean over all non-output dimensions.
                if len(d_p.size()) > 3:
                    d_p.add_(-d_p.mean(dim=tuple(range(1, d_p.dim())), keepdim=True))

                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                p.data.add_(d_p, alpha=-group['lr'])

        return loss
class SGD_GC(Optimizer):
    """SGD with momentum plus Gradient Centralization on Conv AND FC layers.

    Identical to ``torch.optim.SGD`` except that, for any parameter with
    more than one dimension, the gradient is centered by subtracting its
    mean over all non-output dimensions before the update. Fix over the
    original: the deprecated/removed ``Tensor.add_(Number, Tensor)`` overload
    is replaced by the modern ``add_(tensor, alpha=...)`` form, so the
    optimizer works on current PyTorch releases; numerics are unchanged.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD_GC, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGD_GC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    # In-place, mutating p.grad like the original did.
                    d_p.add_(p.data, alpha=weight_decay)

                # GC operation for Conv layers AND FC layers: any parameter
                # with >1 dim is centered over its non-output dimensions.
                if len(d_p.size()) > 1:
                    d_p.add_(-d_p.mean(dim=tuple(range(1, d_p.dim())), keepdim=True))

                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                p.data.add_(d_p, alpha=-group['lr'])

        return loss
| 9,103 | 34.84252 | 122 | py |
DEAT | DEAT-main/train_cifar_DEAT.py | import argparse
import logging
import sys
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from Positive_Negative_Momentum.pnm_optim import *
import os
from wideresnet import WideResNet
from preactresnet import PreActResNet18, PreActResNet50
from models import *
from utils import *
# CIFAR-10 channel statistics as (3, 1, 1) tensors; .cuda() assumes a CUDA
# device is present.
mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
std = torch.tensor(cifar10_std).view(3,1,1).cuda()
def normalize(X):
    """Standardize an (N, 3, H, W) image batch with the CIFAR-10 mean/std."""
    return (X - mu)/std
# Valid pixel range; adversarial perturbations are clamped back into it.
upper_limit, lower_limit = 1,0
def clamp(X, lower_limit, upper_limit):
    """Element-wise clamp of ``X`` into [lower_limit, upper_limit]; the
    bounds may themselves be tensors (broadcast against X)."""
    capped = torch.min(X, upper_limit)
    return torch.max(capped, lower_limit)
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy with label smoothing: the target distribution places
    ``1 - smoothing`` on the true class and spreads ``smoothing`` uniformly
    over the remaining ``classes - 1`` classes."""

    def __init__(self, classes=10, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim

    def forward(self, pred, target):
        log_probs = pred.log_softmax(dim=self.dim)
        # Build the smoothed target distribution without tracking gradients.
        with torch.no_grad():
            smooth_targets = torch.zeros_like(log_probs)
            smooth_targets.fill_(self.smoothing / (self.cls - 1))
            smooth_targets.scatter_(1, target.data.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-smooth_targets * log_probs, dim=self.dim))
class Batches():
    """DataLoader wrapper yielding {'input', 'target'} dicts with
    single-precision inputs and long targets moved to the global ``device``.
    (Differs from the utils variant only in casting to float, not half.)"""

    def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers,
            pin_memory=True, shuffle=shuffle, drop_last=drop_last
        )

    def __iter__(self):
        # Draw fresh augmentation choices eagerly, before iteration starts.
        if self.set_random_choices:
            self.dataset.set_random_choices()
        return self._batches()

    def _batches(self):
        for x, y in self.dataloader:
            yield {'input': x.to(device).float(), 'target': y.to(device).long()}

    def __len__(self):
        return len(self.dataloader)
def mixup_data(x, y, alpha=1.0):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    # Mixing coefficient from Beta(alpha, alpha); alpha <= 0 disables mixing.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1

    n = x.size()[0]
    perm = torch.randperm(n).cuda()

    mixed_x = lam * x + (1 - lam) * x[perm, :]
    # Both label sets are returned so the loss can be mixed with the same lam.
    return mixed_x, y, y[perm], lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Convex combination of ``criterion`` evaluated against both mixup
    target sets, weighted by the mixing coefficient ``lam``."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
def dlr_loss(x, y):
    """Difference-of-Logits-Ratio loss (Croce &amp; Hein, AutoAttack), averaged
    over the batch: minus the (true - best-other) margin, scaled by the
    spread between the top and third-highest logits."""
    z_sorted, idx_sorted = x.sort(dim=1)
    # 1.0 where the true class currently has the top logit, else 0.0.
    correct_on_top = (idx_sorted[:, -1] == y).float()
    true_logit = x[np.arange(x.shape[0]), y]
    # Best competing logit: second-highest if the true class is on top,
    # otherwise the highest.
    runner_up = z_sorted[:, -2] * correct_on_top + z_sorted[:, -1] * (1. - correct_on_top)
    scale = z_sorted[:, -1] - z_sorted[:, -3] + 1e-12
    return (-(true_logit - runner_up) / scale).mean()
def CW_loss(x, y):
    """Carlini-Wagner style margin loss, averaged over the batch: minus the
    margin between the true-class logit and the best competing logit."""
    z_sorted, idx_sorted = x.sort(dim=1)
    correct_on_top = (idx_sorted[:, -1] == y).float()
    # Best competing logit: second-highest when the true class leads,
    # otherwise the highest.
    other_best = z_sorted[:, -2] * correct_on_top + z_sorted[:, -1] * (1. - correct_on_top)
    margin = x[np.arange(x.shape[0]), y] - other_best
    return (-margin).mean()
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts,
               norm, mixup=False, y_a=None, y_b=None, lam=None,
               early_stop=False, early_stop_pgd_max=1,
               multitarget=False,
               use_DLRloss=False, use_CWloss=False,
               epoch=0, totalepoch=110, gamma=0.8,
               use_adaptive=False, s_HE=15,
               fast_better=False, BNeval=False):
    """Craft PGD adversarial perturbations for batch (X, y).

    Runs ``restarts`` random restarts of ``attack_iters`` PGD steps under the
    given ``norm`` ('l_inf' or 'l_2', radius ``epsilon``, step ``alpha``) and
    keeps, per example, the perturbation with the highest final loss. The
    attack objective is selected by the flags: mixup criterion, multitarget,
    DLR/CW-blended CE (blend weight grows as gamma*epoch/totalepoch),
    adaptive-scaled CE (s_HE), or a gradient-penalized CE (fast_better).

    Returns:
        (max_delta, iter_count): best perturbation per example, and the
        per-example step counts from the LAST restart (only meaningful
        when ``early_stop`` is on; zeros otherwise).
    """
    max_loss = torch.zeros(y.shape[0]).cuda()
    max_delta = torch.zeros_like(X).cuda()
    # Optionally freeze BN statistics while crafting the attack.
    if BNeval:
        model.eval()
    for _ in range(restarts):
        # early stop pgd counter for each x: each example may keep being
        # attacked until it has been fooled early_stop_pgd_max times
        early_stop_pgd_count = early_stop_pgd_max * torch.ones(y.shape[0], dtype=torch.int32).cuda()

        # initialize perturbation uniformly inside the epsilon-ball
        delta = torch.zeros_like(X).cuda()
        if norm == "l_inf":
            delta.uniform_(-epsilon, epsilon)
        elif norm == "l_2":
            # random direction with radius scaled by a uniform factor
            delta.normal_()
            d_flat = delta.view(delta.size(0),-1)
            n = d_flat.norm(p=2,dim=1).view(delta.size(0),1,1,1)
            r = torch.zeros_like(n).uniform_(0, 1)
            delta *= r/n*epsilon
        else:
            raise ValueError
        # keep X + delta inside the valid pixel range
        delta = clamp(delta, lower_limit-X, upper_limit-X)
        delta.requires_grad = True

        iter_count = torch.zeros(y.shape[0])

        # craft adversarial examples
        for _ in range(attack_iters):
            output = model(normalize(X + delta))
            # if use early stop pgd
            if early_stop:
                # calculate mask for early stop pgd: only examples whose
                # fool-budget is not exhausted keep being updated
                if_success_fool = (output.max(1)[1] != y).to(dtype=torch.int32)
                early_stop_pgd_count = early_stop_pgd_count - if_success_fool
                index = torch.where(early_stop_pgd_count > 0)[0]
                iter_count[index] = iter_count[index] + 1
            else:
                index = slice(None,None,None)
            # stop early when every example has used its fool budget
            if not isinstance(index, slice) and len(index) == 0:
                break

            # Select the attack objective and backprop into delta.
            if fast_better:
                # CE plus a penalty on the gradient norm (double backprop).
                loss_ori = F.cross_entropy(output, y)
                grad_ori = torch.autograd.grad(loss_ori, delta, create_graph=True)[0]
                loss_grad = (alpha / 4.) * (torch.norm(grad_ori.view(grad_ori.shape[0], -1), p=2, dim=1) ** 2)
                loss = loss_ori + loss_grad.mean()
                loss.backward()
                grad = delta.grad.detach()
            elif not mixup:
                if multitarget:
                    # Push towards a random label (or away from y if the
                    # random label equals y).
                    random_label = torch.randint(low=0, high=10, size=y.shape).cuda()
                    random_direction = 2*((random_label == y).to(dtype=torch.float32) - 0.5)
                    loss = torch.mean(random_direction * F.cross_entropy(output, random_label, reduction='none'))
                    loss.backward()
                    grad = delta.grad.detach()
                elif use_DLRloss:
                    # CE blended with DLR; blend weight grows over training.
                    beta_ = gamma * epoch / totalepoch
                    loss = (1. - beta_) * F.cross_entropy(output, y) + beta_ * dlr_loss(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
                elif use_CWloss:
                    # CE blended with the CW margin loss.
                    beta_ = gamma * epoch / totalepoch
                    loss = (1. - beta_) * F.cross_entropy(output, y) + beta_ * CW_loss(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
                else:
                    if use_adaptive:
                        # Scale logits by s_HE before CE (HE-style training).
                        loss = F.cross_entropy(s_HE * output, y)
                    else:
                        loss = F.cross_entropy(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
            else:
                # Mixup criterion (recomputes the forward pass).
                criterion = nn.CrossEntropyLoss()
                loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
                loss.backward()
                grad = delta.grad.detach()

            # Update only the still-active examples.
            d = delta[index, :, :, :]
            g = grad[index, :, :, :]
            x = X[index, :, :, :]
            if norm == "l_inf":
                # signed gradient ascent, projected onto the l_inf ball
                d = torch.clamp(d + alpha * torch.sign(g), min=-epsilon, max=epsilon)
            elif norm == "l_2":
                # normalized gradient step, projected onto the l_2 ball
                g_norm = torch.norm(g.view(g.shape[0],-1),dim=1).view(-1,1,1,1)
                scaled_g = g/(g_norm + 1e-10)
                d = (d + scaled_g*alpha).view(d.size(0),-1).renorm(p=2,dim=0,maxnorm=epsilon).view_as(d)
            # re-project into the valid pixel range
            d = clamp(d, lower_limit - x, upper_limit - x)
            delta.data[index, :, :, :] = d
            delta.grad.zero_()

        # Evaluate the final perturbation of this restart and keep, per
        # example, whichever restart achieved the larger loss.
        if mixup:
            criterion = nn.CrossEntropyLoss(reduction='none')
            all_loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
        else:
            all_loss = F.cross_entropy(model(normalize(X+delta)), y, reduction='none')
        max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
        max_loss = torch.max(max_loss, all_loss)
    # Restore training-mode BN if it was switched off above.
    if BNeval:
        model.train()

    return max_delta, iter_count
def get_args():
    """Declare and parse every CLI flag for adversarial CIFAR training.

    Covers model/optimization choices, attack configuration (PGD/FGSM/free),
    the "Group 1" training tricks (early-stop PGD, warm-ups, label smoothing)
    and "Group 2" loss variants (DLR/CW/multitarget/HE), plus augmentation
    options. Returns the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser()
    # Model / data / schedule basics.
    parser.add_argument('--model', default='PreActResNet18')
    parser.add_argument('--l1', default=0, type=float)
    parser.add_argument('--data-dir', default='../cifar-data', type=str)
    parser.add_argument('--epochs', default=110, type=int)
    parser.add_argument('--lr-schedule', default='piecewise', choices=['superconverge', 'piecewise', 'linear', 'piecewisesmoothed', 'piecewisezoom', 'onedrop', 'multipledecay', 'cosine', 'cyclic'])
    parser.add_argument('--lr-max', default=0.1, type=float)
    parser.add_argument('--lr-one-drop', default=0.01, type=float)
    parser.add_argument('--lr-drop-epoch', default=100, type=int)
    # Attack configuration (epsilon/alpha are given in /255 units).
    parser.add_argument('--attack', default='pgd', type=str, choices=['pgd', 'fgsm', 'free', 'none'])
    parser.add_argument('--epsilon', default=8, type=int)
    parser.add_argument('--test_epsilon', default=8, type=int)
    parser.add_argument('--attack-iters', default=10, type=int)
    parser.add_argument('--restarts', default=1, type=int)
    parser.add_argument('--pgd-alpha', default=2, type=float)
    parser.add_argument('--test-pgd-alpha', default=2, type=float)
    parser.add_argument('--fgsm-alpha', default=1.25, type=float)
    parser.add_argument('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2'])
    parser.add_argument('--fgsm-init', default='random', choices=['zero', 'random', 'previous'])
    # Bookkeeping / evaluation.
    parser.add_argument('--fname', default='cifar_model', type=str)
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--half', action='store_true')
    parser.add_argument('--width-factor', default=10, type=int)
    parser.add_argument('--resume', default=0, type=int)
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--val', action='store_true')
    parser.add_argument('--chkpt-iters', default=100, type=int)
    parser.add_argument('--mixture', action='store_true') # whether use mixture of clean and adv examples in a mini-batch
    parser.add_argument('--mixture_alpha', type=float)
    parser.add_argument('--l2', default=0, type=float)

    # Group 1
    parser.add_argument('--earlystopPGD', action='store_true') # whether use early stop in PGD
    parser.add_argument('--earlystopPGDepoch1', default=60, type=int)
    parser.add_argument('--earlystopPGDepoch2', default=100, type=int)

    parser.add_argument('--warmup_lr', action='store_true') # whether warm_up lr from 0 to max_lr in the first n epochs
    parser.add_argument('--warmup_lr_epoch', default=15, type=int)

    parser.add_argument('--weight_decay', default=5e-4, type=float)#weight decay

    parser.add_argument('--warmup_eps', action='store_true') # whether warm_up eps from 0 to 8/255 in the first n epochs
    parser.add_argument('--warmup_eps_epoch', default=15, type=int)

    parser.add_argument('--batch-size', default=128, type=int) #batch size

    parser.add_argument('--labelsmooth', action='store_true') # whether use label smoothing
    parser.add_argument('--labelsmoothvalue', default=0.0, type=float)

    parser.add_argument('--lrdecay', default='base', type=str, choices=['intenselr', 'base', 'looselr', 'lineardecay'])

    # Group 2
    parser.add_argument('--use_DLRloss', action='store_true') # whether use DLRloss
    parser.add_argument('--use_CWloss', action='store_true') # whether use CWloss

    parser.add_argument('--use_multitarget', action='store_true') # whether use multitarget

    parser.add_argument('--use_stronger_adv', action='store_true') # whether use mixture of clean and adv examples in a mini-batch
    parser.add_argument('--stronger_index', default=0, type=int)

    parser.add_argument('--use_FNandWN', action='store_true') # whether use FN and WN
    parser.add_argument('--use_adaptive', action='store_true') # whether use s in attack during training
    parser.add_argument('--s_FN', default=15, type=float) # s in FN
    parser.add_argument('--m_FN', default=0.2, type=float) # s in FN

    parser.add_argument('--use_FNonly', action='store_true') # whether use FN only

    parser.add_argument('--fast_better', action='store_true')

    parser.add_argument('--BNeval', action='store_true') # whether use eval mode for BN when crafting adversarial examples

    parser.add_argument('--focalloss', action='store_true') # whether use focalloss
    parser.add_argument('--focallosslambda', default=2., type=float)

    parser.add_argument('--activation', default='ReLU', type=str)
    parser.add_argument('--softplus_beta', default=1., type=float)

    parser.add_argument('--optimizer', default='momentum', choices=['momentum', 'SGD', 'Nesterov', 'SGD_GC', 'SGD_GCC', 'Adam', 'AdamW', 'PNM', 'AdaPNM'])

    # Augmentation options.
    parser.add_argument('--mixup', action='store_true')
    parser.add_argument('--mixup-alpha', type=float)

    parser.add_argument('--cutout', action='store_true')
    parser.add_argument('--cutout-len', type=int)

    return parser.parse_args()
def get_auto_fname(args):
    """Derive a descriptive run/file name that encodes the training configuration.

    The name always starts with model/schedule/epsilon/batch-size/max-lr, then
    appends one tag per non-default option, in a fixed order, so that two runs
    with the same configuration map to the same directory name.
    """
    names = f'{args.model}_{args.lr_schedule}_eps{args.epsilon}_bs{args.batch_size}_maxlr{args.lr_max}'
    # Group 1: generic training options
    if args.earlystopPGD:
        names += f'_earlystopPGD{args.earlystopPGDepoch1}{args.earlystopPGDepoch2}'
    if args.warmup_lr:
        names += f'_warmuplr{args.warmup_lr_epoch}'
    if args.warmup_eps:
        names += f'_warmupeps{args.warmup_eps_epoch}'
    if args.weight_decay != 5e-4:
        names += f'_wd{args.weight_decay}'
    if args.labelsmooth:
        names += f'_ls{args.labelsmoothvalue}'
    # Group 2: loss / attack variants
    if args.use_stronger_adv:
        names += f'_usestrongeradv#{args.stronger_index}'
    if args.use_multitarget:
        names += '_usemultitarget'
    if args.use_DLRloss:
        names += '_useDLRloss'
    if args.use_CWloss:
        names += '_useCWloss'
    if args.use_FNandWN:
        names += f'_HEs{args.s_FN}m{args.m_FN}'
    if args.use_adaptive:
        # NOTE: historically no leading underscore for this tag.
        names += 'adaptive'
    if args.use_FNonly:
        names += '_FNonly'
    if args.fast_better:
        names += '_fastbetter'
    if args.activation != 'ReLU':
        names += f'_{args.activation}'
        if args.activation == 'Softplus':
            names += str(args.softplus_beta)
    if args.lrdecay != 'base':
        names += f'_{args.lrdecay}'
    if args.BNeval:
        names += '_BNeval'
    if args.focalloss:
        names += f'_focalloss{args.focallosslambda}'
    if args.optimizer != 'momentum':
        names += f'_{args.optimizer}'
    if args.mixup:
        names += f'_mixup{args.mixup_alpha}'
    if args.cutout:
        names += f'_cutout{args.cutout_len}'
    if args.attack != 'pgd':
        names += f'_{args.attack}'
    print('File name: ', names)
    return names
def main():
    """Adversarially train a CIFAR-10 classifier with the configured attack/loss options.

    Pipeline: resolve output dir and logging, seed RNGs, build data loaders,
    instantiate the model/optimizer/criterion, define LR / epsilon / early-stop
    schedules, then run the epoch loop: craft adversarial examples, update the
    model, evaluate on (adversarial) test data, and save records/checkpoints.
    """
    args = get_args()
    # Resolve the output directory; 'auto' derives a name from the config.
    if args.fname == 'auto':
        names = get_auto_fname(args)
        args.fname = 'trained_models/' + names
    else:
        args.fname = 'trained_models/' + args.fname
    if not os.path.exists(args.fname):
        os.makedirs(args.fname)
    # Log both to a file inside the run directory and to stdout.
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        format='[%(asctime)s] - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=logging.DEBUG,
        handlers=[
            logging.FileHandler(os.path.join(args.fname, 'eval.log' if args.eval else 'output.log')),
            logging.StreamHandler()
        ])
    logger.info(args)
    # Set seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # Prepare data
    transforms = [Crop(32, 32), FlipLR()]
    if args.cutout:
        transforms.append(Cutout(args.cutout_len, args.cutout_len))
    if args.val:
        try:
            dataset = torch.load("cifar10_validation_split.pth")
        except:
            print("Couldn't find a dataset with a validation split, did you run "
                  "generate_validation.py?")
            return
        val_set = list(zip(transpose(dataset['val']['data']/255.), dataset['val']['labels']))
        val_batches = Batches(val_set, args.batch_size, shuffle=False, num_workers=4)
    else:
        dataset = cifar10(args.data_dir)
    train_set = list(zip(transpose(pad(dataset['train']['data'], 4)/255.),
        dataset['train']['labels']))
    train_set_x = Transform(train_set, transforms)
    train_batches = Batches(train_set_x, args.batch_size, shuffle=True, set_random_choices=True, num_workers=4)
    test_set = list(zip(transpose(dataset['test']['data']/255.), dataset['test']['labels']))
    test_batches = Batches(test_set, args.batch_size, shuffle=False, num_workers=4)
    # Set perturbations (convert pixel-scale budgets to [0, 1] scale)
    epsilon = (args.epsilon / 255.)
    test_epsilon = (args.test_epsilon / 255.)
    pgd_alpha = (args.pgd_alpha / 255.)
    test_pgd_alpha = (args.test_pgd_alpha / 255.)
    # Set models
    if args.model == 'VGG':
        model = VGG('VGG19')
    elif args.model == 'ResNet18':
        model = ResNet18()
    elif args.model == 'GoogLeNet':
        model = GoogLeNet()
    elif args.model == 'DenseNet121':
        model = DenseNet121()
    elif args.model == 'DenseNet201':
        model = DenseNet201()
    elif args.model == 'ResNeXt29':
        model = ResNeXt29_2x64d()
    elif args.model == 'ResNeXt29L':
        model = ResNeXt29_32x4d()
    elif args.model == 'MobileNet':
        model = MobileNet()
    elif args.model == 'MobileNetV2':
        model = MobileNetV2()
    elif args.model == 'DPN26':
        model = DPN26()
    elif args.model == 'DPN92':
        model = DPN92()
    elif args.model == 'ShuffleNetG2':
        model = ShuffleNetG2()
    elif args.model == 'SENet18':
        model = SENet18()
    elif args.model == 'ShuffleNetV2':
        model = ShuffleNetV2(1)
    elif args.model == 'EfficientNetB0':
        model = EfficientNetB0()
    elif args.model == 'PNASNetA':
        model = PNASNetA()
    elif args.model == 'RegNetX':
        model = RegNetX_200MF()
    elif args.model == 'RegNetLX':
        model = RegNetX_400MF()
    elif args.model == 'PreActResNet50':
        model = PreActResNet50()
    elif args.model == 'PreActResNet18':
        model = PreActResNet18(normalize_only_FN=args.use_FNonly, normalize=args.use_FNandWN, scale=args.s_FN,
                               activation=args.activation, softplus_beta=args.softplus_beta)
    elif args.model == 'WideResNet':
        model = WideResNet(34, 10, widen_factor=10, dropRate=0.0, normalize=args.use_FNandWN,
                           activation=args.activation, softplus_beta=args.softplus_beta)
    elif args.model == 'WideResNet_20':
        model = WideResNet(34, 10, widen_factor=20, dropRate=0.0, normalize=args.use_FNandWN,
                           activation=args.activation, softplus_beta=args.softplus_beta)
    else:
        raise ValueError("Unknown model")
    model = nn.DataParallel(model).cuda()
    model.train()
    # Set training hyperparameters
    if args.l2:
        # Apply L2 weight decay only to conv/linear weights, not BN/bias params.
        decay, no_decay = [], []
        for name,param in model.named_parameters():
            if 'bn' not in name and 'bias' not in name:
                decay.append(param)
            else:
                no_decay.append(param)
        params = [{'params':decay, 'weight_decay':args.l2},
                  {'params':no_decay, 'weight_decay': 0 }]
    else:
        params = model.parameters()
    if args.lr_schedule == 'cyclic':
        opt = torch.optim.Adam(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
    else:
        if args.optimizer == 'momentum':
            opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'SGD':
            opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0, weight_decay=args.weight_decay)
        elif args.optimizer == 'Nesterov':
            opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay, nesterov=True)
        elif args.optimizer == 'SGD_GC':
            opt = SGD_GC(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'SGD_GCC':
            opt = SGD_GCC(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'Adam':
            opt = torch.optim.Adam(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
        elif args.optimizer == 'AdamW':
            opt = torch.optim.AdamW(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
        elif args.optimizer == 'PNM':
            opt = PNM(params, lr=args.lr_max, betas=(0.9, 1.), weight_decay=args.weight_decay)
        elif args.optimizer == 'AdaPNM':
            opt = AdaPNM(params, lr=args.lr_max, betas=(0.9, 0.999, 1.), eps=1e-08, weight_decay=args.weight_decay)
    # Cross-entropy (mean)
    if args.labelsmooth:
        criterion = LabelSmoothingLoss(smoothing=args.labelsmoothvalue)
    else:
        criterion = nn.CrossEntropyLoss()
    # If we use freeAT or fastAT with previous init
    if args.attack == 'free':
        delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
        delta.requires_grad = True
    elif args.attack == 'fgsm' and args.fgsm_init == 'previous':
        delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
        delta.requires_grad = True
    if args.attack == 'free':
        # Free AT replays each minibatch attack_iters times, so shrink epoch count.
        epochs = int(math.ceil(args.epochs / args.attack_iters))
    else:
        epochs = args.epochs
    # Set lr schedule
    if args.lr_schedule == 'superconverge':
        lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0]
    elif args.lr_schedule == 'piecewise':
        def lr_schedule(t, warm_up_lr = args.warmup_lr):
            # Piecewise schedule with optional linear warm-up during the first epochs.
            if t < 100:
                if warm_up_lr and t < args.warmup_lr_epoch:
                    return (t + 1.) / args.warmup_lr_epoch * args.lr_max
                else:
                    return args.lr_max
            if args.lrdecay == 'lineardecay':
                if t < 105:
                    return args.lr_max * 0.02 * (105 - t)
                else:
                    return 0.
            elif args.lrdecay == 'intenselr':
                if t < 102:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
            elif args.lrdecay == 'looselr':
                if t < 150:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
            elif args.lrdecay == 'base':
                if t < 105:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
    elif args.lr_schedule == 'linear':
        lr_schedule = lambda t: np.interp([t], [0, args.epochs // 3, args.epochs * 2 // 3, args.epochs], [args.lr_max, args.lr_max, args.lr_max / 10, args.lr_max / 100])[0]
    elif args.lr_schedule == 'onedrop':
        def lr_schedule(t):
            if t < args.lr_drop_epoch:
                return args.lr_max
            else:
                return args.lr_one_drop
    elif args.lr_schedule == 'multipledecay':
        def lr_schedule(t):
            return args.lr_max - (t//(args.epochs//10))*(args.lr_max/10)
    elif args.lr_schedule == 'cosine':
        def lr_schedule(t):
            return args.lr_max * 0.5 * (1 + np.cos(t / args.epochs * np.pi))
    elif args.lr_schedule == 'cyclic':
        def lr_schedule(t, stepsize=18, min_lr=1e-5, max_lr=args.lr_max):
            # Scaler: we can adapt this if we do not want the triangular CLR
            scaler = lambda x: 1.
            # Additional function to see where on the cycle we are
            cycle = math.floor(1 + t / (2 * stepsize))
            x = abs(t / stepsize - 2 * cycle + 1)
            relative = max(0, (1 - x)) * scaler(cycle)
            return min_lr + (max_lr - min_lr) * relative
    #### Set stronger adv attacks when decay the lr ####
    def eps_alpha_schedule(t, warm_up_eps = args.warmup_eps, if_use_stronger_adv=args.use_stronger_adv, stronger_index=args.stronger_index): # Schedule number 0
        # Returns (epsilon, pgd_alpha, restarts) for the current (fractional) epoch.
        if stronger_index == 0:
            epsilon_s = [epsilon * 1.5, epsilon * 2]
            pgd_alpha_s = [pgd_alpha, pgd_alpha]
        elif stronger_index == 1:
            epsilon_s = [epsilon * 1.5, epsilon * 2]
            pgd_alpha_s = [pgd_alpha * 1.25, pgd_alpha * 1.5]
        elif stronger_index == 2:
            epsilon_s = [epsilon * 2, epsilon * 2.5]
            pgd_alpha_s = [pgd_alpha * 1.5, pgd_alpha * 2]
        else:
            print('Undefined stronger index')
        if if_use_stronger_adv:
            if t < 100:
                if t < args.warmup_eps_epoch and warm_up_eps:
                    return (t + 1.) / args.warmup_eps_epoch * epsilon, pgd_alpha, args.restarts
                else:
                    return epsilon, pgd_alpha, args.restarts
            elif t < 105:
                return epsilon_s[0], pgd_alpha_s[0], args.restarts
            else:
                return epsilon_s[1], pgd_alpha_s[1], args.restarts
        else:
            if t < args.warmup_eps_epoch and warm_up_eps:
                return (t + 1.) / args.warmup_eps_epoch * epsilon, pgd_alpha, args.restarts
            else:
                return epsilon, pgd_alpha, args.restarts
    #### Set the counter for the early stop of PGD ####
    def early_stop_counter_schedule(t):
        # Allowed number of successful-fool steps before stopping PGD per sample.
        if t < args.earlystopPGDepoch1:
            return 1
        elif t < args.earlystopPGDepoch2:
            return 2
        else:
            return 3
    best_test_robust_acc = 0
    best_val_robust_acc = 0
    if args.resume:
        start_epoch = args.resume
        model.load_state_dict(torch.load(os.path.join(args.fname, f'model_{start_epoch-1}.pth')))
        opt.load_state_dict(torch.load(os.path.join(args.fname, f'opt_{start_epoch-1}.pth')))
        logger.info(f'Resuming at epoch {start_epoch}')
        best_test_robust_acc = torch.load(os.path.join(args.fname, f'model_best.pth'))['test_robust_acc']
        if args.val:
            best_val_robust_acc = torch.load(os.path.join(args.fname, f'model_val.pth'))['val_robust_acc']
    else:
        start_epoch = 0
    if args.eval:
        if not args.resume:
            logger.info("No model loaded to evaluate, specify with --resume FNAME")
            return
        logger.info("[Evaluation mode]")
    # logger.info('Epoch \t Train Time \t Test Time \t LR \t Train Loss \t Train Grad \t Train Acc \t Train Robust Loss \t Train Robust Acc || \t Test Loss \t Test Acc \t Test Robust Loss \t Test Robust Acc')
    logger.info('Epoch \t Train Acc \t Train Robust Acc \t Test Acc \t Test Robust Acc')
    # Records per epoch for savetxt
    train_loss_record = []
    train_acc_record = []
    train_robust_loss_record = []
    train_robust_acc_record = []
    train_grad_record = []
    train_time_record = []
    test_loss_record = []
    test_acc_record = []
    test_robust_loss_record = []
    test_robust_acc_record = []
    test_grad_record = []
    test_time_record = []
    for epoch in range(start_epoch, epochs):
        model.train()
        start_time = time.time()
        train_loss = 0
        train_acc = 0
        train_robust_loss = 0
        train_robust_acc = 0
        train_n = 0
        train_grad = 0
        record_iter = torch.tensor([])
        for i, batch in enumerate(train_batches):
            if args.eval:
                break
            X, y = batch['input'], batch['target']
            # Margin term used by the FN+WN (HE) loss variant.
            onehot_target_withmargin_HE = args.m_FN * args.s_FN * torch.nn.functional.one_hot(y, num_classes=10)
            if args.mixup:
                X, y_a, y_b, lam = mixup_data(X, y, args.mixup_alpha)
                X, y_a, y_b = map(Variable, (X, y_a, y_b))
            # Fractional epoch so schedules update per-iteration, not per-epoch.
            epoch_now = epoch + (i + 1) / len(train_batches)
            lr = lr_schedule(epoch_now)
            opt.param_groups[0].update(lr=lr)
            if args.attack == 'pgd':
                # Random initialization
                epsilon_sche, pgd_alpha_sche, restarts_sche = eps_alpha_schedule(epoch_now)
                early_counter_max = early_stop_counter_schedule(epoch_now)
                if args.mixup:
                    delta, iter_counts = attack_pgd(model, X, y, epsilon_sche, pgd_alpha_sche, args.attack_iters, restarts_sche, args.norm,
                                                    early_stop=args.earlystopPGD, early_stop_pgd_max=early_counter_max,
                                                    mixup=True, y_a=y_a, y_b=y_b, lam=lam)
                else:
                    delta, iter_counts = attack_pgd(model, X, y, epsilon_sche, pgd_alpha_sche, args.attack_iters, restarts_sche, args.norm,
                                                    early_stop=args.earlystopPGD, early_stop_pgd_max=early_counter_max, multitarget=args.use_multitarget,
                                                    use_DLRloss=args.use_DLRloss, use_CWloss=args.use_CWloss,
                                                    epoch=epoch_now, totalepoch=args.epochs, gamma=0.8,
                                                    use_adaptive=args.use_adaptive, s_HE=args.s_FN,
                                                    fast_better=args.fast_better, BNeval=args.BNeval)
                record_iter = torch.cat((record_iter, iter_counts))
                delta = delta.detach()
            elif args.attack == 'fgsm':
                delta,_ = attack_pgd(model, X, y, epsilon, args.fgsm_alpha*epsilon, 1, 1, args.norm, fast_better=args.fast_better)
                delta = delta.detach()
            # Standard training
            elif args.attack == 'none':
                delta = torch.zeros_like(X)
            adv_input = normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))
            adv_input.requires_grad = True
            robust_output = model(adv_input)
            # Training losses
            if args.mixup:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                robust_loss = mixup_criterion(criterion, robust_output, y_a, y_b, lam)
            elif args.mixture:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                robust_loss = args.mixture_alpha * criterion(robust_output, y) + (1-args.mixture_alpha) * criterion(output, y)
            else:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                if args.focalloss:
                    criterion_nonreduct = nn.CrossEntropyLoss(reduction='none')
                    # NOTE(review): [:, y] yields an (N, N) matrix rather than the
                    # per-sample confidence; gather(1, y.unsqueeze(1)) looks intended — verify.
                    robust_confidence = F.softmax(robust_output, dim=1)[:, y].detach()
                    robust_loss = (criterion_nonreduct(robust_output, y) * ((1. - robust_confidence) ** args.focallosslambda)).mean()
                elif args.use_DLRloss:
                    beta_ = 0.8 * epoch_now / args.epochs
                    robust_loss = (1. - beta_) * F.cross_entropy(robust_output, y) + beta_ * dlr_loss(robust_output, y)
                elif args.use_CWloss:
                    beta_ = 0.8 * epoch_now / args.epochs
                    robust_loss = (1. - beta_) * F.cross_entropy(robust_output, y) + beta_ * CW_loss(robust_output, y)
                elif args.use_FNandWN:
                    #print('use FN and WN with margin')
                    robust_loss = criterion(args.s_FN * robust_output - onehot_target_withmargin_HE, y)
                else:
                    robust_loss = criterion(robust_output, y)
            if args.l1:
                # L1 regularization on conv/linear weights only.
                for name,param in model.named_parameters():
                    if 'bn' not in name and 'bias' not in name:
                        robust_loss += args.l1*param.abs().sum()
            opt.zero_grad()
            robust_loss.backward()
            opt.step()
            clean_input = normalize(X)
            clean_input.requires_grad = True
            output = model(clean_input)
            if args.mixup:
                loss = mixup_criterion(criterion, output, y_a, y_b, lam)
            else:
                loss = criterion(output, y)
            # Get the gradient norm values
            input_grads = torch.autograd.grad(loss, clean_input, create_graph=False)[0]
            # Record the statstic values
            train_robust_loss += robust_loss.item() * y.size(0)
            train_robust_acc += (robust_output.max(1)[1] == y).sum().item()
            train_loss += loss.item() * y.size(0)
            train_acc += (output.max(1)[1] == y).sum().item()
            train_n += y.size(0)
            train_grad += input_grads.abs().sum()
        train_time = time.time()
        if args.earlystopPGD:
            print('Iter mean: ', record_iter.mean().item(), ' Iter std: ', record_iter.std().item())
        print('Learning rate: ', lr)
        #print('Eps: ', epsilon_sche)
        # Evaluate on test data
        model.eval()
        test_loss = 0
        test_acc = 0
        test_robust_loss = 0
        test_robust_acc = 0
        test_n = 0
        test_grad = 0
        for i, batch in enumerate(test_batches):
            X, y = batch['input'], batch['target']
            # Random initialization
            if args.attack == 'none':
                delta = torch.zeros_like(X)
            else:
                delta, _ = attack_pgd(model, X, y, test_epsilon, test_pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=False)
            delta = delta.detach()
            adv_input = normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))
            adv_input.requires_grad = True
            robust_output = model(adv_input)
            robust_loss = criterion(robust_output, y)
            clean_input = normalize(X)
            clean_input.requires_grad = True
            output = model(clean_input)
            loss = criterion(output, y)
            # Get the gradient norm values
            input_grads = torch.autograd.grad(loss, clean_input, create_graph=False)[0]
            test_robust_loss += robust_loss.item() * y.size(0)
            test_robust_acc += (robust_output.max(1)[1] == y).sum().item()
            test_loss += loss.item() * y.size(0)
            test_acc += (output.max(1)[1] == y).sum().item()
            test_n += y.size(0)
            test_grad += input_grads.abs().sum()
        test_time = time.time()
        if args.val:
            val_loss = 0
            val_acc = 0
            val_robust_loss = 0
            val_robust_acc = 0
            val_n = 0
            for i, batch in enumerate(val_batches):
                X, y = batch['input'], batch['target']
                # Random initialization
                if args.attack == 'none':
                    delta = torch.zeros_like(X)
                else:
                    # NOTE(review): uses the *training* pgd_alpha here while the test loop
                    # uses test_pgd_alpha — confirm whether this asymmetry is intended.
                    delta, _ = attack_pgd(model, X, y, test_epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=False)
                delta = delta.detach()
                robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit)))
                robust_loss = criterion(robust_output, y)
                output = model(normalize(X))
                loss = criterion(output, y)
                val_robust_loss += robust_loss.item() * y.size(0)
                val_robust_acc += (robust_output.max(1)[1] == y).sum().item()
                val_loss += loss.item() * y.size(0)
                val_acc += (output.max(1)[1] == y).sum().item()
                val_n += y.size(0)
        if not args.eval:
            # logger.info('%d \t %.1f \t %.1f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f %.4f \t %.4f \t %.4f',
            #     epoch, train_time - start_time, test_time - train_time, lr,
            #     train_loss/train_n, train_grad/train_n, train_acc/train_n, train_robust_loss/train_n, train_robust_acc/train_n,
            #     test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
            logger.info('%d \t %.4f \t %.4f \t %.4f \t %.4f',
                epoch, train_acc/train_n, train_robust_acc/train_n, test_acc/test_n, test_robust_acc/test_n)
            # Save results
            train_loss_record.append(train_loss/train_n)
            train_acc_record.append(train_acc/train_n)
            train_robust_loss_record.append(train_robust_loss/train_n)
            train_robust_acc_record.append(train_robust_acc/train_n)
            train_grad_record.append(train_grad/train_n)
            train_time_record.append(train_time - start_time)
            np.savetxt(args.fname+'/train_loss_record.txt', np.array(train_loss_record))
            np.savetxt(args.fname+'/train_acc_record.txt', np.array(train_acc_record))
            np.savetxt(args.fname+'/train_robust_loss_record.txt', np.array(train_robust_loss_record))
            np.savetxt(args.fname+'/train_robust_acc_record.txt', np.array(train_robust_acc_record))
            np.savetxt(args.fname+'/train_grad_record.txt', np.array(train_grad_record))
            np.savetxt(args.fname+'/train_time_record.txt', np.array(train_time_record))
            # NOTE(review): test statistics below are divided by train_n, not test_n;
            # this looks like a copy-paste slip — confirm before relying on these files.
            test_loss_record.append(test_loss/train_n)
            test_acc_record.append(test_acc/train_n)
            test_robust_loss_record.append(test_robust_loss/train_n)
            test_robust_acc_record.append(test_robust_acc/train_n)
            test_grad_record.append(test_grad/train_n)
            test_time_record.append(test_time - train_time)
            np.savetxt(args.fname+'/test_loss_record.txt', np.array(test_loss_record))
            np.savetxt(args.fname+'/test_acc_record.txt', np.array(test_acc_record))
            np.savetxt(args.fname+'/test_robust_loss_record.txt', np.array(test_robust_loss_record))
            np.savetxt(args.fname+'/test_robust_acc_record.txt', np.array(test_robust_acc_record))
            np.savetxt(args.fname+'/test_grad_record.txt', np.array(test_grad_record))
            np.savetxt(args.fname+'/test_time_record.txt', np.array(test_time_record))
            if args.val:
                logger.info('validation %.4f \t %.4f \t %.4f \t %.4f',
                    val_loss/val_n, val_acc/val_n, val_robust_loss/val_n, val_robust_acc/val_n)
                if val_robust_acc/val_n > best_val_robust_acc:
                    torch.save({
                        'state_dict':model.state_dict(),
                        'test_robust_acc':test_robust_acc/test_n,
                        'test_robust_loss':test_robust_loss/test_n,
                        'test_loss':test_loss/test_n,
                        'test_acc':test_acc/test_n,
                        'val_robust_acc':val_robust_acc/val_n,
                        'val_robust_loss':val_robust_loss/val_n,
                        'val_loss':val_loss/val_n,
                        'val_acc':val_acc/val_n,
                    }, os.path.join(args.fname, f'model_val.pth'))
                    best_val_robust_acc = val_robust_acc/val_n
            # save checkpoint
            # if epoch > 99 or (epoch+1) % args.chkpt_iters == 0 or epoch+1 == epochs:
            #     torch.save(model.state_dict(), os.path.join(args.fname, f'model_{epoch}.pth'))
            #     torch.save(opt.state_dict(), os.path.join(args.fname, f'opt_{epoch}.pth'))
            # save best
            if test_robust_acc/test_n > best_test_robust_acc:
                torch.save({
                    'state_dict':model.state_dict(),
                    'test_robust_acc':test_robust_acc/test_n,
                    'test_robust_loss':test_robust_loss/test_n,
                    'test_loss':test_loss/test_n,
                    'test_acc':test_acc/test_n,
                }, os.path.join(args.fname, f'model_best.pth'))
                best_test_robust_acc = test_robust_acc/test_n
        else:
            logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f',
                epoch, train_time - start_time, test_time - train_time, -1,
                -1, -1, -1, -1,
                test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
            return
# Script entry point: run adversarial training only when executed directly.
if __name__ == "__main__":
    main()
| 40,722 | 41.287643 | 208 | py |
DEAT | DEAT-main/eval_cifar.py | import argparse
import copy
import logging
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from preactresnet import PreActResNet18
from wideresnet import WideResNet
from utils_plus import (upper_limit, lower_limit, std, clamp, get_loaders,
attack_pgd, evaluate_pgd, evaluate_standard, normalize)
from autoattack import AutoAttack
# installing AutoAttack by: pip install git+https://github.com/fra31/auto-attack
def get_args():
    """Parse command-line options for the robustness evaluation script."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--batch-size', type=int, default=128)
    cli.add_argument('--data-dir', type=str, default='../cifar-data')
    cli.add_argument('--epsilon', type=int, default=8)
    cli.add_argument('--out-dir', type=str, default='train_fgsm_output', help='Output directory')
    cli.add_argument('--seed', type=int, default=0, help='Random seed')
    return cli.parse_args()
def main():
    """Evaluate a saved checkpoint on clean data, PGD (CE and CW losses), and AutoAttack."""
    args = get_args()
    # Seed all RNGs for reproducible attack randomness.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        format='[%(asctime)s] - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=logging.DEBUG,
        handlers=[
            logging.StreamHandler()
        ])
    logger.info(args)
    _, test_loader = get_loaders(args.data_dir, args.batch_size)
    # Checkpoint may be either a plain state_dict or a dict wrapping one.
    best_state_dict = torch.load(os.path.join(args.out_dir, 'model_best.pth'))
    # Evaluation
    model_test = PreActResNet18().cuda()
    # model_test = WideResNet(34, 10, widen_factor=10, dropRate=0.0)
    model_test = nn.DataParallel(model_test).cuda()
    if 'state_dict' in best_state_dict.keys():
        model_test.load_state_dict(best_state_dict['state_dict'])
    else:
        model_test.load_state_dict(best_state_dict)
    model_test.float()
    model_test.eval()
    ### Evaluate clean acc ###
    _, test_acc = evaluate_standard(test_loader, model_test)
    print('Clean acc: ', test_acc)
    ### Evaluate PGD (CE loss) acc ###
    _, pgd_acc_CE = evaluate_pgd(test_loader, model_test, attack_iters=10, restarts=1, eps=8, step=2, use_CWloss=False)
    print('PGD-10 (10 restarts, step 2, CE loss) acc: ', pgd_acc_CE)
    ### Evaluate PGD (CW loss) acc ###
    _, pgd_acc_CW = evaluate_pgd(test_loader, model_test, attack_iters=10, restarts=1, eps=8, step=2, use_CWloss=True)
    print('PGD-10 (10 restarts, step 2, CW loss) acc: ', pgd_acc_CW)
    ### Evaluate AutoAttack ###
    # Materialize the whole test set as tensors, as AutoAttack expects.
    l = [x for (x, y) in test_loader]
    x_test = torch.cat(l, 0)
    l = [y for (x, y) in test_loader]
    y_test = torch.cat(l, 0)
    # Wrap the model so AutoAttack sees inputs in [0, 1] and we normalize inside.
    class normalize_model():
        def __init__(self, model):
            self.model_test = model
        def __call__(self, x):
            return self.model_test(normalize(x))
    new_model = normalize_model(model_test)
    epsilon = 8 / 255.
    adversary = AutoAttack(new_model, norm='Linf', eps=epsilon, version='standard')
    X_adv = adversary.run_standard_evaluation(x_test, y_test, bs=128)
# Script entry point: run evaluation only when executed directly.
if __name__ == "__main__":
    main()
| 3,080 | 32.129032 | 119 | py |
DEAT | DEAT-main/utils_plus.py | #import apex.amp as amp
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
# Valid pixel range for images represented in [0, 1].
upper_limit, lower_limit = 1, 0
# CIFAR-10 per-channel mean/std used for input normalization.
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
# Broadcastable (3, 1, 1) GPU tensors consumed by normalize().
mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
std = torch.tensor(cifar10_std).view(3,1,1).cuda()
def normalize(X):
    """Standardize an image batch with the module-level channel mean/std tensors."""
    centered = X - mu
    return centered / std
def clamp(X, lower_limit, upper_limit):
    """Element-wise clip of X into [lower_limit, upper_limit] (tensor bounds allowed)."""
    capped_above = torch.min(X, upper_limit)
    return torch.max(capped_above, lower_limit)
def get_loaders(dir_, batch_size, DATASET='CIFAR10'):
    """Build train/test DataLoaders for CIFAR-10 or CIFAR-100.

    Args:
        dir_: root directory where the dataset lives (downloaded if missing).
        batch_size: mini-batch size for both loaders.
        DATASET: 'CIFAR10' or 'CIFAR100'.

    Returns:
        (train_loader, test_loader) tuple.

    Raises:
        ValueError: if DATASET is not one of the supported names.
    """
    # Standard augmentation for training; plain tensor conversion for testing.
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])
    test_transform = transforms.Compose([
        transforms.ToTensor()
    ])
    num_workers = 2
    if DATASET == 'CIFAR10':
        train_dataset = datasets.CIFAR10(
            dir_, train=True, transform=train_transform, download=True)
        test_dataset = datasets.CIFAR10(
            dir_, train=False, transform=test_transform, download=True)
    elif DATASET == 'CIFAR100':
        train_dataset = datasets.CIFAR100(
            dir_, train=True, transform=train_transform, download=True)
        test_dataset = datasets.CIFAR100(
            dir_, train=False, transform=test_transform, download=True)
    else:
        # Previously an unsupported name fell through to a confusing NameError
        # when building the loaders; fail fast with a clear message instead.
        raise ValueError(
            "Unsupported DATASET: {!r} (expected 'CIFAR10' or 'CIFAR100')".format(DATASET))
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=num_workers,
    )
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=batch_size,
        shuffle=False,
        pin_memory=True,
        num_workers=2,
    )
    return train_loader, test_loader
def CW_loss(x, y):
    """Carlini-Wagner margin loss (mean over the batch).

    Negative margin between the true-class logit and the strongest
    competing logit; more negative means more confidently correct.
    """
    sorted_logits, sorted_idx = x.sort(dim=1)
    correct_on_top = (sorted_idx[:, -1] == y).float()
    true_logit = x[np.arange(x.shape[0]), y]
    best_other = sorted_logits[:, -2] * correct_on_top + sorted_logits[:, -1] * (1. - correct_on_top)
    return (best_other - true_logit).mean()
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, use_CWloss=False):
    """Craft L-inf PGD perturbations for a batch, keeping the worst over restarts.

    Args:
        model: classifier taking normalized inputs.
        X, y: clean images in [0, 1] and integer labels (CUDA tensors expected).
        epsilon: L-inf perturbation budget (same scale as X).
        alpha: step size per PGD iteration.
        attack_iters: number of gradient steps per restart.
        restarts: number of random restarts; the delta with the highest final
            cross-entropy per sample is kept.
        use_CWloss: use the CW margin loss instead of cross-entropy for the steps.

    Returns:
        max_delta: per-sample perturbation maximizing the final loss.
    """
    max_loss = torch.zeros(y.shape[0]).cuda()
    max_delta = torch.zeros_like(X).cuda()
    for _ in range(restarts):
        # Random start inside the epsilon ball, clipped to the valid pixel range.
        delta = torch.zeros_like(X).cuda()
        delta.uniform_(-epsilon, epsilon)
        delta.data = clamp(delta, lower_limit - X, upper_limit - X)
        delta.requires_grad = True
        for _ in range(attack_iters):
            output = model(normalize(X + delta))
            # Only keep attacking samples that are still classified correctly.
            index = torch.where(output.max(1)[1] == y)
            if len(index[0]) == 0:
                break
            if use_CWloss:
                loss = CW_loss(output, y)
            else:
                loss = F.cross_entropy(output, y)
            loss.backward()
            grad = delta.grad.detach()
            # Signed-gradient ascent step, projected back into the epsilon ball
            # and the valid pixel range, applied only to the active samples.
            d = delta[index[0], :, :, :]
            g = grad[index[0], :, :, :]
            d = torch.clamp(d + alpha * torch.sign(g), -epsilon, epsilon)
            d = clamp(d, lower_limit - X[index[0], :, :, :], upper_limit - X[index[0], :, :, :])
            delta.data[index[0], :, :, :] = d
            delta.grad.zero_()
        # Keep, per sample, the restart that achieves the highest CE loss.
        all_loss = F.cross_entropy(model(normalize(X + delta)), y, reduction='none').detach()
        max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
        max_loss = torch.max(max_loss, all_loss)
    return max_delta
def evaluate_pgd(test_loader, model, attack_iters, restarts, eps=8, step=2, use_CWloss=False):
    """Average loss/accuracy on PGD adversarial examples over the whole loader.

    `eps` and `step` are given on the 0-255 pixel scale and converted to [0, 1].
    Returns (mean_loss, accuracy).
    """
    epsilon = eps / 255.
    alpha = step / 255.
    total_loss = 0
    total_correct = 0
    seen = 0
    model.eval()
    for X, y in test_loader:
        X, y = X.cuda(), y.cuda()
        pgd_delta = attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, use_CWloss=use_CWloss)
        with torch.no_grad():
            logits = model(normalize(X + pgd_delta))
            batch_loss = F.cross_entropy(logits, y)
            total_loss += batch_loss.item() * y.size(0)
            total_correct += (logits.max(1)[1] == y).sum().item()
            seen += y.size(0)
    return total_loss / seen, total_correct / seen
def evaluate_standard(test_loader, model):
    """Average cross-entropy loss and accuracy of `model` on clean test data.

    Returns (mean_loss, accuracy).
    """
    total_loss = 0
    total_correct = 0
    seen = 0
    model.eval()
    with torch.no_grad():
        for X, y in test_loader:
            X, y = X.cuda(), y.cuda()
            logits = model(normalize(X))
            batch_loss = F.cross_entropy(logits, y)
            total_loss += batch_loss.item() * y.size(0)
            total_correct += (logits.max(1)[1] == y).sum().item()
            seen += y.size(0)
    return total_loss / seen, total_correct / seen
| 4,589 | 34.307692 | 106 | py |
DEAT | DEAT-main/train_cifar.py | import argparse
import logging
import sys
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from wideresnet import WideResNet
from preactresnet import PreActResNet18, PreActResNet50
from models import *
from utils import *
# CIFAR-10 channel mean/std reshaped to (3, 1, 1) for broadcasting; kept on GPU.
mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
std = torch.tensor(cifar10_std).view(3,1,1).cuda()
def normalize(X):
    """Channel-wise standardization using the CIFAR-10 mean/std tensors."""
    return torch.div(X - mu, std)
# Valid pixel range for images represented in [0, 1].
upper_limit, lower_limit = 1,0
def clamp(X, lower_limit, upper_limit):
return torch.max(torch.min(X, upper_limit), lower_limit)
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy with label smoothing.

    The target distribution puts `1 - smoothing` mass on the true class and
    spreads `smoothing` uniformly over the remaining `classes - 1` labels.
    With smoothing=0 this reduces to standard cross-entropy.
    """
    def __init__(self, classes=10, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim

    def forward(self, pred, target):
        log_probs = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            # Smoothed one-hot targets; no gradient flows through them.
            smooth_targets = torch.full_like(log_probs, self.smoothing / (self.cls - 1))
            smooth_targets.scatter_(1, target.data.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-smooth_targets * log_probs, dim=self.dim))
class Batches():
    """DataLoader wrapper yielding dicts of device-resident float inputs / long targets.

    If `set_random_choices` is True, the wrapped dataset's random augmentation
    choices are resampled at the start of each pass.
    """
    def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers,
            pin_memory=True, shuffle=shuffle, drop_last=drop_last
        )

    def __iter__(self):
        if self.set_random_choices:
            self.dataset.set_random_choices()
        # `device` is a module-level global defined elsewhere in this file.
        to_batch = lambda xy: {'input': xy[0].to(device).float(), 'target': xy[1].to(device).long()}
        return map(to_batch, self.dataloader)

    def __len__(self):
        return len(self.dataloader)
def mixup_data(x, y, alpha=1.0):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    # Sample the mixing coefficient from Beta(alpha, alpha); alpha<=0 disables mixing.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    batch_size = x.size()[0]
    perm = torch.randperm(batch_size).cuda()
    mixed_x = lam * x + (1 - lam) * x[perm, :]
    return mixed_x, y, y[perm], lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Convex combination of the criterion evaluated against both mixup targets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
def dlr_loss(x, y):
    """Difference-of-Logits-Ratio loss (mean over batch), as used by AutoAttack.

    The CW-style margin is divided by the gap between the largest and
    third-largest logits, making the loss scale-invariant.
    """
    sorted_logits, sorted_idx = x.sort(dim=1)
    top_is_true = (sorted_idx[:, -1] == y).float()
    z_y = x[np.arange(x.shape[0]), y]
    margin = z_y - sorted_logits[:, -2] * top_is_true - sorted_logits[:, -1] * (1. - top_is_true)
    scale = sorted_logits[:, -1] - sorted_logits[:, -3] + 1e-12
    return (-(margin / scale)).mean()
def CW_loss(x, y):
    """Carlini-Wagner style margin loss, averaged over the batch."""
    ranked, ranked_idx = x.sort(dim=1)
    hit = (ranked_idx[:, -1] == y).float()
    target_logit = x[np.arange(x.shape[0]), y]
    rival = ranked[:, -2] * hit + ranked[:, -1] * (1. - hit)
    return (rival - target_logit).mean()
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts,
               norm, mixup=False, y_a=None, y_b=None, lam=None,
               early_stop=False, early_stop_pgd_max=1,
               multitarget=False,
               use_DLRloss=False, use_CWloss=False,
               epoch=0, totalepoch=110, gamma=0.8,
               use_adaptive=False, s_HE=15,
               fast_better=False, BNeval=False):
    """Craft PGD adversarial perturbations for a batch (X, y).

    Runs `restarts` random restarts of `attack_iters` PGD steps under the given
    norm ball ('l_inf' or 'l_2') of radius `epsilon` with step size `alpha`,
    and keeps, per example, the perturbation that achieved the highest loss.

    The many flags select the inner maximization objective: mixup targets,
    multi-target random labels, DLR/CW losses blended with cross entropy over
    training progress, an adaptive scale `s_HE` for hypersphere embedding, or
    the GradAlign-style regularized FGSM objective (`fast_better`). `BNeval`
    switches the model to eval mode (frozen BN stats) while crafting, and
    restores train mode before returning. `early_stop` freezes examples that
    have been fooled `early_stop_pgd_max` times.

    NOTE(review): relies on module-level `clamp`, `normalize`, `lower_limit`
    and `upper_limit` (standard CIFAR pixel-range helpers — confirm against
    the rest of the file), and requires CUDA (tensors are created with
    .cuda()).

    Returns:
        (max_delta, iter_count): the best perturbation per example, and the
        number of PGD steps each example of the *last restart* actually took.
    """
    max_loss = torch.zeros(y.shape[0]).cuda()
    max_delta = torch.zeros_like(X).cuda()
    if BNeval:
        model.eval()
    for _ in range(restarts):
        # early stop pgd counter for each x
        early_stop_pgd_count = early_stop_pgd_max * torch.ones(y.shape[0], dtype=torch.int32).cuda()
        # initialize perturbation
        delta = torch.zeros_like(X).cuda()
        if norm == "l_inf":
            delta.uniform_(-epsilon, epsilon)
        elif norm == "l_2":
            # Uniform sample inside the L2 ball: Gaussian direction, random radius.
            delta.normal_()
            d_flat = delta.view(delta.size(0),-1)
            n = d_flat.norm(p=2,dim=1).view(delta.size(0),1,1,1)
            r = torch.zeros_like(n).uniform_(0, 1)
            delta *= r/n*epsilon
        else:
            raise ValueError
        # Keep X + delta inside the valid pixel range.
        delta = clamp(delta, lower_limit-X, upper_limit-X)
        delta.requires_grad = True
        iter_count = torch.zeros(y.shape[0])
        # craft adversarial examples
        for _ in range(attack_iters):
            output = model(normalize(X + delta))
            # if use early stop pgd
            if early_stop:
                # calculate mask for early stop pgd
                if_success_fool = (output.max(1)[1] != y).to(dtype=torch.int32)
                early_stop_pgd_count = early_stop_pgd_count - if_success_fool
                # `index` selects only the examples still being attacked.
                index = torch.where(early_stop_pgd_count > 0)[0]
                iter_count[index] = iter_count[index] + 1
            else:
                index = slice(None,None,None)
            # All examples stopped early: end this restart.
            if not isinstance(index, slice) and len(index) == 0:
                break
            # Whether use mixup criterion
            if fast_better:
                # FGSM objective regularized by the squared gradient norm
                # (create_graph=True so the penalty is differentiated too).
                loss_ori = F.cross_entropy(output, y)
                grad_ori = torch.autograd.grad(loss_ori, delta, create_graph=True)[0]
                loss_grad = (alpha / 4.) * (torch.norm(grad_ori.view(grad_ori.shape[0], -1), p=2, dim=1) ** 2)
                loss = loss_ori + loss_grad.mean()
                loss.backward()
                grad = delta.grad.detach()
            elif not mixup:
                if multitarget:
                    # Push towards a random label when it differs from y,
                    # away from y when they coincide.
                    random_label = torch.randint(low=0, high=10, size=y.shape).cuda()
                    random_direction = 2*((random_label == y).to(dtype=torch.float32) - 0.5)
                    loss = torch.mean(random_direction * F.cross_entropy(output, random_label, reduction='none'))
                    loss.backward()
                    grad = delta.grad.detach()
                elif use_DLRloss:
                    # Blend CE -> DLR as training progresses (weight beta_).
                    beta_ = gamma * epoch / totalepoch
                    loss = (1. - beta_) * F.cross_entropy(output, y) + beta_ * dlr_loss(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
                elif use_CWloss:
                    beta_ = gamma * epoch / totalepoch
                    loss = (1. - beta_) * F.cross_entropy(output, y) + beta_ * CW_loss(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
                else:
                    if use_adaptive:
                        # Scale logits by s_HE (hypersphere-embedding training).
                        loss = F.cross_entropy(s_HE * output, y)
                    else:
                        loss = F.cross_entropy(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
            else:
                criterion = nn.CrossEntropyLoss()
                loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
                loss.backward()
                grad = delta.grad.detach()
            # Update only the still-active examples.
            d = delta[index, :, :, :]
            g = grad[index, :, :, :]
            x = X[index, :, :, :]
            if norm == "l_inf":
                # Signed-gradient step projected back into the eps-ball.
                d = torch.clamp(d + alpha * torch.sign(g), min=-epsilon, max=epsilon)
            elif norm == "l_2":
                # Normalized-gradient step, then renorm onto the L2 eps-ball.
                g_norm = torch.norm(g.view(g.shape[0],-1),dim=1).view(-1,1,1,1)
                scaled_g = g/(g_norm + 1e-10)
                d = (d + scaled_g*alpha).view(d.size(0),-1).renorm(p=2,dim=0,maxnorm=epsilon).view_as(d)
            # Re-project into the valid pixel range.
            d = clamp(d, lower_limit - x, upper_limit - x)
            delta.data[index, :, :, :] = d
            delta.grad.zero_()
        # Keep, per example, the delta achieving the highest final loss.
        if mixup:
            criterion = nn.CrossEntropyLoss(reduction='none')
            all_loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
        else:
            all_loss = F.cross_entropy(model(normalize(X+delta)), y, reduction='none')
        max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
        max_loss = torch.max(max_loss, all_loss)
    # Restore training-mode BN statistics updates.
    if BNeval:
        model.train()
    return max_delta, iter_count
def get_args():
    """Parse and return the command-line arguments for adversarial training."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    # Model / data / learning-rate schedule
    add('--model', default='PreActResNet18')
    add('--l1', default=0, type=float)
    add('--data-dir', default='../cifar-data', type=str)
    add('--epochs', default=110, type=int)
    add('--lr-schedule', default='piecewise',
        choices=['superconverge', 'piecewise', 'linear', 'piecewisesmoothed', 'piecewisezoom', 'onedrop', 'multipledecay', 'cosine', 'cyclic'])
    add('--lr-max', default=0.1, type=float)
    add('--lr-one-drop', default=0.01, type=float)
    add('--lr-drop-epoch', default=100, type=int)
    # Attack configuration (train and test time)
    add('--attack', default='pgd', type=str, choices=['pgd', 'fgsm', 'free', 'none'])
    add('--epsilon', default=8, type=int)
    add('--test_epsilon', default=8, type=int)
    add('--attack-iters', default=10, type=int)
    add('--restarts', default=1, type=int)
    add('--pgd-alpha', default=2, type=float)
    add('--test-pgd-alpha', default=2, type=float)
    add('--fgsm-alpha', default=1.25, type=float)
    add('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2'])
    add('--fgsm-init', default='random', choices=['zero', 'random', 'previous'])
    # Bookkeeping / run control
    add('--fname', default='cifar_model', type=str)
    add('--seed', default=0, type=int)
    add('--half', action='store_true')
    add('--width-factor', default=10, type=int)
    add('--resume', default=0, type=int)
    add('--eval', action='store_true')
    add('--val', action='store_true')
    add('--chkpt-iters', default=100, type=int)
    add('--mixture', action='store_true')  # mix clean and adv examples in a mini-batch
    add('--mixture_alpha', type=float)
    add('--l2', default=0, type=float)
    # Group 1: schedule / regularization tricks
    add('--earlystopPGD', action='store_true')  # use early stop inside PGD
    add('--earlystopPGDepoch1', default=60, type=int)
    add('--earlystopPGDepoch2', default=100, type=int)
    add('--warmup_lr', action='store_true')  # warm up lr from 0 to max_lr over the first n epochs
    add('--warmup_lr_epoch', default=15, type=int)
    add('--weight_decay', default=5e-4, type=float)  # weight decay
    add('--warmup_eps', action='store_true')  # warm up eps from 0 to 8/255 over the first n epochs
    add('--warmup_eps_epoch', default=15, type=int)
    add('--batch-size', default=128, type=int)  # batch size
    add('--labelsmooth', action='store_true')  # use label smoothing
    add('--labelsmoothvalue', default=0.0, type=float)
    add('--lrdecay', default='base', type=str, choices=['intenselr', 'base', 'looselr', 'lineardecay'])
    # Group 2: loss variants and architecture tweaks
    add('--use_DLRloss', action='store_true')  # use DLR loss
    add('--use_CWloss', action='store_true')  # use CW loss
    add('--use_multitarget', action='store_true')  # use multitarget attack
    add('--use_stronger_adv', action='store_true')  # strengthen the attack late in training
    add('--stronger_index', default=0, type=int)
    add('--use_FNandWN', action='store_true')  # use feature and weight normalization
    add('--use_adaptive', action='store_true')  # use scale s in the training attack
    add('--s_FN', default=15, type=float)  # s in FN
    add('--m_FN', default=0.2, type=float)  # m in FN
    add('--use_FNonly', action='store_true')  # use FN only
    add('--fast_better', action='store_true')
    add('--BNeval', action='store_true')  # BN eval mode while crafting adversarial examples
    add('--focalloss', action='store_true')  # use focal loss
    add('--focallosslambda', default=2., type=float)
    add('--activation', default='ReLU', type=str)
    add('--softplus_beta', default=1., type=float)
    add('--optimizer', default='momentum', choices=['momentum', 'Nesterov', 'SGD_GC', 'SGD_GCC', 'Adam', 'AdamW'])
    add('--mixup', action='store_true')
    add('--mixup-alpha', type=float)
    add('--cutout', action='store_true')
    add('--cutout-len', type=int)
    return parser.parse_args()
def get_auto_fname(args):
    """Derive a self-describing output-directory name from the configuration.

    Concatenates the core hyperparameters, then appends one tag per enabled
    trick so that the directory name uniquely identifies the run.
    """
    parts = [args.model, '_', args.lr_schedule, '_eps', str(args.epsilon),
             '_bs', str(args.batch_size), '_maxlr', str(args.lr_max)]
    # Group 1
    if args.earlystopPGD:
        parts += ['_earlystopPGD', str(args.earlystopPGDepoch1), str(args.earlystopPGDepoch2)]
    if args.warmup_lr:
        parts += ['_warmuplr', str(args.warmup_lr_epoch)]
    if args.warmup_eps:
        parts += ['_warmupeps', str(args.warmup_eps_epoch)]
    if args.weight_decay != 5e-4:
        parts += ['_wd', str(args.weight_decay)]
    if args.labelsmooth:
        parts += ['_ls', str(args.labelsmoothvalue)]
    # Group 2
    if args.use_stronger_adv:
        parts += ['_usestrongeradv#', str(args.stronger_index)]
    if args.use_multitarget:
        parts.append('_usemultitarget')
    if args.use_DLRloss:
        parts.append('_useDLRloss')
    if args.use_CWloss:
        parts.append('_useCWloss')
    if args.use_FNandWN:
        parts += ['_HE', 's', str(args.s_FN), 'm', str(args.m_FN)]
    if args.use_adaptive:
        parts.append('adaptive')  # original naming has no leading underscore here
    if args.use_FNonly:
        parts.append('_FNonly')
    if args.fast_better:
        parts.append('_fastbetter')
    if args.activation != 'ReLU':
        parts += ['_', args.activation]
        if args.activation == 'Softplus':
            parts.append(str(args.softplus_beta))
    if args.lrdecay != 'base':
        parts += ['_', args.lrdecay]
    if args.BNeval:
        parts.append('_BNeval')
    if args.focalloss:
        parts += ['_focalloss', str(args.focallosslambda)]
    if args.optimizer != 'momentum':
        parts += ['_', args.optimizer]
    if args.mixup:
        parts += ['_mixup', str(args.mixup_alpha)]
    if args.cutout:
        parts += ['_cutout', str(args.cutout_len)]
    if args.attack != 'pgd':
        parts += ['_', args.attack]
    names = ''.join(parts)
    print('File name: ', names)
    return names
def main():
    """Entry point: parse arguments, build data/model/optimizer, run
    adversarial training (PGD / FGSM / free / none) with the selected bag of
    tricks, evaluate each epoch, and save per-epoch records and checkpoints.

    Fixes vs. the previous revision:
    - Per-epoch *test* records were divided by ``train_n`` instead of
      ``test_n``, corrupting every saved test metric; now divided by ``test_n``.
    - In ``--eval`` mode the training loop breaks before ``lr`` is assigned,
      so the post-loop prints raised ``NameError``; they are now guarded.
    """
    args = get_args()
    # Resolve the output directory ('auto' derives a descriptive name).
    if args.fname == 'auto':
        names = get_auto_fname(args)
        args.fname = 'trained_models/' + names
    else:
        args.fname = 'trained_models/' + args.fname
    if not os.path.exists(args.fname):
        os.makedirs(args.fname)
    # Log to both a file inside the run directory and stdout.
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        format='[%(asctime)s] - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=logging.DEBUG,
        handlers=[
            logging.FileHandler(os.path.join(args.fname, 'eval.log' if args.eval else 'output.log')),
            logging.StreamHandler()
        ])
    logger.info(args)
    # Set seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # Prepare data (Crop/FlipLR/Cutout/Transform/cifar10/pad/transpose are
    # module-level helpers defined elsewhere in this file).
    transforms = [Crop(32, 32), FlipLR()]
    if args.cutout:
        transforms.append(Cutout(args.cutout_len, args.cutout_len))
    if args.val:
        try:
            dataset = torch.load("cifar10_validation_split.pth")
        except:
            print("Couldn't find a dataset with a validation split, did you run "
                  "generate_validation.py?")
            return
        val_set = list(zip(transpose(dataset['val']['data']/255.), dataset['val']['labels']))
        val_batches = Batches(val_set, args.batch_size, shuffle=False, num_workers=4)
    else:
        dataset = cifar10(args.data_dir)
    train_set = list(zip(transpose(pad(dataset['train']['data'], 4)/255.),
                         dataset['train']['labels']))
    train_set_x = Transform(train_set, transforms)
    train_batches = Batches(train_set_x, args.batch_size, shuffle=True, set_random_choices=True, num_workers=4)
    test_set = list(zip(transpose(dataset['test']['data']/255.), dataset['test']['labels']))
    test_batches = Batches(test_set, args.batch_size, shuffle=False, num_workers=4)
    # Set perturbations (convert from integer pixel units to [0, 1] scale).
    epsilon = (args.epsilon / 255.)
    test_epsilon = (args.test_epsilon / 255.)
    pgd_alpha = (args.pgd_alpha / 255.)
    test_pgd_alpha = (args.test_pgd_alpha / 255.)
    # Set models
    if args.model == 'VGG':
        model = VGG('VGG19')
    elif args.model == 'ResNet18':
        model = ResNet18()
    elif args.model == 'GoogLeNet':
        model = GoogLeNet()
    elif args.model == 'DenseNet121':
        model = DenseNet121()
    elif args.model == 'DenseNet201':
        model = DenseNet201()
    elif args.model == 'ResNeXt29':
        model = ResNeXt29_2x64d()
    elif args.model == 'ResNeXt29L':
        model = ResNeXt29_32x4d()
    elif args.model == 'MobileNet':
        model = MobileNet()
    elif args.model == 'MobileNetV2':
        model = MobileNetV2()
    elif args.model == 'DPN26':
        model = DPN26()
    elif args.model == 'DPN92':
        model = DPN92()
    elif args.model == 'ShuffleNetG2':
        model = ShuffleNetG2()
    elif args.model == 'SENet18':
        model = SENet18()
    elif args.model == 'ShuffleNetV2':
        model = ShuffleNetV2(1)
    elif args.model == 'EfficientNetB0':
        model = EfficientNetB0()
    elif args.model == 'PNASNetA':
        model = PNASNetA()
    elif args.model == 'RegNetX':
        model = RegNetX_200MF()
    elif args.model == 'RegNetLX':
        model = RegNetX_400MF()
    elif args.model == 'PreActResNet50':
        model = PreActResNet50()
    elif args.model == 'PreActResNet18':
        model = PreActResNet18(normalize_only_FN=args.use_FNonly, normalize=args.use_FNandWN, scale=args.s_FN,
                               activation=args.activation, softplus_beta=args.softplus_beta)
    elif args.model == 'WideResNet':
        model = WideResNet(34, 10, widen_factor=10, dropRate=0.0, normalize=args.use_FNandWN,
                           activation=args.activation, softplus_beta=args.softplus_beta)
    elif args.model == 'WideResNet_20':
        model = WideResNet(34, 10, widen_factor=20, dropRate=0.0, normalize=args.use_FNandWN,
                           activation=args.activation, softplus_beta=args.softplus_beta)
    else:
        raise ValueError("Unknown model")
    model = nn.DataParallel(model).cuda()
    model.train()
    # Set training hyperparameters: optionally exclude BN and bias parameters
    # from L2 weight decay.
    if args.l2:
        decay, no_decay = [], []
        for name, param in model.named_parameters():
            if 'bn' not in name and 'bias' not in name:
                decay.append(param)
            else:
                no_decay.append(param)
        params = [{'params': decay, 'weight_decay': args.l2},
                  {'params': no_decay, 'weight_decay': 0}]
    else:
        params = model.parameters()
    if args.lr_schedule == 'cyclic':
        opt = torch.optim.Adam(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
    else:
        if args.optimizer == 'momentum':
            opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'Nesterov':
            opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay, nesterov=True)
        elif args.optimizer == 'SGD_GC':
            opt = SGD_GC(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'SGD_GCC':
            opt = SGD_GCC(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'Adam':
            opt = torch.optim.Adam(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
        elif args.optimizer == 'AdamW':
            opt = torch.optim.AdamW(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
    # Cross-entropy (mean), optionally label-smoothed.
    if args.labelsmooth:
        criterion = LabelSmoothingLoss(smoothing=args.labelsmoothvalue)
    else:
        criterion = nn.CrossEntropyLoss()
    # If we use freeAT or fastAT with previous init, keep a persistent delta.
    if args.attack == 'free':
        delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
        delta.requires_grad = True
    elif args.attack == 'fgsm' and args.fgsm_init == 'previous':
        delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
        delta.requires_grad = True
    # Free AT replays each batch attack_iters times, so scale down the epochs.
    if args.attack == 'free':
        epochs = int(math.ceil(args.epochs / args.attack_iters))
    else:
        epochs = args.epochs
    # Set lr schedule
    if args.lr_schedule == 'superconverge':
        lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0]
    elif args.lr_schedule == 'piecewise':
        def lr_schedule(t, warm_up_lr=args.warmup_lr):
            # Full lr (after optional warmup) for the first 100 epochs, then
            # decay according to args.lrdecay.
            if t < 100:
                if warm_up_lr and t < args.warmup_lr_epoch:
                    return (t + 1.) / args.warmup_lr_epoch * args.lr_max
                else:
                    return args.lr_max
            if args.lrdecay == 'lineardecay':
                if t < 105:
                    return args.lr_max * 0.02 * (105 - t)
                else:
                    return 0.
            elif args.lrdecay == 'intenselr':
                if t < 102:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
            elif args.lrdecay == 'looselr':
                if t < 150:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
            elif args.lrdecay == 'base':
                if t < 105:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
    elif args.lr_schedule == 'linear':
        lr_schedule = lambda t: np.interp([t], [0, args.epochs // 3, args.epochs * 2 // 3, args.epochs], [args.lr_max, args.lr_max, args.lr_max / 10, args.lr_max / 100])[0]
    elif args.lr_schedule == 'onedrop':
        def lr_schedule(t):
            if t < args.lr_drop_epoch:
                return args.lr_max
            else:
                return args.lr_one_drop
    elif args.lr_schedule == 'multipledecay':
        def lr_schedule(t):
            return args.lr_max - (t//(args.epochs//10))*(args.lr_max/10)
    elif args.lr_schedule == 'cosine':
        def lr_schedule(t):
            return args.lr_max * 0.5 * (1 + np.cos(t / args.epochs * np.pi))
    elif args.lr_schedule == 'cyclic':
        def lr_schedule(t, stepsize=18, min_lr=1e-5, max_lr=args.lr_max):
            # Scaler: we can adapt this if we do not want the triangular CLR
            scaler = lambda x: 1.
            # Additional function to see where on the cycle we are
            cycle = math.floor(1 + t / (2 * stepsize))
            x = abs(t / stepsize - 2 * cycle + 1)
            relative = max(0, (1 - x)) * scaler(cycle)
            return min_lr + (max_lr - min_lr) * relative
    #### Set stronger adv attacks when decay the lr ####
    def eps_alpha_schedule(t, warm_up_eps=args.warmup_eps, if_use_stronger_adv=args.use_stronger_adv, stronger_index=args.stronger_index):  # Schedule number 0
        # Returns (epsilon, pgd_alpha, restarts) for epoch t; optionally
        # strengthens the attack once the lr has decayed (t >= 100).
        if stronger_index == 0:
            epsilon_s = [epsilon * 1.5, epsilon * 2]
            pgd_alpha_s = [pgd_alpha, pgd_alpha]
        elif stronger_index == 1:
            epsilon_s = [epsilon * 1.5, epsilon * 2]
            pgd_alpha_s = [pgd_alpha * 1.25, pgd_alpha * 1.5]
        elif stronger_index == 2:
            epsilon_s = [epsilon * 2, epsilon * 2.5]
            pgd_alpha_s = [pgd_alpha * 1.5, pgd_alpha * 2]
        else:
            print('Undefined stronger index')
        if if_use_stronger_adv:
            if t < 100:
                if t < args.warmup_eps_epoch and warm_up_eps:
                    return (t + 1.) / args.warmup_eps_epoch * epsilon, pgd_alpha, args.restarts
                else:
                    return epsilon, pgd_alpha, args.restarts
            elif t < 105:
                return epsilon_s[0], pgd_alpha_s[0], args.restarts
            else:
                return epsilon_s[1], pgd_alpha_s[1], args.restarts
        else:
            if t < args.warmup_eps_epoch and warm_up_eps:
                return (t + 1.) / args.warmup_eps_epoch * epsilon, pgd_alpha, args.restarts
            else:
                return epsilon, pgd_alpha, args.restarts
    #### Set the counter for the early stop of PGD ####
    def early_stop_counter_schedule(t):
        if t < args.earlystopPGDepoch1:
            return 1
        elif t < args.earlystopPGDepoch2:
            return 2
        else:
            return 3
    best_test_robust_acc = 0
    best_val_robust_acc = 0
    if args.resume:
        # Resume model/optimizer state and the best accuracies seen so far.
        start_epoch = args.resume
        model.load_state_dict(torch.load(os.path.join(args.fname, f'model_{start_epoch-1}.pth')))
        opt.load_state_dict(torch.load(os.path.join(args.fname, f'opt_{start_epoch-1}.pth')))
        logger.info(f'Resuming at epoch {start_epoch}')
        best_test_robust_acc = torch.load(os.path.join(args.fname, f'model_best.pth'))['test_robust_acc']
        if args.val:
            best_val_robust_acc = torch.load(os.path.join(args.fname, f'model_val.pth'))['val_robust_acc']
    else:
        start_epoch = 0
    if args.eval:
        if not args.resume:
            logger.info("No model loaded to evaluate, specify with --resume FNAME")
            return
        logger.info("[Evaluation mode]")
    logger.info('Epoch \t Train Acc \t Train Robust Acc \t Test Acc \t Test Robust Acc')
    # Records per epoch for savetxt
    train_loss_record = []
    train_acc_record = []
    train_robust_loss_record = []
    train_robust_acc_record = []
    train_grad_record = []
    test_loss_record = []
    test_acc_record = []
    test_robust_loss_record = []
    test_robust_acc_record = []
    test_grad_record = []
    for epoch in range(start_epoch, epochs):
        model.train()
        start_time = time.time()
        train_loss = 0
        train_acc = 0
        train_robust_loss = 0
        train_robust_acc = 0
        train_n = 0
        train_grad = 0
        record_iter = torch.tensor([])
        for i, batch in enumerate(train_batches):
            if args.eval:
                break
            X, y = batch['input'], batch['target']
            # Margin target used by the FN+WN (hypersphere embedding) loss.
            onehot_target_withmargin_HE = args.m_FN * args.s_FN * torch.nn.functional.one_hot(y, num_classes=10)
            if args.mixup:
                X, y_a, y_b, lam = mixup_data(X, y, args.mixup_alpha)
                X, y_a, y_b = map(Variable, (X, y_a, y_b))
            # Fractional epoch index drives lr / eps / early-stop schedules.
            epoch_now = epoch + (i + 1) / len(train_batches)
            lr = lr_schedule(epoch_now)
            opt.param_groups[0].update(lr=lr)
            if args.attack == 'pgd':
                # Random initialization
                epsilon_sche, pgd_alpha_sche, restarts_sche = eps_alpha_schedule(epoch_now)
                early_counter_max = early_stop_counter_schedule(epoch_now)
                if args.mixup:
                    delta, iter_counts = attack_pgd(model, X, y, epsilon_sche, pgd_alpha_sche, args.attack_iters, restarts_sche, args.norm,
                                                    early_stop=args.earlystopPGD, early_stop_pgd_max=early_counter_max,
                                                    mixup=True, y_a=y_a, y_b=y_b, lam=lam)
                else:
                    delta, iter_counts = attack_pgd(model, X, y, epsilon_sche, pgd_alpha_sche, args.attack_iters, restarts_sche, args.norm,
                                                    early_stop=args.earlystopPGD, early_stop_pgd_max=early_counter_max, multitarget=args.use_multitarget,
                                                    use_DLRloss=args.use_DLRloss, use_CWloss=args.use_CWloss,
                                                    epoch=epoch_now, totalepoch=args.epochs, gamma=0.8,
                                                    use_adaptive=args.use_adaptive, s_HE=args.s_FN,
                                                    fast_better=args.fast_better, BNeval=args.BNeval)
                record_iter = torch.cat((record_iter, iter_counts))
                delta = delta.detach()
            elif args.attack == 'fgsm':
                delta, _ = attack_pgd(model, X, y, epsilon, args.fgsm_alpha*epsilon, 1, 1, args.norm, fast_better=args.fast_better)
                delta = delta.detach()
            # Standard training
            elif args.attack == 'none':
                delta = torch.zeros_like(X)
            adv_input = normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))
            adv_input.requires_grad = True
            robust_output = model(adv_input)
            # Training losses
            if args.mixup:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                robust_loss = mixup_criterion(criterion, robust_output, y_a, y_b, lam)
            elif args.mixture:
                # Weighted mixture of adversarial and clean losses.
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                robust_loss = args.mixture_alpha * criterion(robust_output, y) + (1-args.mixture_alpha) * criterion(output, y)
            else:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                if args.focalloss:
                    criterion_nonreduct = nn.CrossEntropyLoss(reduction='none')
                    robust_confidence = F.softmax(robust_output, dim=1)[:, y].detach()
                    robust_loss = (criterion_nonreduct(robust_output, y) * ((1. - robust_confidence) ** args.focallosslambda)).mean()
                elif args.use_DLRloss:
                    # Gradually shift weight from CE to DLR over training.
                    beta_ = 0.8 * epoch_now / args.epochs
                    robust_loss = (1. - beta_) * F.cross_entropy(robust_output, y) + beta_ * dlr_loss(robust_output, y)
                elif args.use_CWloss:
                    beta_ = 0.8 * epoch_now / args.epochs
                    robust_loss = (1. - beta_) * F.cross_entropy(robust_output, y) + beta_ * CW_loss(robust_output, y)
                elif args.use_FNandWN:
                    #print('use FN and WN with margin')
                    robust_loss = criterion(args.s_FN * robust_output - onehot_target_withmargin_HE, y)
                else:
                    robust_loss = criterion(robust_output, y)
            # Optional L1 penalty on non-BN, non-bias weights.
            if args.l1:
                for name, param in model.named_parameters():
                    if 'bn' not in name and 'bias' not in name:
                        robust_loss += args.l1*param.abs().sum()
            opt.zero_grad()
            robust_loss.backward()
            opt.step()
            # Clean forward pass (also used to measure input-gradient norms).
            clean_input = normalize(X)
            clean_input.requires_grad = True
            output = model(clean_input)
            if args.mixup:
                loss = mixup_criterion(criterion, output, y_a, y_b, lam)
            else:
                loss = criterion(output, y)
            # Get the gradient norm values
            input_grads = torch.autograd.grad(loss, clean_input, create_graph=False)[0]
            # Record the statistic values
            train_robust_loss += robust_loss.item() * y.size(0)
            train_robust_acc += (robust_output.max(1)[1] == y).sum().item()
            train_loss += loss.item() * y.size(0)
            train_acc += (output.max(1)[1] == y).sum().item()
            train_n += y.size(0)
            train_grad += input_grads.abs().sum()
        train_time = time.time()
        # Guard: in --eval mode the loop above breaks immediately, so `lr` and
        # `record_iter` statistics are undefined/meaningless.
        if not args.eval:
            if args.earlystopPGD:
                print('Iter mean: ', record_iter.mean().item(), ' Iter std: ', record_iter.std().item())
            print('Learning rate: ', lr)
        # Evaluate on test data
        model.eval()
        test_loss = 0
        test_acc = 0
        test_robust_loss = 0
        test_robust_acc = 0
        test_n = 0
        test_grad = 0
        for i, batch in enumerate(test_batches):
            X, y = batch['input'], batch['target']
            # Random initialization
            if args.attack == 'none':
                delta = torch.zeros_like(X)
            else:
                delta, _ = attack_pgd(model, X, y, test_epsilon, test_pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=False)
            delta = delta.detach()
            adv_input = normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))
            adv_input.requires_grad = True
            robust_output = model(adv_input)
            robust_loss = criterion(robust_output, y)
            clean_input = normalize(X)
            clean_input.requires_grad = True
            output = model(clean_input)
            loss = criterion(output, y)
            # Get the gradient norm values
            input_grads = torch.autograd.grad(loss, clean_input, create_graph=False)[0]
            test_robust_loss += robust_loss.item() * y.size(0)
            test_robust_acc += (robust_output.max(1)[1] == y).sum().item()
            test_loss += loss.item() * y.size(0)
            test_acc += (output.max(1)[1] == y).sum().item()
            test_n += y.size(0)
            test_grad += input_grads.abs().sum()
        test_time = time.time()
        if args.val:
            val_loss = 0
            val_acc = 0
            val_robust_loss = 0
            val_robust_acc = 0
            val_n = 0
            for i, batch in enumerate(val_batches):
                X, y = batch['input'], batch['target']
                # Random initialization
                if args.attack == 'none':
                    delta = torch.zeros_like(X)
                else:
                    delta, _ = attack_pgd(model, X, y, test_epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=False)
                delta = delta.detach()
                robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit)))
                robust_loss = criterion(robust_output, y)
                output = model(normalize(X))
                loss = criterion(output, y)
                val_robust_loss += robust_loss.item() * y.size(0)
                val_robust_acc += (robust_output.max(1)[1] == y).sum().item()
                val_loss += loss.item() * y.size(0)
                val_acc += (output.max(1)[1] == y).sum().item()
                val_n += y.size(0)
        if not args.eval:
            logger.info('%d \t %.4f \t %.4f \t %.4f \t %.4f',
                        epoch, train_acc/train_n, train_robust_acc/train_n, test_acc/test_n, test_robust_acc/test_n)
            # Save results
            train_loss_record.append(train_loss/train_n)
            train_acc_record.append(train_acc/train_n)
            train_robust_loss_record.append(train_robust_loss/train_n)
            train_robust_acc_record.append(train_robust_acc/train_n)
            train_grad_record.append(train_grad/train_n)
            np.savetxt(args.fname+'/train_loss_record.txt', np.array(train_loss_record))
            np.savetxt(args.fname+'/train_acc_record.txt', np.array(train_acc_record))
            np.savetxt(args.fname+'/train_robust_loss_record.txt', np.array(train_robust_loss_record))
            np.savetxt(args.fname+'/train_robust_acc_record.txt', np.array(train_robust_acc_record))
            np.savetxt(args.fname+'/train_grad_record.txt', np.array(train_grad_record))
            # Fix: test statistics must be normalized by the number of test
            # samples (test_n), not train_n as before.
            test_loss_record.append(test_loss/test_n)
            test_acc_record.append(test_acc/test_n)
            test_robust_loss_record.append(test_robust_loss/test_n)
            test_robust_acc_record.append(test_robust_acc/test_n)
            test_grad_record.append(test_grad/test_n)
            np.savetxt(args.fname+'/test_loss_record.txt', np.array(test_loss_record))
            np.savetxt(args.fname+'/test_acc_record.txt', np.array(test_acc_record))
            np.savetxt(args.fname+'/test_robust_loss_record.txt', np.array(test_robust_loss_record))
            np.savetxt(args.fname+'/test_robust_acc_record.txt', np.array(test_robust_acc_record))
            np.savetxt(args.fname+'/test_grad_record.txt', np.array(test_grad_record))
            if args.val:
                logger.info('validation %.4f \t %.4f \t %.4f \t %.4f',
                            val_loss/val_n, val_acc/val_n, val_robust_loss/val_n, val_robust_acc/val_n)
                if val_robust_acc/val_n > best_val_robust_acc:
                    torch.save({
                        'state_dict':model.state_dict(),
                        'test_robust_acc':test_robust_acc/test_n,
                        'test_robust_loss':test_robust_loss/test_n,
                        'test_loss':test_loss/test_n,
                        'test_acc':test_acc/test_n,
                        'val_robust_acc':val_robust_acc/val_n,
                        'val_robust_loss':val_robust_loss/val_n,
                        'val_loss':val_loss/val_n,
                        'val_acc':val_acc/val_n,
                    }, os.path.join(args.fname, f'model_val.pth'))
                    best_val_robust_acc = val_robust_acc/val_n
            # save checkpoint
            if epoch > 99 or (epoch+1) % args.chkpt_iters == 0 or epoch+1 == epochs:
                torch.save(model.state_dict(), os.path.join(args.fname, f'model_{epoch}.pth'))
                torch.save(opt.state_dict(), os.path.join(args.fname, f'opt_{epoch}.pth'))
            # save best
            if test_robust_acc/test_n > best_test_robust_acc:
                torch.save({
                    'state_dict':model.state_dict(),
                    'test_robust_acc':test_robust_acc/test_n,
                    'test_robust_loss':test_robust_loss/test_n,
                    'test_loss':test_loss/test_n,
                    'test_acc':test_acc/test_n,
                }, os.path.join(args.fname, f'model_best.pth'))
                best_test_robust_acc = test_robust_acc/test_n
        else:
            # Evaluation-only mode: report test metrics once and exit.
            logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f',
                        epoch, train_time - start_time, test_time - train_time, -1,
                        -1, -1, -1, -1,
                        test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
            return
# Standard script entry point: run training/evaluation when executed directly.
if __name__ == "__main__":
    main()
| 39,863 | 40.962105 | 208 | py |
DEAT | DEAT-main/wideresnet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-activation WideResNet block: BN-act-conv3x3, BN-act-conv3x3 plus a
    shortcut (1x1 conv when the channel count or stride changes)."""

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0, activation='ReLU', softplus_beta=1):
        super(BasicBlock, self).__init__()
        # Submodules are registered in the same order as before so that
        # state_dict keys and RNG-dependent initialization are unchanged.
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        # Pick the activation pair (one per pre-activation site).
        if activation == 'ReLU':
            self.relu1, self.relu2 = nn.ReLU(inplace=True), nn.ReLU(inplace=True)
            print('R')
        elif activation == 'Softplus':
            self.relu1 = nn.Softplus(beta=softplus_beta, threshold=20)
            self.relu2 = nn.Softplus(beta=softplus_beta, threshold=20)
            print('S')
        elif activation == 'GELU':
            self.relu1, self.relu2 = nn.GELU(), nn.GELU()
            print('G')
        elif activation == 'ELU':
            self.relu1 = nn.ELU(alpha=1.0, inplace=True)
            self.relu2 = nn.ELU(alpha=1.0, inplace=True)
            print('E')
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 projection shortcut only when shape changes; None otherwise.
        self.convShortcut = None if self.equalInOut else nn.Conv2d(
            in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)

    def forward(self, x):
        act = self.relu1(self.bn1(x))
        # Identity shortcut when shapes match; otherwise project the
        # *activated* input (pre-activation convention).
        identity = x if self.equalInOut else self.convShortcut(act)
        h = self.relu2(self.bn2(self.conv1(act)))
        if self.droprate > 0:
            h = F.dropout(h, p=self.droprate, training=self.training)
        h = self.conv2(h)
        return torch.add(identity, h)
class NetworkBlock(nn.Module):
    """One stage of the WideResNet: `nb_layers` residual blocks in sequence.

    Only the first block of the stage changes the channel count and applies
    the stage stride; the rest keep out_planes -> out_planes with stride 1.
    """

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, activation='ReLU', softplus_beta=1):
        super(NetworkBlock, self).__init__()
        self.activation = activation
        self.softplus_beta = softplus_beta
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        blocks = [
            block(in_planes if i == 0 else out_planes,
                  out_planes,
                  stride if i == 0 else 1,
                  dropRate, self.activation, self.softplus_beta)
            for i in range(int(nb_layers))
        ]
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide residual network (WRN) for 32x32 inputs (e.g. CIFAR).

    Args:
        depth: total depth; must satisfy (depth - 4) % 6 == 0.
        num_classes: output dimension of the final linear layer.
        widen_factor: channel-width multiplier for the three stages.
        dropRate: dropout rate passed down to each BasicBlock.
        normalize: if True, L2-normalize the pooled features and the rows
            of the (bias-free) classifier before the final matmul.
        activation: one of 'ReLU', 'Softplus', 'GELU', 'ELU'.
        softplus_beta: beta used when activation == 'Softplus'.
    """
    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0, normalize=False, activation='ReLU', softplus_beta=1):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert ((depth - 4) % 6 == 0)
        # Fix: floor division gives an int block count directly; the
        # original true division produced a float that every consumer
        # then had to re-cast with int().
        n = (depth - 4) // 6
        block = BasicBlock
        self.normalize = normalize
        #self.scale = scale
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 1st sub-block: not used in forward(); kept so checkpoints that
        # contain these parameter names still load — TODO confirm needed.
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate, activation=activation, softplus_beta=softplus_beta)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        if activation == 'ReLU':
            self.relu = nn.ReLU(inplace=True)
        elif activation == 'Softplus':
            self.relu = nn.Softplus(beta=softplus_beta, threshold=20)
        elif activation == 'GELU':
            self.relu = nn.GELU()
        elif activation == 'ELU':
            self.relu = nn.ELU(alpha=1.0, inplace=True)
        print('Use activation of ' + activation)
        if self.normalize:
            self.fc = nn.Linear(nChannels[3], num_classes, bias = False)
        else:
            self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style init for convs; BN scale=1/shift=0; zero linear bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear) and not self.normalize:
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # assumes a 32x32 input, which yields an 8x8 map here — TODO confirm
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        if self.normalize:
            # Cosine-classifier style: unit-norm features and weight rows.
            out = F.normalize(out, p=2, dim=1)
            for _, module in self.fc.named_modules():
                if isinstance(module, nn.Linear):
                    module.weight.data = F.normalize(module.weight, p=2, dim=1)
        return self.fc(out)
DEAT | DEAT-main/models/shufflenetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle: interleave channels across ``groups`` groups."""

    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]
        n, c, h, w = x.size()
        grouped = x.view(n, self.groups, c // self.groups, h, w)
        return grouped.permute(0, 2, 1, 3, 4).reshape(n, c, h, w)
class SplitBlock(nn.Module):
    """Split a 4-D tensor along the channel axis at fraction ``ratio``."""

    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        split_at = int(x.size(1) * self.ratio)
        left = x[:, :split_at]
        right = x[:, split_at:]
        return left, right
class BasicBlock(nn.Module):
    """ShuffleNetV2 basic unit (stride 1).

    The input is split along channels; one half passes through unchanged
    while the other goes through a 1x1 -> depthwise 3x3 -> 1x1 bottleneck.
    The halves are concatenated and channel-shuffled.
    """
    def __init__(self, in_channels, split_ratio=0.5):
        super(BasicBlock, self).__init__()
        self.split = SplitBlock(split_ratio)
        # Only the second split half (split_ratio of the channels) is
        # transformed, so all convs operate on the reduced width.
        in_channels = int(in_channels * split_ratio)
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        # Depthwise 3x3 (groups == channels); note: BN but no ReLU after it.
        self.conv2 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        self.conv3 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        # x1 is the untouched shortcut half, x2 is transformed.
        x1, x2 = self.split(x)
        out = F.relu(self.bn1(self.conv1(x2)))
        out = self.bn2(self.conv2(out))
        out = F.relu(self.bn3(self.conv3(out)))
        out = torch.cat([x1, out], 1)
        # Shuffle so information mixes across the two halves next block.
        out = self.shuffle(out)
        return out
class DownBlock(nn.Module):
    """ShuffleNetV2 downsampling unit (stride 2).

    Both branches halve the spatial resolution: the left branch is a
    depthwise 3x3 (stride 2) + 1x1, the right is 1x1 -> depthwise 3x3
    (stride 2) -> 1x1.  Outputs are concatenated (each branch produces
    out_channels // 2) and channel-shuffled.
    """
    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2
        # left
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # right
        self.conv3 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(mid_channels)
        self.conv4 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
        self.bn4 = nn.BatchNorm2d(mid_channels)
        self.conv5 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(mid_channels)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        # left
        out1 = self.bn1(self.conv1(x))
        out1 = F.relu(self.bn2(self.conv2(out1)))
        # right
        out2 = F.relu(self.bn3(self.conv3(x)))
        out2 = self.bn4(self.conv4(out2))
        out2 = F.relu(self.bn5(self.conv5(out2)))
        # concat
        out = torch.cat([out1, out2], 1)
        out = self.shuffle(out)
        return out
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 for 32x32 inputs (CIFAR-style stem, no max-pool).

    Args:
        net_size: width-multiplier key into the module-level ``configs``
            dict (0.5, 1, 1.5 or 2).
        num_classes: size of the final classifier.  Previously hard-coded
            to 10; parameterized (default unchanged) for consistency with
            the other models in this codebase.
    """
    def __init__(self, net_size, num_classes=10):
        super(ShuffleNetV2, self).__init__()
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']
        self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        self.linear = nn.Linear(out_channels[3], num_classes)

    def _make_layer(self, out_channels, num_blocks):
        # One stride-2 DownBlock followed by num_blocks stride-1 blocks.
        layers = [DownBlock(self.in_channels, out_channels)]
        for i in range(num_blocks):
            layers.append(BasicBlock(out_channels))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        # out = F.max_pool2d(out, 3, stride=2, padding=1)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn2(self.conv2(out)))
        # assumes 32x32 input -> 4x4 map after three stride-2 stages — TODO confirm
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
# Per-width presets keyed by the net_size multiplier: 'out_channels' lists
# the three stage widths plus the final 1x1 conv width; 'num_blocks' lists
# the number of BasicBlocks following the DownBlock in each stage.
configs = {
    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },
    1: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2: {
        'out_channels': (224, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
def test():
    """Smoke test: run a small random batch through the 0.5x model."""
    model = ShuffleNetV2(net_size=0.5)
    batch = torch.randn(3, 3, 32, 32)
    print(model(batch).shape)


# test()
| 5,530 | 32.932515 | 107 | py |
DEAT | DEAT-main/models/regnet.py | '''RegNet in PyTorch.
Paper: "Designing Network Design Spaces".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SE(nn.Module):
    '''Squeeze-and-Excitation block.'''

    def __init__(self, in_planes, se_planes):
        super(SE, self).__init__()
        self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True)

    def forward(self, x):
        # Squeeze to 1x1, excite through the two 1x1 convs, rescale x.
        squeezed = F.adaptive_avg_pool2d(x, (1, 1))
        gate = self.se2(F.relu(self.se1(squeezed))).sigmoid()
        return x * gate
class Block(nn.Module):
    """RegNet bottleneck residual block (1x1 -> grouped 3x3 -> 1x1).

    ``w_b = round(w_out * bottleneck_ratio)`` is the bottleneck width;
    the 3x3 conv is grouped with ``w_b // group_width`` groups.  An SE
    module is inserted only when ``se_ratio > 0``.  The shortcut is a
    1x1 conv + BN when stride or width changes, identity otherwise.
    """
    def __init__(self, w_in, w_out, stride, group_width, bottleneck_ratio, se_ratio):
        super(Block, self).__init__()
        # 1x1
        w_b = int(round(w_out * bottleneck_ratio))
        self.conv1 = nn.Conv2d(w_in, w_b, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(w_b)
        # 3x3
        num_groups = w_b // group_width
        self.conv2 = nn.Conv2d(w_b, w_b, kernel_size=3,
                               stride=stride, padding=1, groups=num_groups, bias=False)
        self.bn2 = nn.BatchNorm2d(w_b)
        # se
        self.with_se = se_ratio > 0
        if self.with_se:
            # NOTE: SE width is derived from the *input* width w_in.
            w_se = int(round(w_in * se_ratio))
            self.se = SE(w_b, w_se)
        # 1x1
        self.conv3 = nn.Conv2d(w_b, w_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(w_out)
        self.shortcut = nn.Sequential()
        if stride != 1 or w_in != w_out:
            self.shortcut = nn.Sequential(
                nn.Conv2d(w_in, w_out,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(w_out)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        if self.with_se:
            out = self.se(out)
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class RegNet(nn.Module):
    """RegNet backbone for 32x32 inputs, assembled from a config dict.

    ``cfg`` supplies per-stage 'depths', 'widths', 'strides' plus the
    shared 'group_width', 'bottleneck_ratio' and 'se_ratio'.
    """

    def __init__(self, cfg, num_classes=10):
        super(RegNet, self).__init__()
        self.cfg = cfg
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(0)
        self.layer2 = self._make_layer(1)
        self.layer3 = self._make_layer(2)
        self.layer4 = self._make_layer(3)
        self.linear = nn.Linear(self.cfg['widths'][-1], num_classes)

    def _make_layer(self, idx):
        """Build stage ``idx``; only its first block uses the stage stride."""
        cfg = self.cfg
        width = cfg['widths'][idx]
        blocks = []
        for i in range(cfg['depths'][idx]):
            block_stride = cfg['strides'][idx] if i == 0 else 1
            blocks.append(Block(self.in_planes, width, block_stride,
                                cfg['group_width'], cfg['bottleneck_ratio'],
                                cfg['se_ratio']))
            self.in_planes = width
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        return self.linear(out.view(out.size(0), -1))
def RegNetX_200MF():
    """RegNetX-200MF: depths 1/1/4/7, widths 24/56/152/368, no SE."""
    return RegNet(dict(
        depths=[1, 1, 4, 7],
        widths=[24, 56, 152, 368],
        strides=[1, 1, 2, 2],
        group_width=8,
        bottleneck_ratio=1,
        se_ratio=0,
    ))
def RegNetX_400MF():
    """RegNetX-400MF: depths 1/2/7/12, widths 32/64/160/384, no SE."""
    return RegNet(dict(
        depths=[1, 2, 7, 12],
        widths=[32, 64, 160, 384],
        strides=[1, 1, 2, 2],
        group_width=16,
        bottleneck_ratio=1,
        se_ratio=0,
    ))
def RegNetY_400MF():
    """RegNetY-400MF: same shape as X-400MF but with SE (ratio 0.25)."""
    return RegNet(dict(
        depths=[1, 2, 7, 12],
        widths=[32, 64, 160, 384],
        strides=[1, 1, 2, 2],
        group_width=16,
        bottleneck_ratio=1,
        se_ratio=0.25,
    ))
def test():
    """Smoke test: print the model and the output shape of a tiny batch."""
    model = RegNetX_200MF()
    print(model)
    inputs = torch.randn(2, 3, 32, 32)
    print(model(inputs).shape)


if __name__ == '__main__':
    test()
| 4,548 | 28.160256 | 106 | py |
DEAT | DEAT-main/models/efficientnet.py | '''EfficientNet in PyTorch.
Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
    """Swish/SiLU activation: ``x * sigmoid(x)``."""
    return torch.sigmoid(x) * x
def drop_connect(x, drop_ratio):
    """Stochastic depth / drop-connect over the batch dimension.

    Each sample in the batch is zeroed with probability ``drop_ratio``
    and survivors are rescaled by ``1 / keep_ratio`` so the expected
    value is unchanged.

    Fix: the original applied ``div_``/``mul_`` in place, silently
    mutating the caller's tensor; this version is out-of-place and
    returns a new tensor with the same values.

    NOTE(review): ``drop_ratio == 1.0`` would divide by zero here, as in
    the original — callers appear to pass rates < 1; confirm.
    """
    keep_ratio = 1.0 - drop_ratio
    # Per-sample Bernoulli mask, broadcast over C/H/W.
    mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
    mask.bernoulli_(keep_ratio)
    return x / keep_ratio * mask
class SE(nn.Module):
    '''Squeeze-and-Excitation block with Swish.'''

    def __init__(self, in_channels, se_channels):
        super(SE, self).__init__()
        self.se1 = nn.Conv2d(in_channels, se_channels, kernel_size=1,
                             bias=True)
        self.se2 = nn.Conv2d(se_channels, in_channels, kernel_size=1,
                             bias=True)

    def forward(self, x):
        # Global pool -> bottleneck (swish) -> expand (sigmoid) -> rescale.
        pooled = F.adaptive_avg_pool2d(x, (1, 1))
        gate = self.se2(swish(self.se1(pooled))).sigmoid()
        return x * gate
class Block(nn.Module):
    '''MBConv block: expansion + depthwise + pointwise + squeeze-excitation.

    Skip connection (with optional drop-connect) is applied only when
    stride == 1 and in/out channel counts match.
    '''
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio=1,
                 se_ratio=0.,
                 drop_rate=0.):
        super(Block, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio
        # Expansion
        channels = expand_ratio * in_channels
        self.conv1 = nn.Conv2d(in_channels,
                               channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        # Depthwise conv
        # NOTE(review): padding of 1 or 2 only gives "same" output for
        # kernel sizes 3 and 5 — confirm no other sizes are used.
        self.conv2 = nn.Conv2d(channels,
                               channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(1 if kernel_size == 3 else 2),
                               groups=channels,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        # SE layers (bottleneck width derived from the input channels)
        se_channels = int(in_channels * se_ratio)
        self.se = SE(channels, se_channels)
        # Output
        self.conv3 = nn.Conv2d(channels,
                               out_channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        # Skip connection if in and out shapes are the same (MV-V2 style)
        self.has_skip = (stride == 1) and (in_channels == out_channels)
    def forward(self, x):
        # Expansion conv is skipped entirely when expand_ratio == 1.
        out = x if self.expand_ratio == 1 else swish(self.bn1(self.conv1(x)))
        out = swish(self.bn2(self.conv2(out)))
        out = self.se(out)
        out = self.bn3(self.conv3(out))
        if self.has_skip:
            # Drop-connect only during training; identity add otherwise.
            if self.training and self.drop_rate > 0:
                out = drop_connect(out, self.drop_rate)
            out = out + x
        return out
class EfficientNet(nn.Module):
    """EfficientNet for 32x32 inputs, assembled from a config dict.

    Args:
        cfg: dict with per-stage lists 'expansion', 'out_channels',
            'num_blocks', 'kernel_size', 'stride', plus scalar
            'dropout_rate' and 'drop_connect_rate'.
        num_classes: output dimension of the classifier.
    """
    def __init__(self, cfg, num_classes=10):
        super(EfficientNet, self).__init__()
        self.cfg = cfg
        self.conv1 = nn.Conv2d(3,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_channels=32)
        self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)

    def _make_layers(self, in_channels):
        layers = []
        cfg = [self.cfg[k] for k in ['expansion', 'out_channels', 'num_blocks', 'kernel_size',
                                     'stride']]
        b = 0
        blocks = sum(self.cfg['num_blocks'])
        for expansion, out_channels, num_blocks, kernel_size, stride in zip(*cfg):
            strides = [stride] + [1] * (num_blocks - 1)
            for stride in strides:
                # Drop-connect rate ramps linearly with the block index.
                drop_rate = self.cfg['drop_connect_rate'] * b / blocks
                layers.append(
                    Block(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          expansion,
                          se_ratio=0.25,
                          drop_rate=drop_rate))
                in_channels = out_channels
                # Fix: b was never incremented, so drop_rate stayed 0 for
                # every block and the stochastic-depth schedule was inert.
                b += 1
        return nn.Sequential(*layers)

    def forward(self, x):
        out = swish(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        dropout_rate = self.cfg['dropout_rate']
        # Classifier dropout only in training mode.
        if self.training and dropout_rate > 0:
            out = F.dropout(out, p=dropout_rate)
        out = self.linear(out)
        return out
def EfficientNetB0():
    """EfficientNet-B0 configuration (CIFAR-sized stem)."""
    cfg = dict(
        num_blocks=[1, 2, 2, 3, 3, 4, 1],
        expansion=[1, 6, 6, 6, 6, 6, 6],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        kernel_size=[3, 3, 5, 3, 5, 5, 3],
        stride=[1, 2, 2, 2, 1, 2, 1],
        dropout_rate=0.2,
        drop_connect_rate=0.2,
    )
    return EfficientNet(cfg)
def test():
    """Smoke test: forward a tiny random batch through EfficientNet-B0."""
    model = EfficientNetB0()
    batch = torch.randn(2, 3, 32, 32)
    print(model(batch).shape)


if __name__ == '__main__':
    test()
| 5,719 | 31.5 | 106 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.