repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/tdtr.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
import os

import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import (
    CharPolygons,
    SegmentationCharMask,
    SegmentationMask,
)
from PIL import Image, ImageDraw


class Tdtr(object):
    """Dataset over an image directory with one ``<name>.txt`` ground-truth
    file per image.

    ``__getitem__`` returns ``(image, BoxList target, image path)``.  A
    ground-truth line is ``x1,y1,...,word`` (see :meth:`line2boxes`); a
    transcription of ``"1"`` marks a difficult/ignored region.
    """

    def __init__(self, use_charann, imgs_dir, gts_dir, transforms=None, ignore_difficult=False):
        # use_charann: keep character-level annotations when available.
        self.use_charann = use_charann
        self.image_lists = [os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)]
        self.gts_dir = gts_dir
        self.transforms = transforms
        self.min_proposal_size = 2
        # index 0 ("_") is the background/unknown character class
        self.char_classes = "_0123456789abcdefghijklmnopqrstuvwxyz"
        self.vis = False  # debug visualisation switch
        self.ignore_difficult = ignore_difficult
        # Only filter training splits: images whose every word is difficult
        # contribute no positive boxes and are dropped up front.
        if self.ignore_difficult and (self.gts_dir is not None) and 'train' in self.gts_dir:
            self.image_lists = self.filter_image_lists()

    def filter_image_lists(self):
        """Return only the image paths whose ground truth contains at least
        one non-difficult (transcription != "1") word."""
        new_image_lists = []
        for img_path in self.image_lists:
            has_positive = False
            im_name = os.path.basename(img_path)
            gt_path = os.path.join(self.gts_dir, im_name + ".txt")
            if not os.path.isfile(gt_path):
                # fall back to the "gt_<stem>.txt" naming convention
                gt_path = os.path.join(
                    self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt"
                )
            with open(gt_path, "r") as f:
                lines = f.readlines()
            for line in lines:
                strs, loc = self.line2boxes(line)
                if strs[0] != "1":
                    has_positive = True
                    break
            if has_positive:
                new_image_lists.append(img_path)
        return new_image_lists

    def __getitem__(self, item):
        im_name = os.path.basename(self.image_lists[item])
        img = Image.open(self.image_lists[item]).convert("RGB")
        width, height = img.size
        if self.gts_dir is not None:
            gt_path = os.path.join(self.gts_dir, im_name + ".txt")
            if not os.path.isfile(gt_path):
                # same fallback naming as filter_image_lists, so filtering
                # and loading read the same annotation file
                gt_path = os.path.join(
                    self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt"
                )
            words, boxes, charsbbs, segmentations, labels = self.load_gt_from_txt(
                gt_path, height, width
            )
            # character annotations are only usable when words are present
            use_char_ann = words[0] != "" and self.use_charann
            target = BoxList(
                boxes[:, :4], img.size, mode="xyxy", use_char_ann=use_char_ann
            )
            if self.ignore_difficult:
                # difficult boxes carry label -1, positives carry 1
                labels = torch.from_numpy(np.array(labels))
            else:
                labels = torch.ones(len(boxes))
            target.add_field("labels", labels)
            masks = SegmentationMask(segmentations, img.size)
            target.add_field("masks", masks)
            char_masks = SegmentationCharMask(
                charsbbs,
                words=words,
                use_char_ann=use_char_ann,
                size=img.size,
                char_num_classes=len(self.char_classes),
            )
            target.add_field("char_masks", char_masks)
        else:
            target = None  # inference without ground truth
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        if self.vis:
            self._visualize(img, target, im_name)
        return img, target, self.image_lists[item]

    def _visualize(self, img, target, im_name):
        """Debug helper: blend masks/char masks over the de-normalised image
        and save the result under ./vis/."""
        new_im = img.numpy().copy().transpose([1, 2, 0]) + [
            102.9801,
            115.9465,
            122.7717,
        ]
        new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
        mask = target.extra_fields["masks"].polygons[0].convert("mask")
        mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB")
        if self.use_charann:
            m, _ = (
                target.extra_fields["char_masks"].chars_boxes[0].convert("char_mask")
            )
            color = self.creat_color_map(37, 255)
            color_map = color[m.numpy().astype(np.uint8)]
            char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB")
            char = Image.blend(char, new_im, 0.5)
        else:
            char = new_im
        new = Image.blend(char, mask, 0.5)
        img_draw = ImageDraw.Draw(new)
        for box in target.bbox.numpy():
            box = list(box)
            # close the rectangle: TL -> TR -> BR -> BL -> TL
            box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
            img_draw.line(box, fill=(255, 0, 0), width=2)
        new.save("./vis/char_" + im_name)

    def creat_color_map(self, n_class, width):
        """Build a list of >= n_class distinct RGB colors spread over [0, width]."""
        splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
        maps = []
        for i in range(splits):
            r = int(i * width * 1.0 / (splits - 1))
            for j in range(splits):
                g = int(j * width * 1.0 / (splits - 1))
                for k in range(splits - 1):
                    b = int(k * width * 1.0 / (splits - 1))
                    maps.append([r, g, b])
        return np.array(maps)

    def __len__(self):
        return len(self.image_lists)

    def load_gt_from_txt(self, gt_path, height=None, width=None):
        """Parse one ground-truth file.

        Returns ``(words, keep_boxes, charsboxes, segmentations, labels)``
        where ``keep_boxes`` is (N, 5): 4 box coords plus the box index,
        matching the 10th column of the per-character boxes.  Lines are only
        consumed when ``self.ignore_difficult`` is set; otherwise the
        empty-ground-truth fallback is returned.
        """
        words, boxes, charsboxes, segmentations, labels = [], [], [], [], []
        with open(gt_path) as f:
            lines = f.readlines()
        for line in lines:
            charbbs = []
            strs, loc = self.line2boxes(line)
            word = strs[0]
            if not self.ignore_difficult:
                continue
            rect = list(loc[0])
            min_x = min(rect[::2]) - 1
            min_y = min(rect[1::2]) - 1
            max_x = max(rect[::2]) - 1
            max_y = max(rect[1::2]) - 1
            box = [min_x, min_y, max_x, max_y]
            # axis-aligned rectangle used as the segmentation polygon
            segmentations.append(
                [[min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y]]
            )
            tindex = len(boxes)
            boxes.append(box)
            # BUGFIX: words must stay parallel to boxes — the original left
            # this append commented out, so __getitem__ crashed on words[0]
            # whenever the ground truth was non-empty.
            words.append(word)
            # a transcription of "1" marks a difficult region
            labels.append(-1 if word == '1' else 1)
            charbb = np.zeros((10,), dtype=np.float32)
            if loc.shape[0] > 1:
                # extra rows are character boxes; only the owning box index
                # is recorded here (this dataset has no char coords/classes)
                for i in range(1, loc.shape[0]):
                    charbb[9] = tindex
                    charbbs.append(charbb.copy())
                charsboxes.append(charbbs)
        num_boxes = len(boxes)
        if num_boxes > 0:
            keep_boxes = np.zeros((num_boxes, 5))
            keep_boxes[:, :4] = np.array(boxes)
            # 5th column: box id, same as the 10th column of its char boxes
            keep_boxes[:, 4] = range(num_boxes)
            if not self.use_charann and len(charsboxes) == 0:
                charbbs = np.zeros((10,), dtype=np.float32)
                for _ in range(len(words)):
                    charsboxes.append([charbbs])
            return words, np.array(keep_boxes), charsboxes, segmentations, labels
        # empty ground truth: a single dummy word/box so downstream code
        # always sees at least one entry
        words.append("")
        charbbs = np.zeros((10,), dtype=np.float32)
        return (
            words,
            np.zeros((1, 5), dtype=np.float32),
            [[charbbs]],
            [[np.zeros((8,), dtype=np.float32)]],
            [1],
        )

    def line2boxes(self, line):
        """Split a CSV ground-truth line into ([word], 1 x 2n coord array)
        where the last field is the transcription."""
        parts = line.strip().split(",")
        return [parts[-1]], np.array([[float(x) for x in parts[:-1]]])

    def check_charbbs(self, charbbs):
        """Vectorised filter: True where a char box exceeds the minimum
        proposal size in both dimensions."""
        xmins = np.minimum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        xmaxs = np.maximum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        ymins = np.minimum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        ymaxs = np.maximum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        return np.logical_and(
            xmaxs - xmins > self.min_proposal_size,
            ymaxs - ymins > self.min_proposal_size,
        )

    def check_charbb(self, charbb):
        """Scalar version of check_charbbs for a single 8-coord char box."""
        xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
        xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
        ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
        ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
        return (
            xmaxs - xmins > self.min_proposal_size
            and ymaxs - ymins > self.min_proposal_size
        )

    def char2num(self, chars):
        """Map characters to indices in self.char_classes,
        e.g. ['h', 'e', 'l', 'l', 'o'] -> their class ids (lower-cased)."""
        return [self.char_classes.index(c.lower()) for c in chars]

    def get_img_info(self, item):
        """
        Return the image dimensions for the image, without
        pre-processing it (the file is still opened to read its size).
        """
        im_name = os.path.basename(self.image_lists[item])
        img = Image.open(self.image_lists[item])
        width, height = img.size
        return {"im_name": im_name, "height": height, "width": width}
11,925
39.020134
120
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/concat_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect

import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset


class ConcatDataset(_ConcatDataset):
    """
    Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
    method for querying the sizes of the image
    """

    def get_idxs(self, idx):
        """Map a global sample index to (dataset index, index within it)."""
        which = bisect.bisect_right(self.cumulative_sizes, idx)
        offset = self.cumulative_sizes[which - 1] if which > 0 else 0
        return which, idx - offset

    def get_img_info(self, idx):
        """Delegate get_img_info to the dataset that owns the global index."""
        which, local = self.get_idxs(idx)
        return self.datasets[which].get_img_info(local)


class MixDataset(object):
    """Random mixture of several datasets: each access draws dataset i with
    probability ratios[i] (ratios are assumed to sum to 1), then a uniformly
    random element from it."""

    def __init__(self, datasets, ratios):
        self.datasets = datasets
        self.ratios = ratios
        self.lengths = np.array([len(ds) for ds in datasets])
        # cumulative ratio boundaries used to pick a dataset from one
        # uniform draw in [0, 1)
        self.seperate_inds = []
        total = 0
        for ratio in ratios[:-1]:
            total += ratio
            self.seperate_inds.append(total)

    def __len__(self):
        return self.lengths.sum()

    def __getitem__(self, item):
        # the requested index is ignored: a dataset is chosen by ratio and a
        # sample is drawn uniformly from it
        draw = np.random.rand()
        chosen = bisect.bisect_right(self.seperate_inds, draw)
        sample = np.random.randint(self.lengths[chosen])
        return self.datasets[chosen][sample]
1,498
26.254545
72
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/utils.py
#!/usr/bin/env python3
"""Helpers for unpacking dataset tarballs into a temporary directory."""
import os
import shlex
import shutil
import subprocess


def extract_archive(dataset_archive, tmp_data_path):
    """Extract a ``.tar`` / ``.gz`` archive into ``tmp_data_path``.

    Any pre-existing ``tmp_data_path`` directory is removed first.  Returns
    True when extraction was attempted, False when the archive is missing
    or has an unsupported extension.
    """
    if not os.path.isfile(dataset_archive):
        return False
    dataset_ext = os.path.splitext(dataset_archive)[1]
    if dataset_ext not in (".gz", ".tar"):
        return False
    if os.path.isdir(tmp_data_path):
        shutil.rmtree(tmp_data_path, ignore_errors=True)
    os.makedirs(tmp_data_path)
    tar_opt = "-xzf" if dataset_ext == ".gz" else "-xf"
    # Pass an argv list (shell=False, no shlex.split) so archive/target
    # paths containing spaces are handled correctly.
    subprocess.call(["tar", tar_opt, dataset_archive, "-C", tmp_data_path])
    return True


def tar_file(tar_path, tmp_path):
    """Extract ``tar_path`` into ``tmp_path`` and print the outcome."""
    tar_name = os.path.basename(tar_path)
    if extract_archive(tar_path, tmp_path):
        # BUGFIX: the original message was missing the space before
        # "successfully" ('extract foo.targzsuccessfully!')
        print('extract ' + tar_name + ' successfully!')
    else:
        print("fail to extract " + tar_name)
926
22.769231
85
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/synthtext.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
import os
import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import (
    SegmentationCharMask,
    SegmentationMask,
)
from PIL import Image, ImageDraw


class SynthtextDataset(object):
    """SynthText-style dataset: an image-list file plus one ``<name>.txt``
    ground-truth file per image (9 CSV fields per box: 8 quad coords and a
    transcription; "###" marks difficult regions).

    ``__getitem__`` returns ``(image, BoxList target, image path)``.
    """

    def __init__(self, use_charann, list_file_path, imgs_dir, gts_dir, transforms=None, ignore_difficult=False):
        # use_charann: keep character-level annotations when available
        self.use_charann = use_charann
        with open(list_file_path, "r") as list_file:
            image_lines = list_file.readlines()
        self.image_lists = [
            os.path.join(imgs_dir, line.strip()) for line in image_lines
        ]
        # ground truth for image <name> lives in <name>.txt
        self.gt_lists = [
            os.path.join(gts_dir, line.strip() + ".txt") for line in image_lines
        ]
        self.filtered_gts = []
        self.transforms = transforms
        self.min_proposal_size = 2
        # index 0 ("_") is the background/unknown character class
        self.char_classes = "_0123456789abcdefghijklmnopqrstuvwxyz"
        self.vis = False  # debug visualisation switch
        self.ignore_difficult = ignore_difficult

    def __getitem__(self, item):
        # Skip unreadable images by advancing to the next index.
        # NOTE(review): if the trailing images are all unreadable, item runs
        # past the end of image_lists and raises IndexError — confirm the
        # data guarantees at least one readable image after any bad one.
        while True:
            img_path = self.image_lists[item]
            try:
                img = Image.open(img_path).convert("RGB")
                break
            except BaseException:
                item += 1
        im_name = os.path.basename(img_path)
        width, height = img.size
        gt_path = self.gt_lists[item]
        words, boxes, charsbbs, segmentations = self.load_gt_from_txt(
            gt_path, height, width
        )
        target = BoxList(
            boxes[:, :4], img.size, mode="xyxy", use_char_ann=self.use_charann
        )
        classes = torch.ones(len(boxes))  # every kept box is positive text
        target.add_field("labels", classes)
        masks = SegmentationMask(segmentations, img.size)
        target.add_field("masks", masks)
        # character annotations are only usable when words are present
        if words[0] == "":
            use_char_ann = False
        else:
            use_char_ann = True
        if not self.use_charann:
            use_char_ann = False
        char_masks = SegmentationCharMask(
            charsbbs, words=words, use_char_ann=use_char_ann, size=img.size, char_num_classes=len(self.char_classes)
        )
        target.add_field("char_masks", char_masks)
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        if self.vis:
            # debug: blend masks/char masks over the de-normalised image
            # and save the result under ./vis/
            new_im = img.numpy().copy().transpose([1, 2, 0]) + [
                102.9801,
                115.9465,
                122.7717,
            ]
            new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
            mask = target.extra_fields["masks"].polygons[0].convert("mask")
            mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB")
            if self.use_charann:
                m, _ = (
                    target.extra_fields["char_masks"]
                    .chars_boxes[0]
                    .convert("char_mask")
                )
                color = self.creat_color_map(37, 255)
                color_map = color[m.numpy().astype(np.uint8)]
                char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB")
                char = Image.blend(char, new_im, 0.5)
            else:
                char = new_im
            new = Image.blend(char, mask, 0.5)
            img_draw = ImageDraw.Draw(new)
            for box in target.bbox.numpy():
                box = list(box)
                # close the rectangle: TL -> TR -> BR -> BL -> TL
                box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
                img_draw.line(box, fill=(255, 0, 0), width=2)
            new.save("./vis/char_" + im_name)
        return img, target, self.image_lists[item]

    def creat_color_map(self, n_class, width):
        """Build a list of >= n_class distinct RGB colors spread over [0, width]."""
        splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
        maps = []
        for i in range(splits):
            r = int(i * width * 1.0 / (splits - 1))
            for j in range(splits):
                g = int(j * width * 1.0 / (splits - 1))
                for k in range(splits - 1):
                    b = int(k * width * 1.0 / (splits - 1))
                    maps.append([r, g, b])
        return np.array(maps)

    def __len__(self):
        return len(self.image_lists)

    def load_gt_from_txt(self, gt_path, height=None, width=None):
        """Parse one ground-truth file into (words, (N, 5) boxes, per-word
        character boxes, segmentation polygons).  "###" (difficult) lines
        are skipped entirely."""
        words, boxes, charsboxes, segmentations = [], [], [], []
        lines = open(gt_path).readlines()
        for line in lines:
            charbbs = []
            strs, loc = self.line2boxes(line)
            word = strs[0]
            if word == "###":
                continue
            else:
                rect = list(loc[0])
                # shift to 0-based coordinates
                min_x = min(rect[::2]) - 1
                min_y = min(rect[1::2]) - 1
                max_x = max(rect[::2]) - 1
                max_y = max(rect[1::2]) - 1
                box = [min_x, min_y, max_x, max_y]
                segmentations.append([loc[0, :]])
                tindex = len(boxes)  # index of the box owning these chars
                boxes.append(box)
                words.append(word)
                c_class = self.char2num(strs[1:])
                charbb = np.zeros((10,), dtype=np.float32)
                if loc.shape[0] > 1:
                    # rows 1..n are char boxes: 8 coords, char class, box id
                    for i in range(1, loc.shape[0]):
                        charbb[:8] = loc[i, :]
                        charbb[8] = c_class[i - 1]
                        charbb[9] = tindex
                        charbbs.append(charbb.copy())
                else:
                    # no char rows: keep a single all-zero placeholder
                    charbbs.append(charbb.copy())
                charsboxes.append(charbbs)
        num_boxes = len(boxes)
        if len(boxes) > 0:
            keep_boxes = np.zeros((num_boxes, 5))
            keep_boxes[:, :4] = np.array(boxes)
            keep_boxes[:, 4] = range(
                num_boxes
            )  # the 5th column is the box label,
            # same as the 10th column of all charsboxes which belong to the box
            if self.use_charann:
                return words, np.array(keep_boxes), charsboxes, segmentations
            else:
                charbbs = np.zeros((10,), dtype=np.float32)
                for _ in range(len(words)):
                    charsboxes.append([charbbs])
                # NOTE(review): returns [[charbbs]] (one dummy entry) rather
                # than the charsboxes list built above, unlike the sibling
                # dataset classes — verify this is intentional.
                return words, np.array(keep_boxes), [[charbbs]], segmentations
        else:
            # empty ground truth: single dummy word/box so downstream code
            # always sees at least one entry
            words.append("")
            charbbs = np.zeros((10,), dtype=np.float32)
            return (
                words,
                np.zeros((1, 5), dtype=np.float32),
                [[charbbs]],
                [[np.zeros((8,), dtype=np.float32)]],
            )

    def line2boxes(self, line):
        """Split a CSV ground-truth line into (transcriptions, n x 8 coord
        array); each box contributes 9 fields (8 coords + text)."""
        parts = line.strip().split(",")
        if "\xef\xbb\xbf" in parts[0]:
            parts[0] = parts[0][3:]  # strip raw UTF-8 BOM bytes
        if "\ufeff" in parts[0]:
            parts[0] = parts[0].replace("\ufeff", "")  # strip decoded BOM
        x1 = np.array([int(float(x)) for x in parts[::9]])
        y1 = np.array([int(float(x)) for x in parts[1::9]])
        x2 = np.array([int(float(x)) for x in parts[2::9]])
        y2 = np.array([int(float(x)) for x in parts[3::9]])
        x3 = np.array([int(float(x)) for x in parts[4::9]])
        y3 = np.array([int(float(x)) for x in parts[5::9]])
        x4 = np.array([int(float(x)) for x in parts[6::9]])
        y4 = np.array([int(float(x)) for x in parts[7::9]])
        strs = parts[8::9]
        loc = np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).transpose()
        return strs, loc

    def check_charbbs(self, charbbs):
        """Vectorised filter: True where a char box exceeds the minimum
        proposal size in both dimensions."""
        xmins = np.minimum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        xmaxs = np.maximum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        ymins = np.minimum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        ymaxs = np.maximum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        return np.logical_and(
            xmaxs - xmins > self.min_proposal_size,
            ymaxs - ymins > self.min_proposal_size,
        )

    def check_charbb(self, charbb):
        """Scalar version of check_charbbs for a single 8-coord char box."""
        xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
        xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
        ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
        ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
        return (
            xmaxs - xmins > self.min_proposal_size
            and ymaxs - ymins > self.min_proposal_size
        )

    def char2num(self, chars):
        ## chars ['h', 'e', 'l', 'l', 'o'] -> class ids in self.char_classes
        nums = [self.char_classes.index(c.lower()) for c in chars]
        return nums

    def get_img_info(self, item):
        """
        Return the image dimensions for the image, without
        loading and pre-processing it
        """
        im_name = os.path.basename(self.image_lists[item])
        img = Image.open(self.image_lists[item])
        width, height = img.size
        img_info = {"im_name": im_name, "height": height, "width": width}
        return img_info
9,096
38.042918
116
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/scut.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
import os

import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import (
    CharPolygons,
    SegmentationCharMask,
    SegmentationMask,
)
from PIL import Image, ImageDraw, ImageFile

# Some SCUT images are truncated on disk; let PIL load them anyway.
ImageFile.LOAD_TRUNCATED_IMAGES = True


class ScutDataset(object):
    """SCUT text dataset: an image directory plus one ``<name>.txt`` ground
    truth per image ("###" transcriptions mark difficult regions).

    ``__getitem__`` returns ``(image, BoxList target, image path)``.
    """

    def __init__(self, use_charann, imgs_dir, gts_dir, transforms=None, ignore_difficult=False):
        # use_charann: keep character-level annotations when available
        self.use_charann = use_charann
        self.image_lists = [os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)]
        self.gts_dir = gts_dir
        self.transforms = transforms
        self.min_proposal_size = 2
        # index 0 ("_") is the background/unknown character class
        self.char_classes = "_0123456789abcdefghijklmnopqrstuvwxyz"
        self.vis = False  # debug visualisation switch
        self.ignore_difficult = ignore_difficult
        # gts_dir is guarded against None for consistency with IcdarDataset
        # (the original dereferenced it unconditionally)
        if self.ignore_difficult and self.gts_dir is not None and 'train' in self.gts_dir:
            self.image_lists = self.filter_image_lists()

    def filter_image_lists(self):
        """Keep only images whose ground truth has at least one
        non-difficult (transcription != "###") word."""
        new_image_lists = []
        for img_path in self.image_lists:
            has_positive = False
            im_name = os.path.basename(img_path)
            gt_path = os.path.join(self.gts_dir, im_name + ".txt")
            if not os.path.isfile(gt_path):
                # fall back to the "gt_<stem>.txt" naming convention
                gt_path = os.path.join(
                    self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt"
                )
            with open(gt_path, "r") as f:
                lines = f.readlines()
            for line in lines:
                strs, loc = self.line2boxes(line)
                if strs[0] != "###":
                    has_positive = True
                    break
            if has_positive:
                new_image_lists.append(img_path)
        return new_image_lists

    def __getitem__(self, item):
        im_name = os.path.basename(self.image_lists[item])
        img = Image.open(self.image_lists[item]).convert("RGB")
        width, height = img.size
        gt_path = os.path.join(self.gts_dir, im_name + ".txt")
        words, boxes, charsbbs, segmentations, labels = self.load_gt_from_txt(
            gt_path, height, width
        )
        # character annotations are only usable when words are present
        use_char_ann = words[0] != "" and self.use_charann
        target = BoxList(boxes[:, :4], img.size, mode="xyxy", use_char_ann=use_char_ann)
        if self.ignore_difficult:
            # difficult boxes carry label -1, positives carry 1
            labels = torch.from_numpy(np.array(labels))
        else:
            labels = torch.ones(len(boxes))
        target.add_field("labels", labels)
        masks = SegmentationMask(segmentations, img.size)
        target.add_field("masks", masks)
        char_masks = SegmentationCharMask(
            charsbbs,
            words=words,
            use_char_ann=use_char_ann,
            size=img.size,
            char_num_classes=len(self.char_classes),
        )
        target.add_field("char_masks", char_masks)
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        if self.vis:
            self._visualize(img, target, im_name)
        return img, target, self.image_lists[item]

    def _visualize(self, img, target, im_name):
        """Debug helper: blend masks/char masks over the de-normalised image
        and save the result under ./vis/."""
        new_im = img.numpy().copy().transpose([1, 2, 0]) + [
            102.9801,
            115.9465,
            122.7717,
        ]
        new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
        mask = target.extra_fields["masks"].polygons[0].convert("mask")
        mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB")
        if self.use_charann:
            m, _ = (
                target.extra_fields["char_masks"].chars_boxes[0].convert("char_mask")
            )
            color = self.creat_color_map(37, 255)
            color_map = color[m.numpy().astype(np.uint8)]
            char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB")
            char = Image.blend(char, new_im, 0.5)
        else:
            char = new_im
        new = Image.blend(char, mask, 0.5)
        img_draw = ImageDraw.Draw(new)
        for box in target.bbox.numpy():
            box = list(box)
            # close the rectangle: TL -> TR -> BR -> BL -> TL
            box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
            img_draw.line(box, fill=(255, 0, 0), width=2)
        new.save("./vis/char_" + im_name)

    def creat_color_map(self, n_class, width):
        """Build a list of >= n_class distinct RGB colors spread over [0, width]."""
        splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
        maps = []
        for i in range(splits):
            r = int(i * width * 1.0 / (splits - 1))
            for j in range(splits):
                g = int(j * width * 1.0 / (splits - 1))
                for k in range(splits - 1):
                    b = int(k * width * 1.0 / (splits - 1))
                    maps.append([r, g, b])
        return np.array(maps)

    def __len__(self):
        return len(self.image_lists)

    @staticmethod
    def _rect_bounds(quad):
        """Axis-aligned (min_x, min_y, max_x, max_y) of an 8-coord quad,
        shifted to 0-based coordinates."""
        rect = list(quad)
        return (
            min(rect[::2]) - 1,
            min(rect[1::2]) - 1,
            max(rect[::2]) - 1,
            max(rect[1::2]) - 1,
        )

    def load_gt_from_txt(self, gt_path, height=None, width=None):
        """Parse one ground-truth file.

        Returns ``(words, keep_boxes, charsboxes, segmentations, labels)``
        where ``keep_boxes`` is (N, 5): 4 box coords plus the box index,
        matching the 10th column of the per-character boxes.  "###" lines
        are kept with label -1 when ``self.ignore_difficult`` is set,
        otherwise skipped.
        """
        words, boxes, charsboxes, segmentations, labels = [], [], [], [], []
        with open(gt_path) as f:
            lines = f.readlines()
        for line in lines:
            charbbs = []
            strs, loc = self.line2boxes(line)
            word = strs[0]
            if word == "###":
                if not self.ignore_difficult:
                    continue
                min_x, min_y, max_x, max_y = self._rect_bounds(loc[0])
                segmentations.append([loc[0, :]])
                tindex = len(boxes)
                boxes.append([min_x, min_y, max_x, max_y])
                words.append(word)
                labels.append(-1)  # difficult region
                # BUGFIX: the original wrote `charbbs = np.zeros(...)` here,
                # clobbering the list and leaving `charbb` undefined, which
                # raised NameError whenever a difficult line had char rows.
                charbb = np.zeros((10,), dtype=np.float32)
                if loc.shape[0] > 1:
                    # only the owning box id is recorded for ignored chars
                    for i in range(1, loc.shape[0]):
                        charbb[9] = tindex
                        charbbs.append(charbb.copy())
                    charsboxes.append(charbbs)
            else:
                min_x, min_y, max_x, max_y = self._rect_bounds(loc[0])
                segmentations.append([loc[0, :]])
                tindex = len(boxes)
                boxes.append([min_x, min_y, max_x, max_y])
                words.append(word)
                labels.append(1)
                c_class = self.char2num(strs[1:])
                charbb = np.zeros((10,), dtype=np.float32)
                if loc.shape[0] > 1:
                    # rows 1..n are char boxes: 8 coords, char class, box id
                    for i in range(1, loc.shape[0]):
                        charbb[:8] = loc[i, :]
                        charbb[8] = c_class[i - 1]
                        charbb[9] = tindex
                        charbbs.append(charbb.copy())
                    charsboxes.append(charbbs)
        num_boxes = len(boxes)
        if num_boxes > 0:
            keep_boxes = np.zeros((num_boxes, 5))
            keep_boxes[:, :4] = np.array(boxes)
            # 5th column: box id, same as the 10th column of its char boxes
            keep_boxes[:, 4] = range(num_boxes)
            if not self.use_charann and len(charsboxes) == 0:
                charbbs = np.zeros((10,), dtype=np.float32)
                for _ in range(len(words)):
                    charsboxes.append([charbbs])
            return words, np.array(keep_boxes), charsboxes, segmentations, labels
        # empty ground truth: a single dummy word/box so downstream code
        # always sees at least one entry
        words.append("")
        charbbs = np.zeros((10,), dtype=np.float32)
        return (
            words,
            np.zeros((1, 5), dtype=np.float32),
            [[charbbs]],
            [[np.zeros((8,), dtype=np.float32)]],
            [1],
        )

    def line2boxes(self, line):
        """Split a CSV ground-truth line into (transcriptions, n x 8 coord
        array); each box contributes 9 fields (8 coords + text)."""
        parts = line.strip().split(",")
        if "\xef\xbb\xbf" in parts[0]:
            parts[0] = parts[0][3:]  # strip raw UTF-8 BOM bytes
        if "\ufeff" in parts[0]:
            parts[0] = parts[0].replace("\ufeff", "")  # strip decoded BOM
        x1 = np.array([int(float(x)) for x in parts[::9]])
        y1 = np.array([int(float(x)) for x in parts[1::9]])
        x2 = np.array([int(float(x)) for x in parts[2::9]])
        y2 = np.array([int(float(x)) for x in parts[3::9]])
        x3 = np.array([int(float(x)) for x in parts[4::9]])
        y3 = np.array([int(float(x)) for x in parts[5::9]])
        x4 = np.array([int(float(x)) for x in parts[6::9]])
        y4 = np.array([int(float(x)) for x in parts[7::9]])
        strs = parts[8::9]
        loc = np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).transpose()
        return strs, loc

    def check_charbbs(self, charbbs):
        """Vectorised filter: True where a char box exceeds the minimum
        proposal size in both dimensions."""
        xmins = np.minimum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        xmaxs = np.maximum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        ymins = np.minimum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        ymaxs = np.maximum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        return np.logical_and(
            xmaxs - xmins > self.min_proposal_size,
            ymaxs - ymins > self.min_proposal_size,
        )

    def check_charbb(self, charbb):
        """Scalar version of check_charbbs for a single 8-coord char box."""
        xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
        xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
        ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
        ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
        return (
            xmaxs - xmins > self.min_proposal_size
            and ymaxs - ymins > self.min_proposal_size
        )

    def char2num(self, chars):
        """Map characters to indices in self.char_classes,
        e.g. ['h', 'e', 'l', 'l', 'o'] -> their class ids (lower-cased)."""
        return [self.char_classes.index(c.lower()) for c in chars]

    def get_img_info(self, item):
        """
        Return the image dimensions for the image, without
        pre-processing it (the file is still opened to read its size).
        """
        im_name = os.path.basename(self.image_lists[item])
        img = Image.open(self.image_lists[item])
        width, height = img.size
        return {"im_name": im_name, "height": height, "width": width}
13,327
39.510638
116
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/icdar.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
import os

import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import (
    SegmentationCharMask,
    SegmentationMask,
)
from PIL import Image, ImageDraw


class IcdarDataset(object):
    """ICDAR-style dataset: an image directory plus one ``<name>.txt`` (or
    ``gt_<stem>.txt``) ground truth per image ("###" marks difficult
    regions).  ``__getitem__`` returns ``(image, BoxList target, image
    path)``; without ``gts_dir`` the target is None (inference mode).
    """

    def __init__(self, use_charann, imgs_dir, gts_dir, transforms=None, ignore_difficult=False):
        # use_charann: keep character-level annotations when available
        self.use_charann = use_charann
        self.image_lists = [os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)]
        self.gts_dir = gts_dir
        self.transforms = transforms
        self.min_proposal_size = 2
        # index 0 ("_") is the background/unknown character class
        self.char_classes = "_0123456789abcdefghijklmnopqrstuvwxyz"
        self.vis = False  # debug visualisation switch
        self.ignore_difficult = ignore_difficult
        # Only filter training splits: images whose every word is difficult
        # contribute no positive boxes and are dropped up front.
        if self.ignore_difficult and self.gts_dir is not None and 'train' in self.gts_dir:
            self.image_lists = self.filter_image_lists()

    def filter_image_lists(self):
        """Keep only images whose ground truth has at least one
        non-difficult (transcription != "###") word."""
        new_image_lists = []
        for img_path in self.image_lists:
            has_positive = False
            im_name = os.path.basename(img_path)
            gt_path = os.path.join(self.gts_dir, im_name + ".txt")
            if not os.path.isfile(gt_path):
                # fall back to the "gt_<stem>.txt" naming convention
                gt_path = os.path.join(
                    self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt"
                )
            with open(gt_path, "r") as f:
                lines = f.readlines()
            for line in lines:
                strs, loc = self.line2boxes(line)
                if strs[0] != "###":
                    has_positive = True
                    break
            if has_positive:
                new_image_lists.append(img_path)
        return new_image_lists

    def __getitem__(self, item):
        im_name = os.path.basename(self.image_lists[item])
        img = Image.open(self.image_lists[item]).convert("RGB")
        width, height = img.size
        if self.gts_dir is not None:
            gt_path = os.path.join(self.gts_dir, im_name + ".txt")
            if not os.path.isfile(gt_path):
                # fall back to the "gt_<stem>.txt" naming convention
                gt_path = os.path.join(
                    self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt"
                )
            words, boxes, charsbbs, segmentations, labels = self.load_gt_from_txt(
                gt_path, height, width
            )
            target = BoxList(
                boxes[:, :4], img.size, mode="xyxy", use_char_ann=self.use_charann
            )
            if self.ignore_difficult:
                # difficult boxes carry label -1, positives carry 1
                labels = torch.from_numpy(np.array(labels))
            else:
                labels = torch.ones(len(boxes))
            target.add_field("labels", labels)
            masks = SegmentationMask(segmentations, img.size)
            target.add_field("masks", masks)
            # character annotations are only usable when words are present
            use_char_ann = words[0] != "" and self.use_charann
            char_masks = SegmentationCharMask(
                charsbbs,
                words=words,
                use_char_ann=use_char_ann,
                size=img.size,
                char_num_classes=len(self.char_classes),
            )
            target.add_field("char_masks", char_masks)
        else:
            target = None  # inference without ground truth
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        if self.vis:
            self._visualize(img, target, im_name)
        return img, target, self.image_lists[item]

    def _visualize(self, img, target, im_name):
        """Debug helper: blend masks/char masks over the de-normalised image
        and save the result under ./vis/."""
        new_im = img.numpy().copy().transpose([1, 2, 0]) + [
            102.9801,
            115.9465,
            122.7717,
        ]
        new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
        mask = target.extra_fields["masks"].polygons[0].convert("mask")
        mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB")
        if self.use_charann:
            m, _ = (
                target.extra_fields["char_masks"].chars_boxes[0].convert("char_mask")
            )
            color = self.creat_color_map(37, 255)
            color_map = color[m.numpy().astype(np.uint8)]
            char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB")
            char = Image.blend(char, new_im, 0.5)
        else:
            char = new_im
        new = Image.blend(char, mask, 0.5)
        img_draw = ImageDraw.Draw(new)
        for box in target.bbox.numpy():
            box = list(box)
            # close the rectangle: TL -> TR -> BR -> BL -> TL
            box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
            img_draw.line(box, fill=(255, 0, 0), width=2)
        new.save("./vis/char_" + im_name)

    def creat_color_map(self, n_class, width):
        """Build a list of >= n_class distinct RGB colors spread over [0, width]."""
        splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
        maps = []
        for i in range(splits):
            r = int(i * width * 1.0 / (splits - 1))
            for j in range(splits):
                g = int(j * width * 1.0 / (splits - 1))
                for k in range(splits - 1):
                    b = int(k * width * 1.0 / (splits - 1))
                    maps.append([r, g, b])
        return np.array(maps)

    def __len__(self):
        return len(self.image_lists)

    @staticmethod
    def _rect_bounds(quad):
        """Axis-aligned (min_x, min_y, max_x, max_y) of an 8-coord quad,
        shifted to 0-based coordinates."""
        rect = list(quad)
        return (
            min(rect[::2]) - 1,
            min(rect[1::2]) - 1,
            max(rect[::2]) - 1,
            max(rect[1::2]) - 1,
        )

    def load_gt_from_txt(self, gt_path, height=None, width=None):
        """Parse one ground-truth file.

        Returns ``(words, keep_boxes, charsboxes, segmentations, labels)``
        where ``keep_boxes`` is (N, 5): 4 box coords plus the box index,
        matching the 10th column of the per-character boxes.  "###" lines
        are kept with label -1 when ``self.ignore_difficult`` is set,
        otherwise skipped.
        """
        words, boxes, charsboxes, segmentations, labels = [], [], [], [], []
        with open(gt_path) as f:
            lines = f.readlines()
        for line in lines:
            charbbs = []
            strs, loc = self.line2boxes(line)
            word = strs[0]
            if word == "###":
                if not self.ignore_difficult:
                    continue
                min_x, min_y, max_x, max_y = self._rect_bounds(loc[0])
                segmentations.append([loc[0, :]])
                tindex = len(boxes)
                boxes.append([min_x, min_y, max_x, max_y])
                words.append(word)
                labels.append(-1)  # difficult region
                # BUGFIX: the original wrote `charbbs = np.zeros(...)` here,
                # clobbering the list and leaving `charbb` undefined, which
                # raised NameError whenever a difficult line had char rows.
                charbb = np.zeros((10,), dtype=np.float32)
                if loc.shape[0] > 1:
                    # only the owning box id is recorded for ignored chars
                    for i in range(1, loc.shape[0]):
                        charbb[9] = tindex
                        charbbs.append(charbb.copy())
                    charsboxes.append(charbbs)
            else:
                min_x, min_y, max_x, max_y = self._rect_bounds(loc[0])
                segmentations.append([loc[0, :]])
                tindex = len(boxes)
                boxes.append([min_x, min_y, max_x, max_y])
                words.append(word)
                labels.append(1)
                c_class = self.char2num(strs[1:])
                charbb = np.zeros((10,), dtype=np.float32)
                if loc.shape[0] > 1:
                    # rows 1..n are char boxes: 8 coords, char class, box id
                    for i in range(1, loc.shape[0]):
                        charbb[:8] = loc[i, :]
                        charbb[8] = c_class[i - 1]
                        charbb[9] = tindex
                        charbbs.append(charbb.copy())
                    charsboxes.append(charbbs)
        num_boxes = len(boxes)
        if num_boxes > 0:
            keep_boxes = np.zeros((num_boxes, 5))
            keep_boxes[:, :4] = np.array(boxes)
            # 5th column: box id, same as the 10th column of its char boxes
            keep_boxes[:, 4] = range(num_boxes)
            if not self.use_charann and len(charsboxes) == 0:
                charbbs = np.zeros((10,), dtype=np.float32)
                for _ in range(len(words)):
                    charsboxes.append([charbbs])
            return words, np.array(keep_boxes), charsboxes, segmentations, labels
        # empty ground truth: a single dummy word/box so downstream code
        # always sees at least one entry
        words.append("")
        charbbs = np.zeros((10,), dtype=np.float32)
        return (
            words,
            np.zeros((1, 5), dtype=np.float32),
            [[charbbs]],
            [[np.zeros((8,), dtype=np.float32)]],
            [1],
        )

    def line2boxes(self, line):
        """Split a CSV ground-truth line into (transcriptions, n x 8 coord
        array); each box contributes 9 fields (8 coords + text)."""
        parts = line.strip().split(",")
        if "\xef\xbb\xbf" in parts[0]:
            parts[0] = parts[0][3:]  # strip raw UTF-8 BOM bytes
        if "\ufeff" in parts[0]:
            parts[0] = parts[0].replace("\ufeff", "")  # strip decoded BOM
        x1 = np.array([int(float(x)) for x in parts[::9]])
        y1 = np.array([int(float(x)) for x in parts[1::9]])
        x2 = np.array([int(float(x)) for x in parts[2::9]])
        y2 = np.array([int(float(x)) for x in parts[3::9]])
        x3 = np.array([int(float(x)) for x in parts[4::9]])
        y3 = np.array([int(float(x)) for x in parts[5::9]])
        x4 = np.array([int(float(x)) for x in parts[6::9]])
        y4 = np.array([int(float(x)) for x in parts[7::9]])
        strs = parts[8::9]
        loc = np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).transpose()
        return strs, loc

    def check_charbbs(self, charbbs):
        """Vectorised filter: True where a char box exceeds the minimum
        proposal size in both dimensions."""
        xmins = np.minimum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        xmaxs = np.maximum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        ymins = np.minimum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        ymaxs = np.maximum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        return np.logical_and(
            xmaxs - xmins > self.min_proposal_size,
            ymaxs - ymins > self.min_proposal_size,
        )

    def check_charbb(self, charbb):
        """Scalar version of check_charbbs for a single 8-coord char box."""
        xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
        xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
        ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
        ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
        return (
            xmaxs - xmins > self.min_proposal_size
            and ymaxs - ymins > self.min_proposal_size
        )

    def char2num(self, chars):
        """Map characters to indices in self.char_classes,
        e.g. ['h', 'e', 'l', 'l', 'o'] -> their class ids (lower-cased)."""
        return [self.char_classes.index(c.lower()) for c in chars]

    def get_img_info(self, item):
        """
        Return the image dimensions for the image, without
        pre-processing it (the file is still opened to read its size).
        """
        im_name = os.path.basename(self.image_lists[item])
        img = Image.open(self.image_lists[item])
        width, height = img.size
        return {"im_name": im_name, "height": height, "width": width}
11,125
39.458182
120
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/total_text.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. """ Simple dataset class that wraps a list of path names """ import os import numpy as np import torch from maskrcnn_benchmark.structures.bounding_box import BoxList from maskrcnn_benchmark.structures.segmentation_mask import ( CharPolygons, SegmentationCharMask, SegmentationMask, ) from PIL import Image, ImageDraw class TotaltextDataset(object): def __init__(self, use_charann, imgs_dir, gts_dir, transforms=None, ignore_difficult=False): self.use_charann = use_charann self.image_lists = [os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)] self.gts_dir = gts_dir self.transforms = transforms self.min_proposal_size = 2 self.char_classes = "_0123456789abcdefghijklmnopqrstuvwxyz" self.vis = False self.ignore_difficult = ignore_difficult if self.ignore_difficult and (self.gts_dir is not None) and 'train' in self.gts_dir: self.image_lists = self.filter_image_lists() def filter_image_lists(self): new_image_lists = [] for img_path in self.image_lists: has_positive = False im_name = os.path.basename(img_path) gt_path = os.path.join(self.gts_dir, im_name + ".txt") if not os.path.isfile(gt_path): gt_path = os.path.join( self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt" ) lines = open(gt_path, 'r').readlines() for line in lines: charbbs = [] strs, loc = self.line2boxes(line) word = strs[0] if word == "###": continue else: has_positive = True if has_positive: new_image_lists.append(img_path) return new_image_lists def __getitem__(self, item): im_name = os.path.basename(self.image_lists[item]) # print(self.image_lists[item]) img = Image.open(self.image_lists[item]).convert("RGB") width, height = img.size if self.gts_dir is not None: gt_path = os.path.join(self.gts_dir, im_name + ".txt") words, boxes, charsbbs, segmentations, labels = self.load_gt_from_txt( gt_path, height, width ) if words[0] == "": use_char_ann = False else: use_char_ann = True if not self.use_charann: use_char_ann = False target 
= BoxList( boxes[:, :4], img.size, mode="xyxy", use_char_ann=use_char_ann ) if self.ignore_difficult: labels = torch.from_numpy(np.array(labels)) else: labels = torch.ones(len(boxes)) target.add_field("labels", labels) masks = SegmentationMask(segmentations, img.size) target.add_field("masks", masks) char_masks = SegmentationCharMask( charsbbs, words=words, use_char_ann=use_char_ann, size=img.size, char_num_classes=len(self.char_classes) ) target.add_field("char_masks", char_masks) else: target = None if self.transforms is not None: img, target = self.transforms(img, target) if self.vis: new_im = img.numpy().copy().transpose([1, 2, 0]) + [ 102.9801, 115.9465, 122.7717, ] new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB") mask = target.extra_fields["masks"].polygons[0].convert("mask") mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB") if self.use_charann: m, _ = ( target.extra_fields["char_masks"] .chars_boxes[0] .convert("char_mask") ) color = self.creat_color_map(37, 255) color_map = color[m.numpy().astype(np.uint8)] char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB") char = Image.blend(char, new_im, 0.5) else: char = new_im new = Image.blend(char, mask, 0.5) img_draw = ImageDraw.Draw(new) for box in target.bbox.numpy(): box = list(box) box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2] img_draw.line(box, fill=(255, 0, 0), width=2) new.save("./vis/char_" + im_name) return img, target, self.image_lists[item] def creat_color_map(self, n_class, width): splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3))) maps = [] for i in range(splits): r = int(i * width * 1.0 / (splits - 1)) for j in range(splits): g = int(j * width * 1.0 / (splits - 1)) for k in range(splits - 1): b = int(k * width * 1.0 / (splits - 1)) maps.append([r, g, b]) return np.array(maps) def __len__(self): return len(self.image_lists) # def load_gt_from_txt(self, gt_path, height=None, width=None): # words, boxes, charsboxes, 
segmentations, labels = [], [], [], [], [] # lines = open(gt_path).readlines() # for line in lines: # charbbs = [] # strs, loc = self.line2boxes(line) # word = strs[0] # if word == "###": # labels.append(-1) # continue # else: # labels.append(1) # rect = list(loc[0]) # min_x = min(rect[::2]) - 1 # min_y = min(rect[1::2]) - 1 # max_x = max(rect[::2]) - 1 # max_y = max(rect[1::2]) - 1 # box = [min_x, min_y, max_x, max_y] # segmentations.append([loc[0, :]]) # tindex = len(boxes) # boxes.append(box) # words.append(word) # c_class = self.char2num(strs[1:]) # charbb = np.zeros((10,), dtype=np.float32) # if loc.shape[0] > 1: # for i in range(1, loc.shape[0]): # charbb[:8] = loc[i, :] # charbb[8] = c_class[i - 1] # charbb[9] = tindex # charbbs.append(charbb.copy()) # charsboxes.append(charbbs) # num_boxes = len(boxes) # if len(boxes) > 0: # keep_boxes = np.zeros((num_boxes, 5)) # keep_boxes[:, :4] = np.array(boxes) # keep_boxes[:, 4] = range( # num_boxes # ) # the 5th column is the box label,same as the 10th column of all charsboxes which belong to the box # if self.use_charann: # return words, np.array(keep_boxes), charsboxes, segmentations, labels # else: # charbbs = np.zeros((10,), dtype=np.float32) # for i in range(len(words)): # charsboxes.append([charbbs]) # return words, np.array(keep_boxes), charsboxes, segmentations, labels # else: # words.append("") # charbbs = np.zeros((10,), dtype=np.float32) # return ( # words, # np.zeros((1, 5), dtype=np.float32), # [[charbbs]], # [[np.zeros((8,), dtype=np.float32)]], # labels # ) def load_gt_from_txt(self, gt_path, height=None, width=None): words, boxes, charsboxes, segmentations, labels = [], [], [], [], [] lines = open(gt_path).readlines() for line in lines: charbbs = [] strs, loc = self.line2boxes(line) word = strs[0] if word == "###": if self.ignore_difficult: rect = list(loc[0]) min_x = min(rect[::2]) - 1 min_y = min(rect[1::2]) - 1 max_x = max(rect[::2]) - 1 max_y = max(rect[1::2]) - 1 box = [min_x, min_y, max_x, 
max_y] # segmentations.append([loc[0, :]]) segmentations.append([[min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y]]) tindex = len(boxes) boxes.append(box) words.append(word) labels.append(-1) charbbs = np.zeros((10,), dtype=np.float32) if loc.shape[0] > 1: for i in range(1, loc.shape[0]): charbb[9] = tindex charbbs.append(charbb.copy()) charsboxes.append(charbbs) else: continue else: rect = list(loc[0]) min_x = min(rect[::2]) - 1 min_y = min(rect[1::2]) - 1 max_x = max(rect[::2]) - 1 max_y = max(rect[1::2]) - 1 box = [min_x, min_y, max_x, max_y] segmentations.append([loc[0, :]]) tindex = len(boxes) boxes.append(box) words.append(word) labels.append(1) c_class = self.char2num(strs[1:]) charbb = np.zeros((10,), dtype=np.float32) if loc.shape[0] > 1: for i in range(1, loc.shape[0]): charbb[:8] = loc[i, :] charbb[8] = c_class[i - 1] charbb[9] = tindex charbbs.append(charbb.copy()) charsboxes.append(charbbs) num_boxes = len(boxes) if len(boxes) > 0: keep_boxes = np.zeros((num_boxes, 5)) keep_boxes[:, :4] = np.array(boxes) keep_boxes[:, 4] = range( num_boxes ) # the 5th column is the box label, # same as the 10th column of all charsboxes which belong to the box if self.use_charann: return words, np.array(keep_boxes), charsboxes, segmentations, labels else: charbbs = np.zeros((10,), dtype=np.float32) if len(charsboxes) == 0: for _ in range(len(words)): charsboxes.append([charbbs]) return words, np.array(keep_boxes), charsboxes, segmentations, labels else: words.append("") charbbs = np.zeros((10,), dtype=np.float32) return ( words, np.zeros((1, 5), dtype=np.float32), [[charbbs]], [[np.zeros((8,), dtype=np.float32)]], [1] ) def line2boxes(self, line): parts = line.strip().split(",") return [parts[-1]], np.array([[float(x) for x in parts[:-1]]]) def check_charbbs(self, charbbs): xmins = np.minimum.reduce( [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]] ) xmaxs = np.maximum.reduce( [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]] ) ymins = 
np.minimum.reduce( [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]] ) ymaxs = np.maximum.reduce( [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]] ) return np.logical_and( xmaxs - xmins > self.min_proposal_size, ymaxs - ymins > self.min_proposal_size, ) def check_charbb(self, charbb): xmins = min(charbb[0], charbb[2], charbb[4], charbb[6]) xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6]) ymins = min(charbb[1], charbb[3], charbb[5], charbb[7]) ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7]) return ( xmaxs - xmins > self.min_proposal_size and ymaxs - ymins > self.min_proposal_size ) def char2num(self, chars): ## chars ['h', 'e', 'l', 'l', 'o'] nums = [self.char_classes.index(c.lower()) for c in chars] return nums def get_img_info(self, item): """ Return the image dimensions for the image, without loading and pre-processing it """ im_name = os.path.basename(self.image_lists[item]) img = Image.open(self.image_lists[item]) width, height = img.size img_info = {"im_name": im_name, "height": height, "width": width} return img_info
12,866
39.589905
120
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from .coco import COCODataset from .concat_dataset import ConcatDataset, MixDataset from .icdar import IcdarDataset from .scut import ScutDataset from .synthtext import SynthtextDataset from .total_text import TotaltextDataset __all__ = [ "COCODataset", "ConcatDataset", "IcdarDataset", "SynthtextDataset", "MixDataset", "ScutDataset", "TotaltextDataset", ]
459
24.555556
71
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/coco.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch import torchvision from maskrcnn_benchmark.structures.bounding_box import BoxList from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask class COCODataset(torchvision.datasets.coco.CocoDetection): def __init__( self, ann_file, root, remove_images_without_annotations, transforms=None ): super(COCODataset, self).__init__(root, ann_file) # sort indices for reproducible results self.ids = sorted(self.ids) # filter images without detection annotations if remove_images_without_annotations: self.ids = [ img_id for img_id in self.ids if len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0 ] self.json_category_id_to_contiguous_id = { v: i + 1 for i, v in enumerate(self.coco.getCatIds()) } self.contiguous_category_id_to_json_id = { v: k for k, v in self.json_category_id_to_contiguous_id.items() } self.id_to_img_map = {k: v for k, v in enumerate(self.ids)} self.transforms = transforms def __getitem__(self, idx): img, anno = super(COCODataset, self).__getitem__(idx) # filter crowd annotations # TODO might be better to add an extra field anno = [obj for obj in anno if obj["iscrowd"] == 0] boxes = [obj["bbox"] for obj in anno] boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes target = BoxList(boxes, img.size, mode="xywh",use_char_ann=False).convert("xyxy") classes = [obj["category_id"] for obj in anno] classes = [self.json_category_id_to_contiguous_id[c] for c in classes] classes = torch.tensor(classes) target.add_field("labels", classes) masks = [obj["segmentation"] for obj in anno] masks = SegmentationMask(masks, img.size) target.add_field("masks", masks) target = target.clip_to_image(remove_empty=True) if self.transforms is not None: img, target = self.transforms(img, target) return img, target, idx def get_img_info(self, index): img_id = self.id_to_img_map[index] img_data = self.coco.imgs[img_id] return img_data
2,363
34.818182
89
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/list_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. """ Simple dataset class that wraps a list of path names """ from PIL import Image from maskrcnn_benchmark.structures.bounding_box import BoxList class ListDataset(object): def __init__(self, image_lists, transforms=None): self.image_lists = image_lists self.transforms = transforms def __getitem__(self, item): img = Image.open(self.image_lists[item]).convert("RGB") # dummy target w, h = img.size target = BoxList([[0, 0, w, h]], img.size, mode="xyxy") if self.transforms is not None: img, target = self.transforms(img, target) return img, target def __len__(self): return len(self.image_lists) def get_img_info(self, item): """ Return the image dimensions for the image, without loading and pre-processing it """ pass
943
24.513514
71
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import itertools import torch from torch.utils.data.sampler import BatchSampler from torch.utils.data.sampler import Sampler class GroupedBatchSampler(BatchSampler): """ Wraps another sampler to yield a mini-batch of indices. It enforces that elements from the same group should appear in groups of batch_size. It also tries to provide mini-batches which follows an ordering which is as close as possible to the ordering from the original sampler. Arguments: sampler (Sampler): Base sampler. batch_size (int): Size of mini-batch. drop_uneven (bool): If ``True``, the sampler will drop the batches whose size is less than ``batch_size`` """ def __init__(self, sampler, group_ids, batch_size, drop_uneven=False): if not isinstance(sampler, Sampler): raise ValueError( "sampler should be an instance of " "torch.utils.data.Sampler, but got sampler={}".format(sampler) ) self.sampler = sampler self.group_ids = torch.as_tensor(group_ids) assert self.group_ids.dim() == 1 self.batch_size = batch_size self.drop_uneven = drop_uneven self.groups = torch.unique(self.group_ids).sort(0)[0] self._can_reuse_batches = False def _prepare_batches(self): dataset_size = len(self.group_ids) # get the sampled indices from the sampler sampled_ids = torch.as_tensor(list(self.sampler)) # potentially not all elements of the dataset were sampled # by the sampler (e.g., DistributedSampler). # construct a tensor which contains -1 if the element was # not sampled, and a non-negative number indicating the # order where the element was sampled. # for example. 
if sampled_ids = [3, 1] and dataset_size = 5, # the order is [-1, 1, -1, 0, -1] order = torch.full((dataset_size,), -1, dtype=torch.int64) order[sampled_ids] = torch.arange(len(sampled_ids)) # get a mask with the elements that were sampled mask = order >= 0 # find the elements that belong to each individual cluster clusters = [(self.group_ids == i) & mask for i in self.groups] # get relative order of the elements inside each cluster # that follows the order from the sampler relative_order = [order[cluster] for cluster in clusters] # with the relative order, find the absolute order in the # sampled space permutation_ids = [s[s.sort()[1]] for s in relative_order] # permute each cluster so that they follow the order from # the sampler permuted_clusters = [sampled_ids[idx] for idx in permutation_ids] # splits each cluster in batch_size, and merge as a list of tensors splits = [c.split(self.batch_size) for c in permuted_clusters] merged = tuple(itertools.chain.from_iterable(splits)) # now each batch internally has the right order, but # they are grouped by clusters. Find the permutation between # different batches that brings them as close as possible to # the order that we have in the sampler. 
For that, we will consider the # ordering as coming from the first element of each batch, and sort # correspondingly first_element_of_batch = [t[0].item() for t in merged] # get and inverse mapping from sampled indices and the position where # they occur (as returned by the sampler) inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())} # from the first element in each batch, get a relative ordering first_index_of_batch = torch.as_tensor( [inv_sampled_ids_map[s] for s in first_element_of_batch] ) # permute the batches so that they approximately follow the order # from the sampler permutation_order = first_index_of_batch.sort(0)[1].tolist() # finally, permute the batches batches = [merged[i].tolist() for i in permutation_order] if self.drop_uneven: kept = [] for batch in batches: if len(batch) == self.batch_size: kept.append(batch) batches = kept return batches def __iter__(self): if self._can_reuse_batches: batches = self._batches self._can_reuse_batches = False else: batches = self._prepare_batches() self._batches = batches return iter(batches) def __len__(self): if not hasattr(self, "_batches"): self._batches = self._prepare_batches() self._can_reuse_batches = True return len(self._batches)
4,844
41.130435
88
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from torch.utils.data.sampler import BatchSampler class IterationBasedBatchSampler(BatchSampler): """ Wraps a BatchSampler, resampling from it until a specified number of iterations have been sampled """ def __init__(self, batch_sampler, num_iterations, start_iter=0): self.batch_sampler = batch_sampler self.num_iterations = num_iterations self.start_iter = start_iter def __iter__(self): iteration = self.start_iter while iteration <= self.num_iterations: # if the underlying sampler has a set_epoch method, like # DistributedSampler, used for making each process see # a different split of the dataset, then set it if hasattr(self.batch_sampler.sampler, "set_epoch"): self.batch_sampler.sampler.set_epoch(iteration) for batch in self.batch_sampler: iteration += 1 if iteration > self.num_iterations: break yield batch def __len__(self): return self.num_iterations
1,164
35.40625
71
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/samplers/distributed.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # Code is copy-pasted exactly as in torch.utils.data.distributed, # with a modification in the import to use the deprecated backend # FIXME remove this once c10d fixes the bug it has import math import torch import torch.distributed as dist from torch.utils.data.sampler import Sampler from maskrcnn_benchmark.utils.comm import get_rank, get_world_size class DistributedSampler(Sampler): """Sampler that restricts data loading to a subset of the dataset. It is especially useful in conjunction with :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each process can pass a DistributedSampler instance as a DataLoader sampler, and load a subset of the original dataset that is exclusive to it. .. note:: Dataset is assumed to be of constant size. Arguments: dataset: Dataset used for sampling. num_replicas (optional): Number of processes participating in distributed training. rank (optional): Rank of the current process within num_replicas. 
""" def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") # num_replicas = dist.get_world_size() num_replicas = get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") # rank = dist.get_rank() rank = get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas self.shuffle = True def __iter__(self): if self.shuffle: # deterministically shuffle based on epoch g = torch.Generator() g.manual_seed(self.epoch) indices = torch.randperm(len(self.dataset), generator=g).tolist() else: indices = torch.arange(len(self.dataset)).tolist() # add extra samples to make it evenly divisible indices += indices[: (self.total_size - len(indices))] assert len(indices) == self.total_size # subsample offset = self.num_samples * self.rank indices = indices[offset : offset + self.num_samples] assert len(indices) == self.num_samples return iter(indices) def __len__(self): return self.num_samples def set_epoch(self, epoch): self.epoch = epoch
2,777
38.126761
86
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/samplers/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from .distributed import DistributedSampler from .grouped_batch_sampler import GroupedBatchSampler from .iteration_based_batch_sampler import IterationBasedBatchSampler __all__ = ["DistributedSampler", "GroupedBatchSampler", "IterationBasedBatchSampler"]
328
46
85
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/transforms/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from .transforms import Compose from .transforms import Resize from .transforms import RandomHorizontalFlip from .transforms import ToTensor from .transforms import Normalize from .build import build_transforms
285
27.6
71
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/transforms/build.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from . import transforms as T def build_transforms(cfg, is_train=True): to_bgr255 = cfg.INPUT.TO_BGR255 normalize_transform = T.Normalize( mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=to_bgr255 ) if is_train: min_size = cfg.INPUT.MIN_SIZE_TRAIN max_size = cfg.INPUT.MAX_SIZE_TRAIN # flip_prob = 0.5 # cfg.INPUT.FLIP_PROB_TRAIN # flip_prob = 0 # rotate_prob = 0.5 rotate_prob = 0.5 pixel_aug_prob = 0.2 random_crop_prob = cfg.DATASETS.RANDOM_CROP_PROB else: min_size = cfg.INPUT.MIN_SIZE_TEST max_size = cfg.INPUT.MAX_SIZE_TEST # flip_prob = 0 rotate_prob = 0 pixel_aug_prob = 0 random_crop_prob = 0 to_bgr255 = cfg.INPUT.TO_BGR255 normalize_transform = T.Normalize( mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=to_bgr255 ) if cfg.DATASETS.AUG and is_train: if cfg.DATASETS.FIX_CROP: transform = T.Compose( [ T.RandomCrop(1.0, crop_min_size=512, crop_max_size=640, max_trys=50), T.RandomBrightness(pixel_aug_prob), T.RandomContrast(pixel_aug_prob), T.RandomHue(pixel_aug_prob), T.RandomSaturation(pixel_aug_prob), T.RandomGamma(pixel_aug_prob), T.RandomRotate(rotate_prob), T.Resize(min_size, max_size, cfg.INPUT.STRICT_RESIZE), T.ToTensor(), normalize_transform, ] ) else: transform = T.Compose( [ T.RandomCrop(random_crop_prob), T.RandomBrightness(pixel_aug_prob), T.RandomContrast(pixel_aug_prob), T.RandomHue(pixel_aug_prob), T.RandomSaturation(pixel_aug_prob), T.RandomGamma(pixel_aug_prob), T.RandomRotate(rotate_prob, max_theta=cfg.DATASETS.MAX_ROTATE_THETA, fix_rotate=cfg.DATASETS.FIX_ROTATE), T.Resize(min_size, max_size, cfg.INPUT.STRICT_RESIZE), T.ToTensor(), normalize_transform, ] ) else: transform = T.Compose( [ T.Resize(min_size, max_size, cfg.INPUT.STRICT_RESIZE), T.ToTensor(), normalize_transform, ] ) return transform
2,636
36.140845
125
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/data/transforms/transforms.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import random import cv2 import numpy as np from PIL import Image from shapely import affinity from shapely.geometry import Polygon from torchvision.transforms import functional as F class Compose(object): def __init__(self, transforms): self.transforms = transforms def __call__(self, image, target): for t in self.transforms: image, target = t(image, target) return image, target def __repr__(self): format_string = self.__class__.__name__ + "(" for t in self.transforms: format_string += "\n" format_string += " {0}".format(t) format_string += "\n)" return format_string class Resize(object): def __init__(self, min_size, max_size, strict_resize): self.min_size = min_size self.max_size = max_size self.strict_resize = strict_resize # modified from torchvision to add support for max size def get_size(self, image_size): w, h = image_size if isinstance(self.min_size, tuple): if len(self.min_size) == 1: size = self.min_size[0] else: random_size_index = random.randint(0, len(self.min_size) - 1) size = self.min_size[random_size_index] else: size = self.min_size max_size = self.max_size if max_size is not None: min_original_size = float(min((w, h))) max_original_size = float(max((w, h))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (w <= h and w == size) or (h <= w and h == size): if self.strict_resize: h = h if h % 32 == 0 else (h // 32) * 32 w = w if w % 32 == 0 else (w // 32) * 32 return (h, w) if w < h: ow = size oh = int(size * h / w) else: oh = size ow = int(size * w / h) if self.strict_resize: oh = oh if oh % 32 == 0 else (oh // 32) * 32 ow = ow if ow % 32 == 0 else (ow // 32) * 32 return (oh, ow) def __call__(self, image, target): size = self.get_size(image.size) image = F.resize(image, size) if target is not None: target = target.resize(image.size) return image, target class RandomCrop(object): def __init__(self, 
prob, crop_min_size=500, crop_max_size=1000, max_trys=50): self.min_size = crop_min_size self.max_size = crop_max_size self.max_trys = max_trys self.prob = prob def __call__(self, image, target): if random.random() < self.prob: im = np.array(image) w, h = image.size h_array = np.zeros((h), dtype=np.int32) w_array = np.zeros((w), dtype=np.int32) boxes = target.bbox.numpy() if len(boxes) == 0: return image, target for box in boxes: box = np.round(box, decimals=0).astype(np.int32) minx = box[0] maxx = box[2] w_array[minx:maxx] = 1 miny = box[1] maxy = box[3] h_array[miny:maxy] = 1 h_axis = np.where(h_array == 0)[0] w_axis = np.where(w_array == 0)[0] if len(h_axis) == 0 or len(w_axis) == 0: return image, target for _ in range(self.max_trys): xx = np.random.choice(w_axis, size=2) xmin = min(xx) xmax = max(xx) x_size = xmax - xmin if x_size > self.max_size or x_size < self.min_size: continue yy = np.random.choice(h_axis, size=2) ymin = min(yy) ymax = max(yy) y_size = ymax - ymin if y_size > self.max_size or y_size < self.min_size: continue box_in_area = ( (boxes[:, 0] >= xmin) & (boxes[:, 1] >= ymin) & (boxes[:, 2] <= xmax) & (boxes[:, 3] <= ymax) ) if len(np.where(box_in_area)[0]) == 0: continue im = im[ymin:ymax, xmin:xmax] target = target.crop([xmin, ymin, xmax, ymax]) return Image.fromarray(im), target return image, target else: return image, target # class RandomCropFixSize(object): # def __init__(self, prob, crop_size=512, max_trys=50): # self.crop_size = crop_size # self.max_trys = max_trys # self.prob = prob # def __call__(self, image, target): # if random.random() < self.prob: # im = np.array(image) # w, h = image.size # h_array = np.zeros((h), dtype=np.int32) # w_array = np.zeros((w), dtype=np.int32) # boxes = target.bbox.numpy() # if len(boxes) == 0: # return image, target # for box in boxes: # box = np.round(box, decimals=0).astype(np.int32) # minx = box[0] # maxx = box[2] # w_array[minx:maxx] = 1 # miny = box[1] # maxy = box[3] # h_array[miny:maxy] = 1 # 
h_axis = np.where(h_array == 0)[0] # w_axis = np.where(w_array == 0)[0] # if len(h_axis) == 0 or len(w_axis) == 0: # return image, target # for _ in range(self.max_trys): # xx = np.random.choice(w_axis, size=2) # xmin = min(xx) # xmax = max(xx) # x_size = xmax - xmin # if x_size > self.max_size or x_size < self.min_size: # continue # yy = np.random.choice(h_axis, size=2) # ymin = min(yy) # ymax = max(yy) # y_size = ymax - ymin # if y_size > self.max_size or y_size < self.min_size: # continue # box_in_area = ( # (boxes[:, 0] >= xmin) # & (boxes[:, 1] >= ymin) # & (boxes[:, 2] <= xmax) # & (boxes[:, 3] <= ymax) # ) # if len(np.where(box_in_area)[0]) == 0: # continue # im = im[ymin:ymax, xmin:xmax] # target = target.crop([xmin, ymin, xmax, ymax]) # return Image.fromarray(im), target # return image, target # else: # return image, target class RandomHorizontalFlip(object): def __init__(self, prob=0.5): self.prob = prob def __call__(self, image, target): if random.random() < self.prob: image = F.hflip(image) target = target.transpose(0) return image, target class ToTensor(object): def __call__(self, image, target): return F.to_tensor(image), target class Normalize(object): def __init__(self, mean, std, to_bgr255=True): self.mean = mean self.std = std self.to_bgr255 = to_bgr255 def __call__(self, image, target): if self.to_bgr255: image = image[[2, 1, 0]] * 255 image = F.normalize(image, mean=self.mean, std=self.std) return image, target class RandomBrightness(object): def __init__(self, prob=0.5): self.prob = prob def __call__(self, image, target): if random.random() < self.prob: brightness_factor = random.uniform(0.5, 2) image = F.adjust_brightness(image, brightness_factor) return image, target class RandomContrast(object): def __init__(self, prob=0.5): self.prob = prob def __call__(self, image, target): if random.random() < self.prob: contrast_factor = random.uniform(0.5, 2) image = F.adjust_contrast(image, contrast_factor) return image, target class RandomHue(object): 
def __init__(self, prob=0.5): self.prob = prob def __call__(self, image, target): if random.random() < self.prob: hue_factor = random.uniform(-0.25, 0.25) image = F.adjust_hue(image, hue_factor) return image, target class RandomSaturation(object): def __init__(self, prob=0.5): self.prob = prob def __call__(self, image, target): if random.random() < self.prob: saturation_factor = random.uniform(0.5, 2) image = F.adjust_saturation(image, saturation_factor) return image, target class RandomGamma(object): def __init__(self, prob=0.5): self.prob = prob def __call__(self, image, target): if random.random() < self.prob: gamma_factor = random.uniform(0.5, 2) image = F.adjust_gamma(image, gamma_factor) return image, target class RandomRotate(object): def __init__(self, prob, max_theta=30, fix_rotate=False): self.prob = prob self.max_theta = max_theta self.fix_rotate = fix_rotate def __call__(self, image, target): if random.random() < self.prob and target is not None: # try: if self.fix_rotate: delta = 30 else: delta = random.uniform(-1 * self.max_theta, self.max_theta) width, height = image.size ## get the minimal rect to cover the rotated image img_box = [[[0, 0], [width, 0], [width, height], [0, height]]] rotated_img_box = _quad2minrect( _rotate_polygons(img_box, delta, (width / 2, height / 2)) ) r_height = int( max(rotated_img_box[0][3], rotated_img_box[0][1]) - min(rotated_img_box[0][3], rotated_img_box[0][1]) ) r_width = int( max(rotated_img_box[0][2], rotated_img_box[0][0]) - min(rotated_img_box[0][2], rotated_img_box[0][0]) ) r_height = max(r_height, height + 1) r_width = max(r_width, width + 1) ## padding im im_padding = np.zeros((r_height, r_width, 3)) start_h, start_w = ( int((r_height - height) / 2.0), int((r_width - width) / 2.0), ) end_h, end_w = start_h + height, start_w + width im_padding[start_h:end_h, start_w:end_w, :] = image M = cv2.getRotationMatrix2D((r_width / 2, r_height / 2), delta, 1) im = cv2.warpAffine(im_padding, M, (r_width, r_height)) im = 
Image.fromarray(im.astype(np.uint8)) target = target.rotate( -delta, (r_width / 2, r_height / 2), start_h, start_w ) return im, target # except: # return image, target else: return image, target def _quad2minrect(boxes): ## trans a quad(N*4) to a rectangle(N*4) which has miniual area to cover it return np.hstack( ( boxes[:, ::2].min(axis=1).reshape((-1, 1)), boxes[:, 1::2].min(axis=1).reshape((-1, 1)), boxes[:, ::2].max(axis=1).reshape((-1, 1)), boxes[:, 1::2].max(axis=1).reshape((-1, 1)), ) ) def _boxlist2quads(boxlist): res = np.zeros((len(boxlist), 8)) for i, box in enumerate(boxlist): # print(box) res[i] = np.array( [ box[0][0], box[0][1], box[1][0], box[1][1], box[2][0], box[2][1], box[3][0], box[3][1], ] ) return res def _rotate_polygons(polygons, angle, r_c): ## polygons: N*8 ## r_x: rotate center x ## r_y: rotate center y ## angle: -15~15 rotate_boxes_list = [] for poly in polygons: box = Polygon(poly) rbox = affinity.rotate(box, angle, r_c) if len(list(rbox.exterior.coords)) < 5: print("img_box_ori:", poly) print("img_box_rotated:", rbox) # assert(len(list(rbox.exterior.coords))>=5) rotate_boxes_list.append(rbox.boundary.coords[:-1]) res = _boxlist2quads(rotate_boxes_list) return res
12,621
32.480106
83
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/registry.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from maskrcnn_benchmark.utils.registry import Registry BACKBONES = Registry() RPN_HEADS = Registry() ROI_BOX_FEATURE_EXTRACTORS = Registry() ROI_BOX_PREDICTOR = Registry() ROI_KEYPOINT_FEATURE_EXTRACTORS = Registry() ROI_KEYPOINT_PREDICTOR = Registry() ROI_MASK_FEATURE_EXTRACTORS = Registry() ROI_MASK_PREDICTOR = Registry()
400
29.846154
71
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/matcher.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch class Matcher(object): """ This class assigns to each predicted "element" (e.g., a box) a ground-truth element. Each predicted element will have exactly zero or one matches; each ground-truth element may be assigned to zero or more predicted elements. Matching is based on the MxN match_quality_matrix, that characterizes how well each (ground-truth, predicted)-pair match. For example, if the elements are boxes, the matrix may contain box IoU overlap values. The matcher returns a tensor of size N containing the index of the ground-truth element m that matches to prediction n. If there is no match, a negative value is returned. """ BELOW_LOW_THRESHOLD = -1 BETWEEN_THRESHOLDS = -2 def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False): """ Args: high_threshold (float): quality values greater than or equal to this value are candidate matches. low_threshold (float): a lower quality threshold used to stratify matches into three levels: 1) matches >= high_threshold 2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold) 3) BELOW_LOW_THRESHOLD matches in [0, low_threshold) allow_low_quality_matches (bool): if True, produce additional matches for predictions that have only low-quality match candidates. See set_low_quality_matches_ for more details. """ assert low_threshold <= high_threshold self.high_threshold = high_threshold self.low_threshold = low_threshold self.allow_low_quality_matches = allow_low_quality_matches def __call__(self, match_quality_matrix): """ Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. Returns: matches (Tensor[int64]): an N tensor where N[i] is a matched gt in [0, M - 1] or a negative value indicating that prediction i could not be matched. 
""" if match_quality_matrix.numel() == 0: # handle empty case device = match_quality_matrix.device return torch.empty((0,), dtype=torch.int64, device=device) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) if self.allow_low_quality_matches: all_matches = matches.clone() # Assign candidate matches with low quality to negative (unassigned) values below_low_threshold = matched_vals < self.low_threshold between_thresholds = (matched_vals >= self.low_threshold) & ( matched_vals < self.high_threshold ) matches[below_low_threshold] = Matcher.BELOW_LOW_THRESHOLD matches[between_thresholds] = Matcher.BETWEEN_THRESHOLDS if self.allow_low_quality_matches: self.set_low_quality_matches_(matches, all_matches, match_quality_matrix) return matches def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix): """ Produce additional matches for predictions that have only low-quality matches. Specifically, for each ground-truth find the set of predictions that have maximum overlap with it (including ties); for each prediction in that set, if it is unmatched, then match it to the ground-truth with which it has the highest quality value. 
""" # For each gt, find the prediction with which it has highest quality highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) # Find highest quality match available, even if it is low, including ties gt_pred_pairs_of_highest_quality = torch.nonzero( match_quality_matrix == highest_quality_foreach_gt[:, None] ) # Example gt_pred_pairs_of_highest_quality: # tensor([[ 0, 39796], # [ 1, 32055], # [ 1, 32070], # [ 2, 39190], # [ 2, 40255], # [ 3, 40390], # [ 3, 41455], # [ 4, 45470], # [ 5, 45325], # [ 5, 46390]]) # Each row is a (gt index, prediction index) # Note how gt items 1, 2, 3, and 5 each have two ties pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1] matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
4,845
44.28972
88
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/make_layers.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. """ Miscellaneous utility functions """ import torch from torch import nn from torch.nn import functional as F from maskrcnn_benchmark.config import cfg from maskrcnn_benchmark.layers import Conv2d from maskrcnn_benchmark.modeling.poolers import Pooler def get_group_gn(dim, dim_per_gp, num_groups): """get number of groups used by GroupNorm, based on number of channels.""" assert dim_per_gp == -1 or num_groups == -1, \ "GroupNorm: can only specify G or C/G." if dim_per_gp > 0: assert dim % dim_per_gp == 0, \ "dim: {}, dim_per_gp: {}".format(dim, dim_per_gp) group_gn = dim // dim_per_gp else: assert dim % num_groups == 0, \ "dim: {}, num_groups: {}".format(dim, num_groups) group_gn = num_groups return group_gn def group_norm(out_channels, affine=True, divisor=1): out_channels = out_channels // divisor dim_per_gp = cfg.MODEL.GROUP_NORM.DIM_PER_GP // divisor num_groups = cfg.MODEL.GROUP_NORM.NUM_GROUPS // divisor eps = cfg.MODEL.GROUP_NORM.EPSILON # default: 1e-5 return torch.nn.GroupNorm( get_group_gn(out_channels, dim_per_gp, num_groups), out_channels, eps, affine ) def make_conv3x3( in_channels, out_channels, dilation=1, stride=1, use_gn=False, use_relu=False, kaiming_init=True ): conv = Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False if use_gn else True ) if kaiming_init: nn.init.kaiming_normal_( conv.weight, mode="fan_out", nonlinearity="relu" ) else: torch.nn.init.normal_(conv.weight, std=0.01) if not use_gn: nn.init.constant_(conv.bias, 0) module = [conv,] if use_gn: module.append(group_norm(out_channels)) if use_relu: module.append(nn.ReLU(inplace=True)) if len(module) > 1: return nn.Sequential(*module) return conv def make_fc(dim_in, hidden_dim, use_gn=False): ''' Caffe2 implementation uses XavierFill, which in fact corresponds to kaiming_uniform_ in PyTorch ''' if use_gn: fc = nn.Linear(dim_in, hidden_dim, bias=False) 
nn.init.kaiming_uniform_(fc.weight, a=1) return nn.Sequential(fc, group_norm(hidden_dim)) fc = nn.Linear(dim_in, hidden_dim) nn.init.kaiming_uniform_(fc.weight, a=1) nn.init.constant_(fc.bias, 0) return fc def conv_with_kaiming_uniform(use_gn=False, use_relu=False): def make_conv( in_channels, out_channels, kernel_size, stride=1, dilation=1 ): conv = Conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=dilation * (kernel_size - 1) // 2, dilation=dilation, bias=False if use_gn else True ) # Caffe2 implementation uses XavierFill, which in fact # corresponds to kaiming_uniform_ in PyTorch nn.init.kaiming_uniform_(conv.weight, a=1) if not use_gn: nn.init.constant_(conv.bias, 0) module = [conv,] if use_gn: module.append(group_norm(out_channels)) if use_relu: module.append(nn.ReLU(inplace=True)) if len(module) > 1: return nn.Sequential(*module) return conv return make_conv
3,557
27.926829
78
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. """ Miscellaneous utility functions """ import torch def cat(tensors, dim=0): """ Efficient version of torch.cat that avoids a copy if there is only a single element in a list """ assert isinstance(tensors, (list, tuple)) if len(tensors) == 1: return tensors[0] return torch.cat(tensors, dim)
404
22.823529
97
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/poolers.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import math import torch import torch.nn.functional as F from torch import nn from maskrcnn_benchmark.layers import ROIAlign from .utils import cat class LevelMapper(object): """Determine which FPN level each RoI in a set of RoIs should map to based on the heuristic in the FPN paper. """ def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6): """ Arguments: k_min (int) k_max (int) canonical_scale (int) canonical_level (int) eps (float) """ self.k_min = k_min self.k_max = k_max self.s0 = canonical_scale self.lvl0 = canonical_level self.eps = eps def __call__(self, boxlists): """ Arguments: boxlists (list[BoxList]) """ # Compute level ids s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists])) # Eqn.(1) in FPN paper target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps)) target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max) return target_lvls.to(torch.int64) - self.k_min class Pooler(nn.Module): """ Pooler for Detection with or without FPN. It currently hard-code ROIAlign in the implementation, but that can be made more generic later on. Also, the requirement of passing the scales is not strictly necessary, as they can be inferred from the size of the feature map / size of original image, which is available thanks to the BoxList. """ def __init__(self, output_size, scales, sampling_ratio): """ Arguments: output_size (list[tuple[int]] or list[int]): output size for the pooled region scales (list[flaot]): scales for each Pooler sampling_ratio (int): sampling ratio for ROIAlign """ super(Pooler, self).__init__() poolers = [] for scale in scales: poolers.append( ROIAlign( output_size, spatial_scale=scale, sampling_ratio=sampling_ratio ) ) self.poolers = nn.ModuleList(poolers) self.output_size = output_size # get the levels in the feature map by leveraging the fact that the network always # downsamples by a factor of 2 at each level. 
lvl_min = -math.log2(scales[0]) lvl_max = -math.log2(scales[-1]) self.map_levels = LevelMapper(lvl_min, lvl_max) def convert_to_roi_format(self, boxes): concat_boxes = cat([b.bbox for b in boxes], dim=0) device, dtype = concat_boxes.device, concat_boxes.dtype ids = cat( [ torch.full((len(b), 1), i, dtype=dtype, device=device) for i, b in enumerate(boxes) ], dim=0, ) rois = torch.cat([ids, concat_boxes], dim=1) return rois def forward(self, x, boxes): """ Arguments: x (list[Tensor]): feature maps for each level boxes (list[BoxList]): boxes to be used to perform the pooling operation. Returns: result (Tensor) """ num_levels = len(self.poolers) rois = self.convert_to_roi_format(boxes) if num_levels == 1: return self.poolers[0](x[0], rois) levels = self.map_levels(boxes) num_rois = len(rois) num_channels = x[0].shape[1] output_size_h = self.output_size[0] output_size_w = self.output_size[1] dtype, device = x[0].dtype, x[0].device result = torch.zeros( (num_rois, num_channels, output_size_h, output_size_w), dtype=dtype, device=device, ) for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)): idx_in_level = torch.nonzero(levels == level).squeeze(1) rois_per_level = rois[idx_in_level] result[idx_in_level] = pooler(per_level_feature, rois_per_level) return result
4,171
32.645161
90
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch # TODO class BalancedPositiveNegativeSampler(object): """ This class samples batches, ensuring that they contain a fixed proportion of positives """ def __init__(self, batch_size_per_image, positive_fraction): """ Arguments: batch_size_per_image (int): number of elements to be selected per image positive_fraction (float): percentace of positive elements per batch """ self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction def __call__(self, matched_idxs): """ Arguments: matched idxs: list of tensors containing -1, 0 or positive values. Each tensor corresponds to a specific image. -1 values are ignored, 0 are considered as negatives and > 0 as positives. Returns: pos_idx (list[tensor]) neg_idx (list[tensor]) Returns two lists of binary masks for each image. The first list contains the positive elements that were selected, and the second list the negative example. """ pos_idx = [] neg_idx = [] for matched_idxs_per_image in matched_idxs: positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1) negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1) num_pos = int(self.batch_size_per_image * self.positive_fraction) # protect against not enough positive examples num_pos = min(positive.numel(), num_pos) num_neg = self.batch_size_per_image - num_pos # protect against not enough negative examples num_neg = min(negative.numel(), num_neg) # randomly select positive and negative examples perm1 = torch.randperm(positive.numel())[:num_pos] perm2 = torch.randperm(negative.numel())[:num_neg] pos_idx_per_image = positive[perm1] neg_idx_per_image = negative[perm2] # create binary mask from indices pos_idx_per_image_mask = torch.zeros_like( matched_idxs_per_image, dtype=torch.bool ) neg_idx_per_image_mask = torch.zeros_like( matched_idxs_per_image, dtype=torch.bool ) pos_idx_per_image_mask[pos_idx_per_image] = 1 
neg_idx_per_image_mask[neg_idx_per_image] = 1 pos_idx.append(pos_idx_per_image_mask) neg_idx.append(neg_idx_per_image_mask) return pos_idx, neg_idx
2,678
37.271429
83
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/__init__.py
0
0
0
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/box_coder.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import math import torch class BoxCoder(object): """ This class encodes and decodes a set of bounding boxes into the representation used for training the regressors. """ def __init__(self, weights, bbox_xform_clip=None): """ Arguments: weights (4-element tuple) bbox_xform_clip (float) """ self.weights = weights if bbox_xform_clip is None: bbox_xform_clip = math.log(1000.0 / 16) self.bbox_xform_clip = bbox_xform_clip def encode(self, reference_boxes, proposals): """ Encode a set of proposals with respect to some reference boxes Arguments: reference_boxes (Tensor): reference boxes proposals (Tensor): boxes to be encoded """ TO_REMOVE = 1 # TODO remove ex_widths = proposals[:, 2] - proposals[:, 0] + TO_REMOVE ex_heights = proposals[:, 3] - proposals[:, 1] + TO_REMOVE ex_ctr_x = proposals[:, 0] + 0.5 * ex_widths ex_ctr_y = proposals[:, 1] + 0.5 * ex_heights gt_widths = reference_boxes[:, 2] - reference_boxes[:, 0] + TO_REMOVE gt_heights = reference_boxes[:, 3] - reference_boxes[:, 1] + TO_REMOVE gt_ctr_x = reference_boxes[:, 0] + 0.5 * gt_widths gt_ctr_y = reference_boxes[:, 1] + 0.5 * gt_heights wx, wy, ww, wh = self.weights targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights targets_dw = ww * torch.log(gt_widths / ex_widths) targets_dh = wh * torch.log(gt_heights / ex_heights) targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1) return targets def decode(self, rel_codes, boxes): """ From a set of original boxes and encoded relative box offsets, get the decoded boxes. Arguments: rel_codes (Tensor): encoded boxes boxes (Tensor): reference boxes. 
""" boxes = boxes.to(rel_codes.dtype) TO_REMOVE = 1 # TODO remove widths = boxes[:, 2] - boxes[:, 0] + TO_REMOVE heights = boxes[:, 3] - boxes[:, 1] + TO_REMOVE ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights wx, wy, ww, wh = self.weights dx = rel_codes[:, 0::4] / wx dy = rel_codes[:, 1::4] / wy dw = rel_codes[:, 2::4] / ww dh = rel_codes[:, 3::4] / wh # Prevent sending too large values into torch.exp() dw = torch.clamp(dw, max=self.bbox_xform_clip) dh = torch.clamp(dh, max=self.bbox_xform_clip) pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] pred_w = torch.exp(dw) * widths[:, None] pred_h = torch.exp(dh) * heights[:, None] pred_boxes = torch.zeros_like(rel_codes) # x1 pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # y1 pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # x2 (note: "- 1" is correct; don't be fooled by the asymmetry) pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1 # y2 (note: "- 1" is correct; don't be fooled by the asymmetry) pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1 return pred_boxes
3,462
33.979798
86
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/backbone/resnet.py
# # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # """ # Variant of the resnet module that takes cfg as an argument. # Example usage. Strings may be specified in the config file. # model = ResNet( # "StemWithFixedBatchNorm", # "BottleneckWithFixedBatchNorm", # "ResNet50StagesTo4", # ) # Custom implementations may be written in user code and hooked in via the # `register_*` functions. # """ # from collections import namedtuple # import torch # import torch.nn.functional as F # from torch import nn # from maskrcnn_benchmark.layers import FrozenBatchNorm2d # from maskrcnn_benchmark.layers import Conv2d # # ResNet stage specification # StageSpec = namedtuple( # "StageSpec", # [ # "index", # Index of the stage, eg 1, 2, ..,. 5 # "block_count", # Numer of residual blocks in the stage # "return_features", # True => return the last feature map from this stage # ], # ) # # ----------------------------------------------------------------------------- # # Standard ResNet models # # ----------------------------------------------------------------------------- # # ResNet-50 (including all stages) # ResNet50StagesTo5 = ( # StageSpec(index=i, block_count=c, return_features=r) # for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, False), (4, 3, True)) # ) # # ResNet-50 up to stage 4 (excludes stage 5) # ResNet50StagesTo4 = ( # StageSpec(index=i, block_count=c, return_features=r) # for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, True)) # ) # # ResNet-50-FPN (including all stages) # ResNet50FPNStagesTo5 = ( # StageSpec(index=i, block_count=c, return_features=r) # for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 6, True), (4, 3, True)) # ) # # ResNet-101-FPN (including all stages) # ResNet101FPNStagesTo5 = ( # StageSpec(index=i, block_count=c, return_features=r) # for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 23, True), (4, 3, True)) # ) # class ResNet(nn.Module): # def __init__(self, cfg): # super(ResNet, self).__init__() # # If we want to use 
the cfg in forward(), then we should make a copy # # of it and store it for later use: # # self.cfg = cfg.clone() # # Translate string names to implementations # stem_module = _STEM_MODULES[cfg.MODEL.RESNETS.STEM_FUNC] # stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.CONV_BODY] # transformation_module = _TRANSFORMATION_MODULES[cfg.MODEL.RESNETS.TRANS_FUNC] # # Construct the stem module # self.stem = stem_module(cfg) # # Constuct the specified ResNet stages # num_groups = cfg.MODEL.RESNETS.NUM_GROUPS # width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP # in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS # stage2_bottleneck_channels = num_groups * width_per_group # stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS # self.stages = [] # self.return_features = {} # for stage_spec in stage_specs: # name = "layer" + str(stage_spec.index) # stage2_relative_factor = 2 ** (stage_spec.index - 1) # bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor # out_channels = stage2_out_channels * stage2_relative_factor # module = _make_stage( # transformation_module, # in_channels, # bottleneck_channels, # out_channels, # stage_spec.block_count, # num_groups, # cfg.MODEL.RESNETS.STRIDE_IN_1X1, # first_stride=int(stage_spec.index > 1) + 1, # ) # in_channels = out_channels # self.add_module(name, module) # self.stages.append(name) # self.return_features[name] = stage_spec.return_features # # Optionally freeze (requires_grad=False) parts of the backbone # self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT) # def _freeze_backbone(self, freeze_at): # for stage_index in range(freeze_at): # if stage_index == 0: # m = self.stem # stage 0 is the stem # else: # m = getattr(self, "layer" + str(stage_index)) # for p in m.parameters(): # p.requires_grad = False # def forward(self, x): # outputs = [] # x = self.stem(x) # for stage_name in self.stages: # x = getattr(self, stage_name)(x) # if self.return_features[stage_name]: # outputs.append(x) # return outputs 
# class ResNetHead(nn.Module): # def __init__( # self, # block_module, # stages, # num_groups=1, # width_per_group=64, # stride_in_1x1=True, # stride_init=None, # res2_out_channels=256, # ): # super(ResNetHead, self).__init__() # stage2_relative_factor = 2 ** (stages[0].index - 1) # stage2_bottleneck_channels = num_groups * width_per_group # out_channels = res2_out_channels * stage2_relative_factor # in_channels = out_channels // 2 # bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor # block_module = _TRANSFORMATION_MODULES[block_module] # self.stages = [] # stride = stride_init # for stage in stages: # name = "layer" + str(stage.index) # if not stride: # stride = int(stage.index > 1) + 1 # module = _make_stage( # block_module, # in_channels, # bottleneck_channels, # out_channels, # stage.block_count, # num_groups, # stride_in_1x1, # first_stride=stride, # ) # stride = None # self.add_module(name, module) # self.stages.append(name) # def forward(self, x): # for stage in self.stages: # x = getattr(self, stage)(x) # return x # def _make_stage( # transformation_module, # in_channels, # bottleneck_channels, # out_channels, # block_count, # num_groups, # stride_in_1x1, # first_stride, # ): # blocks = [] # stride = first_stride # for _ in range(block_count): # blocks.append( # transformation_module( # in_channels, # bottleneck_channels, # out_channels, # num_groups, # stride_in_1x1, # stride, # ) # ) # stride = 1 # in_channels = out_channels # return nn.Sequential(*blocks) # class BottleneckWithFixedBatchNorm(nn.Module): # def __init__( # self, # in_channels, # bottleneck_channels, # out_channels, # num_groups=1, # stride_in_1x1=True, # stride=1, # ): # super(BottleneckWithFixedBatchNorm, self).__init__() # self.downsample = None # if in_channels != out_channels: # self.downsample = nn.Sequential( # Conv2d( # in_channels, out_channels, kernel_size=1, stride=stride, bias=False # ), # FrozenBatchNorm2d(out_channels), # ) # # The original MSRA ResNet 
models have stride in the first 1x1 conv # # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have # # stride in the 3x3 conv # stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) # self.conv1 = Conv2d( # in_channels, # bottleneck_channels, # kernel_size=1, # stride=stride_1x1, # bias=False, # ) # self.bn1 = FrozenBatchNorm2d(bottleneck_channels) # # TODO: specify init for the above # self.conv2 = Conv2d( # bottleneck_channels, # bottleneck_channels, # kernel_size=3, # stride=stride_3x3, # padding=1, # bias=False, # groups=num_groups, # ) # self.bn2 = FrozenBatchNorm2d(bottleneck_channels) # self.conv3 = Conv2d( # bottleneck_channels, out_channels, kernel_size=1, bias=False # ) # self.bn3 = FrozenBatchNorm2d(out_channels) # def forward(self, x): # residual = x # out = self.conv1(x) # out = self.bn1(out) # out = F.relu_(out) # out = self.conv2(out) # out = self.bn2(out) # out = F.relu_(out) # out0 = self.conv3(out) # out = self.bn3(out0) # if self.downsample is not None: # residual = self.downsample(x) # out += residual # out = F.relu_(out) # return out # class StemWithFixedBatchNorm(nn.Module): # def __init__(self, cfg): # super(StemWithFixedBatchNorm, self).__init__() # out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS # self.conv1 = Conv2d( # 3, out_channels, kernel_size=7, stride=2, padding=3, bias=False # ) # self.bn1 = FrozenBatchNorm2d(out_channels) # def forward(self, x): # x = self.conv1(x) # x = self.bn1(x) # x = F.relu_(x) # x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) # return x # _TRANSFORMATION_MODULES = {"BottleneckWithFixedBatchNorm": BottleneckWithFixedBatchNorm} # _STEM_MODULES = {"StemWithFixedBatchNorm": StemWithFixedBatchNorm} # _STAGE_SPECS = { # "R-50-C4": ResNet50StagesTo4, # "R-50-C5": ResNet50StagesTo5, # "R-50-FPN": ResNet50FPNStagesTo5, # "R-101-FPN": ResNet101FPNStagesTo5, # } # def register_transformation_module(module_name, module): # _register_generic(_TRANSFORMATION_MODULES, 
module_name, module) # def register_stem_module(module_name, module): # _register_generic(_STEM_MODULES, module_name, module) # def register_stage_spec(stage_spec_name, stage_spec): # _register_generic(_STAGE_SPECS, stage_spec_name, stage_spec) # def _register_generic(module_dict, module_name, module): # assert module_name not in module_dict # module_dict[module_name] = module # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. """ Variant of the resnet module that takes cfg as an argument. Example usage. Strings may be specified in the config file. model = ResNet( "StemWithFixedBatchNorm", "BottleneckWithFixedBatchNorm", "ResNet50StagesTo4", ) OR: model = ResNet( "StemWithGN", "BottleneckWithGN", "ResNet50StagesTo4", ) Custom implementations may be written in user code and hooked in via the `register_*` functions. """ from collections import namedtuple import torch import torch.nn.functional as F from torch import nn from maskrcnn_benchmark.layers import FrozenBatchNorm2d from maskrcnn_benchmark.layers import Conv2d from maskrcnn_benchmark.layers import DFConv2d from maskrcnn_benchmark.modeling.make_layers import group_norm from maskrcnn_benchmark.utils.registry import Registry # ResNet stage specification StageSpec = namedtuple( "StageSpec", [ "index", # Index of the stage, eg 1, 2, ..,. 
5 "block_count", # Number of residual blocks in the stage "return_features", # True => return the last feature map from this stage ], ) # ----------------------------------------------------------------------------- # Standard ResNet models # ----------------------------------------------------------------------------- # ResNet-50 (including all stages) ResNet50StagesTo5 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, False), (4, 3, True)) ) # ResNet-50 up to stage 4 (excludes stage 5) ResNet50StagesTo4 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, True)) ) # ResNet-101 (including all stages) ResNet101StagesTo5 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 23, False), (4, 3, True)) ) # ResNet-101 up to stage 4 (excludes stage 5) ResNet101StagesTo4 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 23, True)) ) # ResNet-50-FPN (including all stages) ResNet50FPNStagesTo5 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 6, True), (4, 3, True)) ) # ResNet-101-FPN (including all stages) ResNet101FPNStagesTo5 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 23, True), (4, 3, True)) ) # ResNet-152-FPN (including all stages) ResNet152FPNStagesTo5 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, True), (2, 8, True), (3, 36, True), (4, 3, True)) ) class ResNet(nn.Module): def __init__(self, cfg): super(ResNet, self).__init__() # If we want to use the cfg in forward(), then we should make a copy # of it and store it for later use: # self.cfg = cfg.clone() # Translate string names to implementations stem_module = 
_STEM_MODULES[cfg.MODEL.RESNETS.STEM_FUNC] stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.CONV_BODY] transformation_module = _TRANSFORMATION_MODULES[cfg.MODEL.RESNETS.TRANS_FUNC] # Construct the stem module self.stem = stem_module(cfg) # Constuct the specified ResNet stages num_groups = cfg.MODEL.RESNETS.NUM_GROUPS width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS stage2_bottleneck_channels = num_groups * width_per_group stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS self.stages = [] self.return_features = {} for stage_spec in stage_specs: name = "layer" + str(stage_spec.index) stage2_relative_factor = 2 ** (stage_spec.index - 1) bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor out_channels = stage2_out_channels * stage2_relative_factor stage_with_dcn = cfg.MODEL.RESNETS.STAGE_WITH_DCN[stage_spec.index -1] module = _make_stage( transformation_module, in_channels, bottleneck_channels, out_channels, stage_spec.block_count, num_groups, cfg.MODEL.RESNETS.STRIDE_IN_1X1, first_stride=int(stage_spec.index > 1) + 1, dcn_config={ "stage_with_dcn": stage_with_dcn, "with_modulated_dcn": cfg.MODEL.RESNETS.WITH_MODULATED_DCN, "deformable_groups": cfg.MODEL.RESNETS.DEFORMABLE_GROUPS, } ) in_channels = out_channels self.add_module(name, module) self.stages.append(name) self.return_features[name] = stage_spec.return_features # Optionally freeze (requires_grad=False) parts of the backbone self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT) def _freeze_backbone(self, freeze_at): if freeze_at < 0: return for stage_index in range(freeze_at): if stage_index == 0: m = self.stem # stage 0 is the stem else: m = getattr(self, "layer" + str(stage_index)) for p in m.parameters(): p.requires_grad = False def forward(self, x): outputs = [] x = self.stem(x) for stage_name in self.stages: x = getattr(self, stage_name)(x) if self.return_features[stage_name]: outputs.append(x) return outputs class 
ResNetHead(nn.Module): def __init__( self, block_module, stages, num_groups=1, width_per_group=64, stride_in_1x1=True, stride_init=None, res2_out_channels=256, dilation=1, dcn_config={} ): super(ResNetHead, self).__init__() stage2_relative_factor = 2 ** (stages[0].index - 1) stage2_bottleneck_channels = num_groups * width_per_group out_channels = res2_out_channels * stage2_relative_factor in_channels = out_channels // 2 bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor block_module = _TRANSFORMATION_MODULES[block_module] self.stages = [] stride = stride_init for stage in stages: name = "layer" + str(stage.index) if not stride: stride = int(stage.index > 1) + 1 module = _make_stage( block_module, in_channels, bottleneck_channels, out_channels, stage.block_count, num_groups, stride_in_1x1, first_stride=stride, dilation=dilation, dcn_config=dcn_config ) stride = None self.add_module(name, module) self.stages.append(name) self.out_channels = out_channels def forward(self, x): for stage in self.stages: x = getattr(self, stage)(x) return x def _make_stage( transformation_module, in_channels, bottleneck_channels, out_channels, block_count, num_groups, stride_in_1x1, first_stride, dilation=1, dcn_config={} ): blocks = [] stride = first_stride for _ in range(block_count): blocks.append( transformation_module( in_channels, bottleneck_channels, out_channels, num_groups, stride_in_1x1, stride, dilation=dilation, dcn_config=dcn_config ) ) stride = 1 in_channels = out_channels return nn.Sequential(*blocks) class Bottleneck(nn.Module): def __init__( self, in_channels, bottleneck_channels, out_channels, num_groups, stride_in_1x1, stride, dilation, norm_func, dcn_config ): super(Bottleneck, self).__init__() self.downsample = None if in_channels != out_channels: down_stride = stride if dilation == 1 else 1 self.downsample = nn.Sequential( Conv2d( in_channels, out_channels, kernel_size=1, stride=down_stride, bias=False ), norm_func(out_channels), ) for modules 
in [self.downsample,]: for l in modules.modules(): if isinstance(l, Conv2d): nn.init.kaiming_uniform_(l.weight, a=1) if dilation > 1: stride = 1 # reset to be 1 # The original MSRA ResNet models have stride in the first 1x1 conv # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have # stride in the 3x3 conv stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) self.conv1 = Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, ) self.bn1 = norm_func(bottleneck_channels) # TODO: specify init for the above with_dcn = dcn_config.get("stage_with_dcn", False) if with_dcn: deformable_groups = dcn_config.get("deformable_groups", 1) with_modulated_dcn = dcn_config.get("with_modulated_dcn", False) self.conv2 = DFConv2d( bottleneck_channels, bottleneck_channels, with_modulated_dcn=with_modulated_dcn, kernel_size=3, stride=stride_3x3, groups=num_groups, dilation=dilation, deformable_groups=deformable_groups, bias=False ) else: self.conv2 = Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=dilation, bias=False, groups=num_groups, dilation=dilation ) nn.init.kaiming_uniform_(self.conv2.weight, a=1) self.bn2 = norm_func(bottleneck_channels) self.conv3 = Conv2d( bottleneck_channels, out_channels, kernel_size=1, bias=False ) self.bn3 = norm_func(out_channels) for l in [self.conv1, self.conv3,]: nn.init.kaiming_uniform_(l.weight, a=1) def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = F.relu_(out) out = self.conv2(out) out = self.bn2(out) out = F.relu_(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = F.relu_(out) return out class BaseStem(nn.Module): def __init__(self, cfg, norm_func): super(BaseStem, self).__init__() out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS self.conv1 = Conv2d( 3, out_channels, kernel_size=7, stride=2, padding=3, bias=False ) self.bn1 
= norm_func(out_channels) for l in [self.conv1,]: nn.init.kaiming_uniform_(l.weight, a=1) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = F.relu_(x) x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) return x class BottleneckWithFixedBatchNorm(Bottleneck): def __init__( self, in_channels, bottleneck_channels, out_channels, num_groups=1, stride_in_1x1=True, stride=1, dilation=1, dcn_config={} ): super(BottleneckWithFixedBatchNorm, self).__init__( in_channels=in_channels, bottleneck_channels=bottleneck_channels, out_channels=out_channels, num_groups=num_groups, stride_in_1x1=stride_in_1x1, stride=stride, dilation=dilation, norm_func=FrozenBatchNorm2d, dcn_config=dcn_config ) class StemWithFixedBatchNorm(BaseStem): def __init__(self, cfg): super(StemWithFixedBatchNorm, self).__init__( cfg, norm_func=FrozenBatchNorm2d ) class BottleneckWithGN(Bottleneck): def __init__( self, in_channels, bottleneck_channels, out_channels, num_groups=1, stride_in_1x1=True, stride=1, dilation=1, dcn_config={} ): super(BottleneckWithGN, self).__init__( in_channels=in_channels, bottleneck_channels=bottleneck_channels, out_channels=out_channels, num_groups=num_groups, stride_in_1x1=stride_in_1x1, stride=stride, dilation=dilation, norm_func=group_norm, dcn_config=dcn_config ) class StemWithGN(BaseStem): def __init__(self, cfg): super(StemWithGN, self).__init__(cfg, norm_func=group_norm) _TRANSFORMATION_MODULES = Registry({ "BottleneckWithFixedBatchNorm": BottleneckWithFixedBatchNorm, "BottleneckWithGN": BottleneckWithGN, }) _STEM_MODULES = Registry({ "StemWithFixedBatchNorm": StemWithFixedBatchNorm, "StemWithGN": StemWithGN, }) _STAGE_SPECS = Registry({ "R-50-C4": ResNet50StagesTo4, "R-50-C5": ResNet50StagesTo5, "R-101-C4": ResNet101StagesTo4, "R-101-C5": ResNet101StagesTo5, "R-50-FPN": ResNet50FPNStagesTo5, "R-50-FPN-RETINANET": ResNet50FPNStagesTo5, "R-101-FPN": ResNet101FPNStagesTo5, "R-101-FPN-RETINANET": ResNet101FPNStagesTo5, "R-152-FPN": ResNet152FPNStagesTo5, })
24,619
30.808786
90
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/backbone/backbone.py
# # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # from collections import OrderedDict # from torch import nn # from . import fpn as fpn_module # from . import resnet # def build_resnet_backbone(cfg): # body = resnet.ResNet(cfg) # model = nn.Sequential(OrderedDict([("body", body)])) # return model # def build_resnet_fpn_backbone(cfg): # body = resnet.ResNet(cfg) # in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS # out_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS # fpn = fpn_module.FPN( # in_channels_list=[ # in_channels_stage2, # in_channels_stage2 * 2, # in_channels_stage2 * 4, # in_channels_stage2 * 8, # ], # out_channels=out_channels, # top_blocks=fpn_module.LastLevelMaxPool(), # ) # model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)])) # return model # _BACKBONES = {"resnet": build_resnet_backbone, "resnet-fpn": build_resnet_fpn_backbone} # def build_backbone(cfg): # assert cfg.MODEL.BACKBONE.CONV_BODY.startswith( # "R-" # ), "Only ResNet and ResNeXt models are currently implemented" # # Models using FPN end with "-FPN" # if cfg.MODEL.BACKBONE.CONV_BODY.endswith("-FPN"): # return build_resnet_fpn_backbone(cfg) # return build_resnet_backbone(cfg) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from collections import OrderedDict from torch import nn from maskrcnn_benchmark.modeling import registry from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform from . import fpn as fpn_module # from . 
import resnet @registry.BACKBONES.register("R-50-C4") @registry.BACKBONES.register("R-50-C5") @registry.BACKBONES.register("R-101-C4") @registry.BACKBONES.register("R-101-C5") def build_resnet_backbone(cfg): body = resnet.ResNet(cfg) model = nn.Sequential(OrderedDict([("body", body)])) model.out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS return model @registry.BACKBONES.register("R-18-FPN") @registry.BACKBONES.register("R-34-FPN") @registry.BACKBONES.register("R-50-FPN") @registry.BACKBONES.register("R-101-FPN") @registry.BACKBONES.register("R-152-FPN") def build_resnet_fpn_backbone(cfg): if cfg.MODEL.RESNET34: from . import resnet34 as resnet body = resnet.ResNet(layers=cfg.MODEL.RESNETS.LAYERS) else: from . import resnet body = resnet.ResNet(cfg) in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS fpn = fpn_module.FPN( in_channels_list=[ in_channels_stage2, in_channels_stage2 * 2, in_channels_stage2 * 4, in_channels_stage2 * 8, ], out_channels=out_channels, conv_block=conv_with_kaiming_uniform( cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU ), top_blocks=fpn_module.LastLevelMaxPool(), ) model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)])) model.out_channels = out_channels return model @registry.BACKBONES.register("R-50-FPN-RETINANET") @registry.BACKBONES.register("R-101-FPN-RETINANET") def build_resnet_fpn_p3p7_backbone(cfg): body = resnet.ResNet(cfg) in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS in_channels_p6p7 = in_channels_stage2 * 8 if cfg.MODEL.RETINANET.USE_C5 \ else out_channels fpn = fpn_module.FPN( in_channels_list=[ 0, in_channels_stage2 * 2, in_channels_stage2 * 4, in_channels_stage2 * 8, ], out_channels=out_channels, conv_block=conv_with_kaiming_uniform( cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU ), top_blocks=fpn_module.LastLevelP6P7(in_channels_p6p7, out_channels), ) model = 
nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)])) model.out_channels = out_channels return model def build_backbone(cfg): assert cfg.MODEL.BACKBONE.CONV_BODY in registry.BACKBONES, \ "cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry".format( cfg.MODEL.BACKBONE.CONV_BODY ) return registry.BACKBONES[cfg.MODEL.BACKBONE.CONV_BODY](cfg)
4,395
32.30303
89
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/backbone/resnet34.py
import torch import torch.nn.functional as F from torch import nn import math from maskrcnn_benchmark.layers import FrozenBatchNorm2d from maskrcnn_benchmark.layers import Conv2d def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = FrozenBatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = FrozenBatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block=BasicBlock, layers=[3, 4, 6, 3]): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = FrozenBatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) for m in self.modules(): if isinstance(m, Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, FrozenBatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), FrozenBatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x2 = self.layer1(x) x3 = self.layer2(x2) x4 = self.layer3(x3) x5 = self.layer4(x4) return [x2, x3, x4, x5]
2,729
26.857143
67
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/backbone/fpn.py
# #!/usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # import torch # import torch.nn.functional as F # from torch import nn # class FPN(nn.Module): # """ # Module that adds FPN on top of a list of feature maps. # The feature maps are currently supposed to be in increasing depth # order, and must be consecutive # """ # def __init__(self, in_channels_list, out_channels, top_blocks=None): # """ # Arguments: # in_channels_list (list[int]): number of channels for each feature map that # will be fed # out_channels (int): number of channels of the FPN representation # top_blocks (nn.Module or None): if provided, an extra operation will # be performed on the output of the last (smallest resolution) # FPN output, and the result will extend the result list # """ # super(FPN, self).__init__() # self.inner_blocks = [] # self.layer_blocks = [] # for idx, in_channels in enumerate(in_channels_list, 1): # inner_block = "fpn_inner{}".format(idx) # layer_block = "fpn_layer{}".format(idx) # inner_block_module = nn.Conv2d(in_channels, out_channels, 1) # layer_block_module = nn.Conv2d(out_channels, out_channels, 3, 1, 1) # for module in [inner_block_module, layer_block_module]: # # Caffe2 implementation uses XavierFill, which in fact # # corresponds to kaiming_uniform_ in PyTorch # nn.init.kaiming_uniform_(module.weight, a=1) # nn.init.constant_(module.bias, 0) # self.add_module(inner_block, inner_block_module) # self.add_module(layer_block, layer_block_module) # self.inner_blocks.append(inner_block) # self.layer_blocks.append(layer_block) # self.top_blocks = top_blocks # def forward(self, x): # """ # Arguments: # x (list[Tensor]): feature maps for each feature level. # Returns: # results (tuple[Tensor]): feature maps after FPN layers. # They are ordered from highest resolution first. 
# """ # last_inner = getattr(self, self.inner_blocks[-1])(x[-1]) # results = [] # results.append(getattr(self, self.layer_blocks[-1])(last_inner)) # for feature, inner_block, layer_block in zip( # x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1] # ): # inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest") # inner_lateral = getattr(self, inner_block)(feature) # # TODO use size instead of scale to make it robust to different sizes # # inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:], # # mode='bilinear', align_corners=False) # last_inner = inner_lateral + inner_top_down # results.insert(0, getattr(self, layer_block)(last_inner)) # if self.top_blocks is not None: # last_results = self.top_blocks(results[-1]) # results.extend(last_results) # return tuple(results) # class LastLevelMaxPool(nn.Module): # def forward(self, x): # return [F.max_pool2d(x, 1, 2, 0)] # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch import torch.nn.functional as F from torch import nn class FPN(nn.Module): """ Module that adds FPN on top of a list of feature maps. 
The feature maps are currently supposed to be in increasing depth order, and must be consecutive """ def __init__( self, in_channels_list, out_channels, conv_block, top_blocks=None ): """ Arguments: in_channels_list (list[int]): number of channels for each feature map that will be fed out_channels (int): number of channels of the FPN representation top_blocks (nn.Module or None): if provided, an extra operation will be performed on the output of the last (smallest resolution) FPN output, and the result will extend the result list """ super(FPN, self).__init__() self.inner_blocks = [] self.layer_blocks = [] for idx, in_channels in enumerate(in_channels_list, 1): inner_block = "fpn_inner{}".format(idx) layer_block = "fpn_layer{}".format(idx) if in_channels == 0: continue inner_block_module = conv_block(in_channels, out_channels, 1) layer_block_module = conv_block(out_channels, out_channels, 3, 1) self.add_module(inner_block, inner_block_module) self.add_module(layer_block, layer_block_module) self.inner_blocks.append(inner_block) self.layer_blocks.append(layer_block) self.top_blocks = top_blocks def forward(self, x): """ Arguments: x (list[Tensor]): feature maps for each feature level. Returns: results (tuple[Tensor]): feature maps after FPN layers. They are ordered from highest resolution first. 
""" last_inner = getattr(self, self.inner_blocks[-1])(x[-1]) results = [] results.append(getattr(self, self.layer_blocks[-1])(last_inner)) for feature, inner_block, layer_block in zip( x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1] ): if not inner_block: continue inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest") inner_lateral = getattr(self, inner_block)(feature) # TODO use size instead of scale to make it robust to different sizes # inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:], # mode='bilinear', align_corners=False) last_inner = inner_lateral + inner_top_down results.insert(0, getattr(self, layer_block)(last_inner)) if isinstance(self.top_blocks, LastLevelP6P7): last_results = self.top_blocks(x[-1], results[-1]) results.extend(last_results) elif isinstance(self.top_blocks, LastLevelMaxPool): last_results = self.top_blocks(results[-1]) results.extend(last_results) return tuple(results) class LastLevelMaxPool(nn.Module): def forward(self, x): return [F.max_pool2d(x, 1, 2, 0)] class LastLevelP6P7(nn.Module): """ This module is used in RetinaNet to generate extra layers, P6 and P7. """ def __init__(self, in_channels, out_channels): super(LastLevelP6P7, self).__init__() self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) for module in [self.p6, self.p7]: nn.init.kaiming_uniform_(module.weight, a=1) nn.init.constant_(module.bias, 0) self.use_P5 = in_channels == out_channels def forward(self, c5, p5): x = p5 if self.use_P5 else c5 p6 = self.p6(x) p7 = self.p7(F.relu(p6)) return [p6, p7]
7,331
40.659091
88
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/backbone/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from .backbone import build_backbone
109
35.666667
71
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/detector/detectors.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from .generalized_rcnn import GeneralizedRCNN _DETECTION_META_ARCHITECTURES = {"GeneralizedRCNN": GeneralizedRCNN} def build_detection_model(cfg): meta_arch = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE] return meta_arch(cfg)
347
28
74
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/detector/generalized_rcnn.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. """ Implements the Generalized R-CNN framework """ import torch from torch import nn from maskrcnn_benchmark.structures.image_list import to_image_list from ..backbone import build_backbone from ..rpn.rpn import build_rpn from ..segmentation.segmentation import build_segmentation from ..roi_heads.roi_heads import build_roi_heads import time class GeneralizedRCNN(nn.Module): """ Main class for Generalized R-CNN. Currently supports boxes and masks. It consists of three main parts: - backbone = rpn - heads: takes the features + the proposals from the RPN and computes detections / masks from it. """ def __init__(self, cfg): super(GeneralizedRCNN, self).__init__() self.cfg = cfg self.backbone = build_backbone(cfg) if cfg.MODEL.SEG_ON: self.proposal = build_segmentation(cfg) else: self.proposal = build_rpn(cfg) if cfg.MODEL.TRAIN_DETECTION_ONLY: self.roi_heads = None else: self.roi_heads = build_roi_heads(cfg) def forward(self, images, targets=None): """ Arguments: images (list[Tensor] or ImageList): images to be processed targets (list[BoxList]): ground-truth boxes present in the image (optional) Returns: result (list[BoxList] or dict[Tensor]): the output from the model. During training, it returns a dict[Tensor] which contains the losses. During testing, it returns list[BoxList] contains additional fields like `scores`, `labels` and `mask` (for Mask R-CNN models). 
""" if self.training and targets is None: raise ValueError("In training mode, targets should be passed") # torch.cuda.synchronize() # start_time = time.time() images = to_image_list(images) # torch.cuda.synchronize() # end_time = time.time() # print('image load time:', end_time - start_time) # torch.cuda.synchronize() # start_time = time.time() features = self.backbone(images.tensors) # torch.cuda.synchronize() # end_time = time.time() # print('backbone time:', end_time - start_time) if self.cfg.MODEL.SEG_ON and not self.training: # torch.cuda.synchronize() # start_time = time.time() (proposals, seg_results), fuse_feature = self.proposal(images, features, targets) # torch.cuda.synchronize() # end_time = time.time() # print('seg time:', end_time - start_time) else: if self.cfg.MODEL.SEG_ON: (proposals, proposal_losses), fuse_feature = self.proposal(images, features, targets) else: proposals, proposal_losses = self.proposal(images, features, targets) if self.roi_heads is not None: if self.cfg.MODEL.SEG_ON and self.cfg.MODEL.SEG.USE_FUSE_FEATURE: x, result, detector_losses = self.roi_heads(fuse_feature, proposals, targets) else: x, result, detector_losses = self.roi_heads(features, proposals, targets) else: # RPN-only models don't have roi_heads # x = features result = proposals detector_losses = {} if self.training: losses = {} if self.roi_heads is not None: losses.update(detector_losses) losses.update(proposal_losses) return losses else: if self.cfg.MODEL.SEG_ON: return result, proposals, seg_results else: return result # return result
3,837
35.903846
101
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/detector/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from .detectors import build_detection_model
117
38.333333
71
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/segmentation/inference.py
#!/usr/bin/env python3 import numpy as np import torch import cv2 import pyclipper from shapely.geometry import Polygon from maskrcnn_benchmark.structures.bounding_box import BoxList from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist, cat_boxlist_gt from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask import random import time class SEGPostProcessor(torch.nn.Module): """ Performs post-processing on the outputs of the RPN boxes, before feeding the proposals to the heads """ def __init__( self, top_n, binary_thresh, box_thresh, min_size, cfg, ): """ Arguments: top_n (int) binary_thresh (float) box_thresh (float) min_size (int) """ super(SEGPostProcessor, self).__init__() self.top_n = top_n self.binary_thresh = binary_thresh self.box_thresh = box_thresh self.min_size = min_size self.cfg = cfg def add_gt_proposals(self, proposals, targets): """ Arguments: proposals: list[BoxList] targets: list[BoxList] """ # Get the device we're operating on # device = proposals[0].bbox. 
if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE: gt_boxes = [target.copy_with_fields(['masks']) for target in targets] else: gt_boxes = [target.copy_with_fields([]) for target in targets] # later cat of bbox requires all fields to be present for all bbox # so we need to add a dummy for objectness that's missing # for gt_box in gt_boxes: # gt_box.add_field("objectness", torch.ones(len(gt_box), device=device)) proposals = [ cat_boxlist_gt([proposal, gt_box]) for proposal, gt_box in zip(proposals, gt_boxes) ] return proposals def aug_tensor_proposals(self, boxes): # boxes: N * 4 boxes = boxes.float() N = boxes.shape[0] device = boxes.device aug_boxes = torch.zeros((4, N, 4), device=device) aug_boxes[0, :, :] = boxes.clone() xmin, ymin, xmax, ymax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] x_center = (xmin + xmax) / 2. y_center = (ymin + ymax) / 2. width = xmax - xmin height = ymax - ymin for i in range(3): choice = random.random() if choice < 0.5: # shrink or expand ratio = (torch.randn((N,), device=device) * 3 + 1) / 2. height = height * ratio ratio = (torch.randn((N,), device=device) * 3 + 1) / 2. 
width = width * ratio else: move_x = width * (torch.randn((N,), device=device) * 4 - 2) move_y = height * (torch.randn((N,), device=device) * 4 - 2) x_center += move_x y_center += move_y boxes[:, 0] = x_center - width / 2 boxes[:, 2] = x_center + width / 2 boxes[:, 1] = y_center - height / 2 boxes[:, 3] = y_center + height / 2 aug_boxes[i+1, :, :] = boxes.clone() return aug_boxes.reshape((-1, 4)) def forward_for_single_feature_map(self, pred, image_shapes): """ Arguments: pred: tensor of size N, 1, H, W """ device = pred.device # torch.cuda.synchronize() # start_time = time.time() bitmap = self.binarize(pred) # torch.cuda.synchronize() # end_time = time.time() # print('binarize time:', end_time - start_time) N, height, width = pred.shape[0], pred.shape[2], pred.shape[3] # torch.cuda.synchronize() # start_time = time.time() bitmap_numpy = bitmap.cpu().numpy() # The first channel pred_map_numpy = pred.cpu().numpy() # torch.cuda.synchronize() # end_time = time.time() # print('gpu2numpy time:', end_time - start_time) boxes_batch = [] rotated_boxes_batch = [] polygons_batch = [] scores_batch = [] # torch.cuda.synchronize() # start_time = time.time() for batch_index in range(N): image_shape = image_shapes[batch_index] boxes, scores, rotated_boxes, polygons = self.boxes_from_bitmap( pred_map_numpy[batch_index], bitmap_numpy[batch_index], width, height) boxes = boxes.to(device) if self.training and self.cfg.MODEL.SEG.AUG_PROPOSALS: boxes = self.aug_tensor_proposals(boxes) if boxes.shape[0] > self.top_n: boxes = boxes[:self.top_n, :] # _, top_index = scores.topk(self.top_n, 0, sorted=False) # boxes = boxes[top_index, :] # scores = scores[top_index] # boxlist = BoxList(boxes, (width, height), mode="xyxy") boxlist = BoxList(boxes, (image_shape[1], image_shape[0]), mode="xyxy") if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE: masks = SegmentationMask(polygons, (image_shape[1], 
image_shape[0])) boxlist.add_field('masks', masks) boxlist = boxlist.clip_to_image(remove_empty=False) # boxlist = remove_small_boxes(boxlist, self.min_size) boxes_batch.append(boxlist) rotated_boxes_batch.append(rotated_boxes) polygons_batch.append(polygons) scores_batch.append(scores) # torch.cuda.synchronize() # end_time = time.time() # print('loop time:', end_time - start_time) return boxes_batch, rotated_boxes_batch, polygons_batch, scores_batch def forward(self, seg_output, image_shapes, targets=None): """ Arguments: seg_output: list[tensor] Returns: boxlists (list[BoxList]): bounding boxes """ sampled_boxes = [] boxes_batch, rotated_boxes_batch, polygons_batch, scores_batch = self.forward_for_single_feature_map(seg_output, image_shapes) if not self.training: return boxes_batch, rotated_boxes_batch, polygons_batch, scores_batch sampled_boxes.append(boxes_batch) boxlists = list(zip(*sampled_boxes)) boxlists = [cat_boxlist(boxlist) for boxlist in boxlists] # append ground-truth bboxes to proposals if self.training and targets is not None: boxlists = self.add_gt_proposals(boxlists, targets) return boxlists # def select_over_all_levels(self, boxlists): # num_images = len(boxlists) # # different behavior during training and during testing: # # during training, post_nms_top_n is over *all* the proposals combined, while # # during testing, it is over the proposals for each image # # TODO resolve this difference and make it consistent. 
It should be per image, # # and not per batch # if self.training: # objectness = torch.cat( # [boxlist.get_field("objectness") for boxlist in boxlists], dim=0 # ) # box_sizes = [len(boxlist) for boxlist in boxlists] # post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness)) # _, inds_sorted = torch.topk(objectness, post_nms_top_n, dim=0, sorted=True) # inds_mask = torch.zeros_like(objectness, dtype=torch.uint8) # inds_mask[inds_sorted] = 1 # inds_mask = inds_mask.split(box_sizes) # for i in range(num_images): # boxlists[i] = boxlists[i][inds_mask[i]] # else: # for i in range(num_images): # objectness = boxlists[i].get_field("objectness") # post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness)) # _, inds_sorted = torch.topk( # objectness, post_nms_top_n, dim=0, sorted=True # ) # boxlists[i] = boxlists[i][inds_sorted] # return boxlists def binarize(self, pred): if self.cfg.MODEL.SEG.USE_MULTIPLE_THRESH: binary_maps = [] for thre in self.cfg.MODEL.SEG.MULTIPLE_THRESH: binary_map = pred > thre binary_maps.append(binary_map) return torch.cat(binary_maps, dim=1) else: return pred > self.binary_thresh def boxes_from_bitmap(self, pred, bitmap, dest_width, dest_height): """ _bitmap: single map with shape (1, H, W), whose values are binarized as {0, 1} """ # assert _bitmap.size(0) == 1 # bitmap = _bitmap[0] # The first channel pred = pred[0] height, width = bitmap.shape[1], bitmap.shape[2] boxes = [] scores = [] rotated_boxes = [] polygons = [] contours_all = [] for i in range(bitmap.shape[0]): try: _, contours, _ = cv2.findContours( (bitmap[i] * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE, ) except BaseException: contours, _ = cv2.findContours( (bitmap[i] * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE, ) contours_all.extend(contours) for contour in contours_all: epsilon = 0.01 * cv2.arcLength(contour, True) approx = cv2.approxPolyDP(contour, epsilon, True) polygon = approx.reshape((-1, 2)) points, sside = 
self.get_mini_boxes(contour) if sside < self.min_size: continue points = np.array(points) score = self.box_score_fast(pred, points) if not self.training and self.box_thresh > score: continue if polygon.shape[0] > 2: polygon = self.unclip(polygon, expand_ratio=self.cfg.MODEL.SEG.EXPAND_RATIO) if len(polygon) > 1: continue else: continue # polygon = polygon.reshape(-1, 2) polygon = polygon.reshape(-1) box = self.unclip(points, expand_ratio=self.cfg.MODEL.SEG.BOX_EXPAND_RATIO).reshape(-1, 2) box = np.array(box) box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width) box[:, 1] = np.clip( np.round(box[:, 1] / height * dest_height), 0, dest_height ) min_x, min_y = min(box[:, 0]), min(box[:, 1]) max_x, max_y = max(box[:, 0]), max(box[:, 1]) horizontal_box = torch.from_numpy(np.array([min_x, min_y, max_x, max_y])) boxes.append(horizontal_box) scores.append(score) rotated_box, _ = self.get_mini_boxes(box.reshape(-1, 1, 2)) rotated_box = np.array(rotated_box) rotated_boxes.append(rotated_box) polygons.append([polygon]) if len(boxes) == 0: boxes = [torch.from_numpy(np.array([0, 0, 0, 0]))] scores = [0.] boxes = torch.stack(boxes) scores = torch.from_numpy(np.array(scores)) return boxes, scores, rotated_boxes, polygons def aug_proposals(self, box): xmin, ymin, xmax, ymax = box[0], box[1], box[2], box[3] x_center = int((xmin + xmax) / 2.) y_center = int((ymin + ymax) / 2.) width = xmax - xmin height = ymax - ymin choice = random.random() if choice < 0.5: # shrink or expand ratio = (random.random() * 3 + 1) / 2. height = height * ratio ratio = (random.random() * 3 + 1) / 2. 
width = width * ratio else: move_x = width * (random.random() * 4 - 2) move_y = height * (random.random() * 4 - 2) x_center += move_x y_center += move_y xmin = int(x_center - width / 2) xmax = int(x_center + width / 2) ymin = int(y_center - height / 2) ymax = int(y_center + height / 2) return [xmin, ymin, xmax, ymax] def unclip(self, box, expand_ratio=1.5): poly = Polygon(box) distance = poly.area * expand_ratio / poly.length offset = pyclipper.PyclipperOffset() offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) expanded = np.array(offset.Execute(distance)) return expanded def get_mini_boxes(self, contour): bounding_box = cv2.minAreaRect(contour) points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0]) index_1, index_2, index_3, index_4 = 0, 1, 2, 3 if points[1][1] > points[0][1]: index_1 = 0 index_4 = 1 else: index_1 = 1 index_4 = 0 if points[3][1] > points[2][1]: index_2 = 2 index_3 = 3 else: index_2 = 3 index_3 = 2 box = [points[index_1], points[index_2], points[index_3], points[index_4]] return box, min(bounding_box[1]) def box_score(self, bitmap, box): """ naive version of box score computation, only for helping principle understand. 
""" mask = np.zeros_like(bitmap, dtype=np.uint8) cv2.fillPoly(mask, box.reshape(1, 4, 2).astype(np.int32), 1) return cv2.mean(bitmap, mask)[0] def box_score_fast(self, bitmap, _box): h, w = bitmap.shape[:2] box = _box.copy() xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int), 0, w - 1) xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int), 0, w - 1) ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int), 0, h - 1) ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int), 0, h - 1) mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8) box[:, 0] = box[:, 0] - xmin box[:, 1] = box[:, 1] - ymin cv2.fillPoly(mask, box.reshape(1, 4, 2).astype(np.int32), 1) return cv2.mean(bitmap[ymin : ymax + 1, xmin : xmax + 1], mask)[0] def make_seg_postprocessor(config, is_train): top_n = config.MODEL.SEG.TOP_N_TRAIN if not is_train: top_n = config.MODEL.SEG.TOP_N_TEST binary_thresh = config.MODEL.SEG.BINARY_THRESH box_thresh = config.MODEL.SEG.BOX_THRESH min_size = config.MODEL.SEG.MIN_SIZE box_selector = SEGPostProcessor( top_n=top_n, binary_thresh=binary_thresh, box_thresh=box_thresh, min_size=min_size, cfg = config ) return box_selector
15,089
38.815303
148
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/segmentation/loss.py
#!/usr/bin/env python3 """ This file contains specific functions for computing losses on the SEG file """ import torch class SEGLossComputation(object): """ This class computes the SEG loss. """ def __init__(self, cfg): self.eps = 1e-6 self.cfg = cfg def __call__(self, preds, targets): """ Arguments: preds (Tensor) targets (list[Tensor]) masks (list[Tensor]) Returns: seg_loss (Tensor) """ image_size = (preds.shape[2], preds.shape[3]) segm_targets, masks = self.prepare_targets(targets, image_size) device = preds.device segm_targets = segm_targets.float().to(device) masks = masks.float().to(device) seg_loss = self.dice_loss(preds, segm_targets, masks) return seg_loss def dice_loss(self, pred, gt, m): intersection = torch.sum(pred * gt * m) union = torch.sum(pred * m) + torch.sum(gt * m) + self.eps loss = 1 - 2.0 * intersection / union return loss def project_masks_on_image(self, mask_polygons, labels, shrink_ratio, image_size): seg_map, training_mask = mask_polygons.convert_seg_map( labels, shrink_ratio, image_size, self.cfg.MODEL.SEG.IGNORE_DIFFICULT ) return torch.from_numpy(seg_map), torch.from_numpy(training_mask) def prepare_targets(self, targets, image_size): segms = [] training_masks = [] for target_per_image in targets: segmentation_masks = target_per_image.get_field("masks") labels = target_per_image.get_field("labels") seg_maps_per_image, training_masks_per_image = self.project_masks_on_image( segmentation_masks, labels, self.cfg.MODEL.SEG.SHRINK_RATIO, image_size ) segms.append(seg_maps_per_image) training_masks.append(training_masks_per_image) return torch.stack(segms), torch.stack(training_masks) def make_seg_loss_evaluator(cfg): loss_evaluator = SEGLossComputation(cfg) return loss_evaluator
2,122
31.661538
87
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/segmentation/segmentation.py
#!/usr/bin/env python3 import torch from torch import nn from .inference import make_seg_postprocessor from .loss import make_seg_loss_evaluator import time def conv3x3(in_planes, out_planes, stride=1, has_bias=False): "3x3 convolution with padding" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=has_bias ) def conv3x3_bn_relu(in_planes, out_planes, stride=1, has_bias=False): return nn.Sequential( conv3x3(in_planes, out_planes, stride), nn.BatchNorm2d(out_planes), nn.ReLU(inplace=True), ) class SEGHead(nn.Module): """ Adds a simple SEG Head with pixel-level prediction """ def __init__(self, in_channels, cfg): """ Arguments: in_channels (int): number of channels of the input feature """ super(SEGHead, self).__init__() self.cfg = cfg ndim = 256 self.fpn_out5 = nn.Sequential( conv3x3(ndim, 64), nn.Upsample(scale_factor=8, mode="nearest") ) self.fpn_out4 = nn.Sequential( conv3x3(ndim, 64), nn.Upsample(scale_factor=4, mode="nearest") ) self.fpn_out3 = nn.Sequential( conv3x3(ndim, 64), nn.Upsample(scale_factor=2, mode="nearest") ) self.fpn_out2 = conv3x3(ndim, 64) self.seg_out = nn.Sequential( conv3x3_bn_relu(in_channels, 64, 1), nn.ConvTranspose2d(64, 64, 2, 2), nn.BatchNorm2d(64), nn.ReLU(True), nn.ConvTranspose2d(64, 1, 2, 2), nn.Sigmoid(), ) if self.cfg.MODEL.SEG.USE_PPM: # PPM Module pool_scales=(2, 4, 8) fc_dim = 256 self.ppm_pooling = [] self.ppm_conv = [] for scale in pool_scales: self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale)) self.ppm_conv.append(nn.Sequential( nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), nn.BatchNorm2d(512), nn.ReLU(inplace=True) )) self.ppm_pooling = nn.ModuleList(self.ppm_pooling) self.ppm_conv = nn.ModuleList(self.ppm_conv) self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, ndim, 1) self.ppm_conv.apply(self.weights_init) self.ppm_last_conv.apply(self.weights_init) self.fpn_out5.apply(self.weights_init) self.fpn_out4.apply(self.weights_init) 
self.fpn_out3.apply(self.weights_init) self.fpn_out2.apply(self.weights_init) self.seg_out.apply(self.weights_init) def forward(self, x): if self.cfg.MODEL.SEG.USE_PPM: conv5 = x[-2] input_size = conv5.size() ppm_out = [conv5] for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv): ppm_out.append(pool_conv(nn.functional.interpolate( pool_scale(conv5), (input_size[2], input_size[3]), mode='bilinear', align_corners=False))) ppm_out = torch.cat(ppm_out, 1) f = self.ppm_last_conv(ppm_out) else: f = x[-2] # p5 = self.fpn_out5(x[-2]) p5 = self.fpn_out5(f) p4 = self.fpn_out4(x[-3]) p3 = self.fpn_out3(x[-4]) p2 = self.fpn_out2(x[-5]) fuse = torch.cat((p5, p4, p3, p2), 1) out = self.seg_out(fuse) return out, fuse def weights_init(self, m): classname = m.__class__.__name__ if classname.find("Conv") != -1: nn.init.kaiming_normal_(m.weight.data) elif classname.find("BatchNorm") != -1: m.weight.data.fill_(1.0) m.bias.data.fill_(1e-4) class SEGModule(torch.nn.Module): """ Module for RPN computation. Takes feature maps from the backbone and RPN proposals and losses. Works for both FPN and non-FPN. """ def __init__(self, cfg): super(SEGModule, self).__init__() self.cfg = cfg.clone() in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS head = SEGHead(in_channels, cfg) box_selector_train = make_seg_postprocessor(cfg, is_train=True) box_selector_test = make_seg_postprocessor(cfg, is_train=False) loss_evaluator = make_seg_loss_evaluator(cfg) # self.anchor_generator = anchor_generator self.head = head self.box_selector_train = box_selector_train self.box_selector_test = box_selector_test self.loss_evaluator = loss_evaluator def forward(self, images, features, targets=None): """ Arguments: images (ImageList): images for which we want to compute the predictions features (Tensor): fused feature from FPN targets (Tensor): segmentaion gt map Returns: boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per image. 
losses (dict[Tensor]): the losses for the model during training. During testing, it is an empty dict. """ preds, fuse_feature = self.head(features) # anchors = self.anchor_generator(images, features) image_shapes = images.get_sizes() if self.training: return self._forward_train(preds, targets, image_shapes), [fuse_feature] else: return self._forward_test(preds, image_shapes), [fuse_feature] def _forward_train(self, preds, targets, image_shapes): # Segmentation map must be transformed into boxes for detection. # sampled into a training batch. with torch.no_grad(): boxes = self.box_selector_train(preds, image_shapes, targets) loss_seg = self.loss_evaluator(preds, targets) losses = {"loss_seg": loss_seg} return boxes, losses def _forward_test(self, preds, image_shapes): # torch.cuda.synchronize() # start_time = time.time() boxes, rotated_boxes, polygons, scores = self.box_selector_test(preds, image_shapes) # torch.cuda.synchronize() # end_time = time.time() # print('post time:', end_time - start_time) seg_results = {'rotated_boxes': rotated_boxes, 'polygons': polygons, 'preds': preds, 'scores': scores} return boxes, seg_results def build_segmentation(cfg): """ This gives the gist of it. Not super important because it doesn't change as much """ return SEGModule(cfg)
6,573
34.923497
110
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/rpn/inference.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch from maskrcnn_benchmark.modeling.box_coder import BoxCoder from maskrcnn_benchmark.structures.bounding_box import BoxList from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes import pdb class RPNPostProcessor(torch.nn.Module): """ Performs post-processing on the outputs of the RPN boxes, before feeding the proposals to the heads """ def __init__( self, pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, box_coder=None, fpn_post_nms_top_n=None, ): """ Arguments: pre_nms_top_n (int) post_nms_top_n (int) nms_thresh (float) min_size (int) box_coder (BoxCoder) fpn_post_nms_top_n (int) """ super(RPNPostProcessor, self).__init__() self.pre_nms_top_n = pre_nms_top_n self.post_nms_top_n = post_nms_top_n self.nms_thresh = nms_thresh self.min_size = min_size if box_coder is None: box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0)) self.box_coder = box_coder if fpn_post_nms_top_n is None: fpn_post_nms_top_n = post_nms_top_n self.fpn_post_nms_top_n = fpn_post_nms_top_n def add_gt_proposals(self, proposals, targets): """ Arguments: proposals: list[BoxList] targets: list[BoxList] """ # Get the device we're operating on device = proposals[0].bbox.device gt_boxes = [target.copy_with_fields([]) for target in targets] # later cat of bbox requires all fields to be present for all bbox # so we need to add a dummy for objectness that's missing for gt_box in gt_boxes: gt_box.add_field("objectness", torch.ones(len(gt_box), device=device)) proposals = [ cat_boxlist((proposal, gt_box)) for proposal, gt_box in zip(proposals, gt_boxes) ] return proposals def forward_for_single_feature_map(self, anchors, objectness, box_regression): """ Arguments: anchors: list[BoxList] objectness: tensor of size N, A, H, W box_regression: tensor of size 
N, A * 4, H, W """ device = objectness.device N, A, H, W = objectness.shape # put in the same format as anchors objectness = objectness.permute(0, 2, 3, 1).reshape(N, -1) objectness = objectness.sigmoid() box_regression = box_regression.view(N, -1, 4, H, W).permute(0, 3, 4, 1, 2) box_regression = box_regression.reshape(N, -1, 4) num_anchors = A * H * W pre_nms_top_n = min(self.pre_nms_top_n, num_anchors) objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True) batch_idx = torch.arange(N, device=device)[:, None] box_regression = box_regression[batch_idx, topk_idx] image_shapes = [box.size for box in anchors] concat_anchors = torch.cat([a.bbox for a in anchors], dim=0) concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx] proposals = self.box_coder.decode( box_regression.view(-1, 4), concat_anchors.view(-1, 4) ) proposals = proposals.view(N, -1, 4) result = [] for proposal, score, im_shape in zip(proposals, objectness, image_shapes): boxlist = BoxList(proposal, im_shape, mode="xyxy") boxlist.add_field("objectness", score) boxlist = boxlist.clip_to_image(remove_empty=False) boxlist = remove_small_boxes(boxlist, self.min_size) boxlist = boxlist_nms( boxlist, self.nms_thresh, max_proposals=self.post_nms_top_n, score_field="objectness", ) result.append(boxlist) return result def forward(self, anchors, objectness, box_regression, targets=None): """ Arguments: anchors: list[list[BoxList]] objectness: list[tensor] box_regression: list[tensor] Returns: boxlists (list[BoxList]): the post-processed anchors, after applying box decoding and NMS """ sampled_boxes = [] num_levels = len(objectness) anchors = list(zip(*anchors)) for a, o, b in zip(anchors, objectness, box_regression): sampled_boxes.append(self.forward_for_single_feature_map(a, o, b)) boxlists = list(zip(*sampled_boxes)) boxlists = [cat_boxlist(boxlist) for boxlist in boxlists] if num_levels > 1: boxlists = self.select_over_all_levels(boxlists) # append ground-truth bboxes to 
proposals if self.training and targets is not None: boxlists = self.add_gt_proposals(boxlists, targets) return boxlists def select_over_all_levels(self, boxlists): num_images = len(boxlists) # different behavior during training and during testing: # during training, post_nms_top_n is over *all* the proposals combined, while # during testing, it is over the proposals for each image # TODO resolve this difference and make it consistent. It should be per image, # and not per batch if self.training: objectness = torch.cat( [boxlist.get_field("objectness") for boxlist in boxlists], dim=0 ) box_sizes = [len(boxlist) for boxlist in boxlists] post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness)) _, inds_sorted = torch.topk(objectness, post_nms_top_n, dim=0, sorted=True) inds_mask = torch.zeros_like(objectness, dtype=torch.bool) inds_mask[inds_sorted] = 1 inds_mask = inds_mask.split(box_sizes) for i in range(num_images): boxlists[i] = boxlists[i][inds_mask[i]] else: for i in range(num_images): objectness = boxlists[i].get_field("objectness") post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness)) _, inds_sorted = torch.topk( objectness, post_nms_top_n, dim=0, sorted=True ) boxlists[i] = boxlists[i][inds_sorted] return boxlists def make_rpn_postprocessor(config, rpn_box_coder, is_train): fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN if not is_train: fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TRAIN post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TRAIN if not is_train: pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TEST post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TEST nms_thresh = config.MODEL.RPN.NMS_THRESH min_size = config.MODEL.RPN.MIN_SIZE box_selector = RPNPostProcessor( pre_nms_top_n=pre_nms_top_n, post_nms_top_n=post_nms_top_n, nms_thresh=nms_thresh, min_size=min_size, box_coder=rpn_box_coder, fpn_post_nms_top_n=fpn_post_nms_top_n, ) return box_selector
7,490
35.720588
87
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/rpn/anchor_generator.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import math import numpy as np import torch from torch import nn from maskrcnn_benchmark.structures.bounding_box import BoxList class BufferList(nn.Module): """ Similar to nn.ParameterList, but for buffers """ def __init__(self, buffers=None): super(BufferList, self).__init__() if buffers is not None: self.extend(buffers) def extend(self, buffers): offset = len(self) for i, buffer in enumerate(buffers): self.register_buffer(str(offset + i), buffer) return self def __len__(self): return len(self._buffers) def __iter__(self): return iter(self._buffers.values()) class AnchorGenerator(nn.Module): """ For a set of image sizes and feature maps, computes a set of anchors """ def __init__( self, sizes=(128, 256, 512), aspect_ratios=(0.5, 1.0, 2.0), anchor_strides=(8, 16, 32), straddle_thresh=0, ): super(AnchorGenerator, self).__init__() if len(anchor_strides) == 1: anchor_stride = anchor_strides[0] cell_anchors = [ generate_anchors(anchor_stride, sizes, aspect_ratios).float() ] else: if len(anchor_strides) != len(sizes): raise RuntimeError("FPN should have #anchor_strides == #sizes") cell_anchors = [ generate_anchors(anchor_stride, (size,), aspect_ratios).float() for anchor_stride, size in zip(anchor_strides, sizes) ] self.strides = anchor_strides self.cell_anchors = BufferList(cell_anchors) self.straddle_thresh = straddle_thresh def num_anchors_per_location(self): return [len(cell_anchors) for cell_anchors in self.cell_anchors] def grid_anchors(self, grid_sizes): anchors = [] for size, stride, base_anchors in zip( grid_sizes, self.strides, self.cell_anchors ): grid_height, grid_width = size device = base_anchors.device shifts_x = torch.arange( 0, grid_width * stride, step=stride, dtype=torch.float32, device=device ) shifts_y = torch.arange( 0, grid_height * stride, step=stride, dtype=torch.float32, device=device ) shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) shift_x = shift_x.reshape(-1) 
shift_y = shift_y.reshape(-1) shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) anchors.append( (shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4) ) return anchors def add_visibility_to(self, boxlist): image_width, image_height = boxlist.size anchors = boxlist.bbox if self.straddle_thresh >= 0: inds_inside = ( (anchors[..., 0] >= -self.straddle_thresh) & (anchors[..., 1] >= -self.straddle_thresh) & (anchors[..., 2] < image_width + self.straddle_thresh) & (anchors[..., 3] < image_height + self.straddle_thresh) ) else: device = anchors.device inds_inside = torch.ones(anchors.shape[0], dtype=torch.bool, device=device) boxlist.add_field("visibility", inds_inside) def forward(self, image_list, feature_maps): grid_height, grid_width = feature_maps[0].shape[-2:] grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps] anchors_over_all_feature_maps = self.grid_anchors(grid_sizes) anchors = [] for i, (image_height, image_width) in enumerate(image_list.image_sizes): anchors_in_image = [] for anchors_per_feature_map in anchors_over_all_feature_maps: boxlist = BoxList( anchors_per_feature_map, (image_width, image_height), mode="xyxy" ) self.add_visibility_to(boxlist) anchors_in_image.append(boxlist) anchors.append(anchors_in_image) return anchors def make_anchor_generator(config): anchor_sizes = config.MODEL.RPN.ANCHOR_SIZES aspect_ratios = config.MODEL.RPN.ASPECT_RATIOS anchor_stride = config.MODEL.RPN.ANCHOR_STRIDE straddle_thresh = config.MODEL.RPN.STRADDLE_THRESH if config.MODEL.RPN.USE_FPN: assert len(anchor_stride) == len( anchor_sizes ), "FPN should have len(ANCHOR_STRIDE) == len(ANCHOR_SIZES)" else: assert len(anchor_stride) == 1, "Non-FPN should have a single ANCHOR_STRIDE" anchor_generator = AnchorGenerator( anchor_sizes, aspect_ratios, anchor_stride, straddle_thresh ) return anchor_generator # Copyright (c) 2017-present, Facebook, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## # # Based on: # -------------------------------------------------------- # Faster R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick and Sean Bell # -------------------------------------------------------- # Verify that we compute the same anchors as Shaoqing's matlab implementation: # # >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat # >> anchors # # anchors = # # -83 -39 100 56 # -175 -87 192 104 # -359 -183 376 200 # -55 -55 72 72 # -119 -119 136 136 # -247 -247 264 264 # -35 -79 52 96 # -79 -167 96 184 # -167 -343 184 360 # array([[ -83., -39., 100., 56.], # [-175., -87., 192., 104.], # [-359., -183., 376., 200.], # [ -55., -55., 72., 72.], # [-119., -119., 136., 136.], # [-247., -247., 264., 264.], # [ -35., -79., 52., 96.], # [ -79., -167., 96., 184.], # [-167., -343., 184., 360.]]) def generate_anchors( stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2) ): """Generates a matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors are centered on stride / 2, have (approximate) sqrt areas of the specified sizes, and aspect ratios as given. 
""" return _generate_anchors( stride, np.array(sizes, dtype=np.float) / stride, np.array(aspect_ratios, dtype=np.float), ) def _generate_anchors(base_size, scales, aspect_ratios): """Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, base_size - 1, base_size - 1) window. """ anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 1 anchors = _ratio_enum(anchor, aspect_ratios) anchors = np.vstack( [_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])] ) return torch.from_numpy(anchors) def _whctrs(anchor): """Return width, height, x center, and y center for an anchor (window).""" w = anchor[2] - anchor[0] + 1 h = anchor[3] - anchor[1] + 1 x_ctr = anchor[0] + 0.5 * (w - 1) y_ctr = anchor[1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def _mkanchors(ws, hs, x_ctr, y_ctr): """Given a vector of widths (ws) and heights (hs) around a center (x_ctr, y_ctr), output a set of anchors (windows). """ ws = ws[:, np.newaxis] hs = hs[:, np.newaxis] anchors = np.hstack( ( x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1), ) ) return anchors def _ratio_enum(anchor, ratios): """Enumerate a set of anchors for each aspect ratio wrt an anchor.""" w, h, x_ctr, y_ctr = _whctrs(anchor) size = w * h size_ratios = size / ratios ws = np.round(np.sqrt(size_ratios)) hs = np.round(ws * ratios) anchors = _mkanchors(ws, hs, x_ctr, y_ctr) return anchors def _scale_enum(anchor, scales): """Enumerate a set of anchors for each scale wrt an anchor.""" w, h, x_ctr, y_ctr = _whctrs(anchor) ws = w * scales hs = h * scales anchors = _mkanchors(ws, hs, x_ctr, y_ctr) return anchors
8,907
32.742424
88
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/rpn/loss.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. """ This file contains specific functions for computing losses on the RPN file """ import torch from torch.nn import functional as F from ..balanced_positive_negative_sampler import BalancedPositiveNegativeSampler from ..utils import cat from maskrcnn_benchmark.layers import smooth_l1_loss from maskrcnn_benchmark.modeling.matcher import Matcher from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist class RPNLossComputation(object): """ This class computes the RPN loss. """ def __init__(self, proposal_matcher, fg_bg_sampler, box_coder): """ Arguments: proposal_matcher (Matcher) fg_bg_sampler (BalancedPositiveNegativeSampler) box_coder (BoxCoder) """ # self.target_preparator = target_preparator self.proposal_matcher = proposal_matcher self.fg_bg_sampler = fg_bg_sampler self.box_coder = box_coder def match_targets_to_anchors(self, anchor, target): match_quality_matrix = boxlist_iou(target, anchor) matched_idxs = self.proposal_matcher(match_quality_matrix) # RPN doesn't need any fields from target # for creating the labels, so clear them all target = target.copy_with_fields([]) # get the targets corresponding GT for each anchor # NB: need to clamp the indices because we can have a single # GT in the image, and matched_idxs can be -2, which goes # out of bounds matched_targets = target[matched_idxs.clamp(min=0)] matched_targets.add_field("matched_idxs", matched_idxs) return matched_targets def prepare_targets(self, anchors, targets): labels = [] regression_targets = [] for anchors_per_image, targets_per_image in zip(anchors, targets): matched_targets = self.match_targets_to_anchors( anchors_per_image, targets_per_image ) matched_idxs = matched_targets.get_field("matched_idxs") labels_per_image = matched_idxs >= 0 labels_per_image = labels_per_image.to(dtype=torch.float32) # discard anchors that go out 
of the boundaries of the image labels_per_image[~anchors_per_image.get_field("visibility")] = -1 # discard indices that are between thresholds inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS labels_per_image[inds_to_discard] = -1 # compute regression targets regression_targets_per_image = self.box_coder.encode( matched_targets.bbox, anchors_per_image.bbox ) labels.append(labels_per_image) regression_targets.append(regression_targets_per_image) return labels, regression_targets def __call__(self, anchors, objectness, box_regression, targets): """ Arguments: anchors (list[BoxList]) objectness (list[Tensor]) box_regression (list[Tensor]) targets (list[BoxList]) Returns: objectness_loss (Tensor) box_loss (Tensor """ anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors] labels, regression_targets = self.prepare_targets(anchors, targets) sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels) sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1) sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1) sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0) objectness_flattened = [] box_regression_flattened = [] # for each feature level, permute the outputs to make them be in the # same format as the labels. 
Note that the labels are computed for # all feature levels concatenated, so we keep the same representation # for the objectness and the box_regression for objectness_per_level, box_regression_per_level in zip( objectness, box_regression ): N, A, H, W = objectness_per_level.shape objectness_per_level = objectness_per_level.permute(0, 2, 3, 1).reshape( N, -1 ) box_regression_per_level = box_regression_per_level.view(N, -1, 4, H, W) box_regression_per_level = box_regression_per_level.permute(0, 3, 4, 1, 2) box_regression_per_level = box_regression_per_level.reshape(N, -1, 4) objectness_flattened.append(objectness_per_level) box_regression_flattened.append(box_regression_per_level) # concatenate on the first dimension (representing the feature levels), to # take into account the way the labels were generated (with all feature maps # being concatenated as well) objectness = cat(objectness_flattened, dim=1).reshape(-1) box_regression = cat(box_regression_flattened, dim=1).reshape(-1, 4) labels = torch.cat(labels, dim=0) regression_targets = torch.cat(regression_targets, dim=0) box_loss = smooth_l1_loss( box_regression[sampled_pos_inds], regression_targets[sampled_pos_inds], beta=1.0 / 9, size_average=False, ) / (sampled_inds.numel()) objectness_loss = F.binary_cross_entropy_with_logits( objectness[sampled_inds], labels[sampled_inds] ) return objectness_loss, box_loss def make_rpn_loss_evaluator(cfg, box_coder): matcher = Matcher( cfg.MODEL.RPN.FG_IOU_THRESHOLD, cfg.MODEL.RPN.BG_IOU_THRESHOLD, allow_low_quality_matches=True, ) fg_bg_sampler = BalancedPositiveNegativeSampler( cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, cfg.MODEL.RPN.POSITIVE_FRACTION ) loss_evaluator = RPNLossComputation(matcher, fg_bg_sampler, box_coder) return loss_evaluator
6,123
39.026144
87
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/rpn/rpn.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch import torch.nn.functional as F from torch import nn from maskrcnn_benchmark.modeling.box_coder import BoxCoder from .loss import make_rpn_loss_evaluator from .anchor_generator import make_anchor_generator from .inference import make_rpn_postprocessor class RPNHead(nn.Module): """ Adds a simple RPN Head with classification and regression heads """ def __init__(self, in_channels, num_anchors): """ Arguments: in_channels (int): number of channels of the input feature num_anchors (int): number of anchors to be predicted """ super(RPNHead, self).__init__() self.conv = nn.Conv2d( in_channels, in_channels, kernel_size=3, stride=1, padding=1 ) self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1) self.bbox_pred = nn.Conv2d( in_channels, num_anchors * 4, kernel_size=1, stride=1 ) for l in [self.conv, self.cls_logits, self.bbox_pred]: torch.nn.init.normal_(l.weight, std=0.01) torch.nn.init.constant_(l.bias, 0) def forward(self, x): logits = [] bbox_reg = [] for feature in x: t = F.relu(self.conv(feature)) logits.append(self.cls_logits(t)) bbox_reg.append(self.bbox_pred(t)) return logits, bbox_reg class RPNModule(torch.nn.Module): """ Module for RPN computation. Takes feature maps from the backbone and RPN proposals and losses. Works for both FPN and non-FPN. 
""" def __init__(self, cfg): super(RPNModule, self).__init__() self.cfg = cfg.clone() anchor_generator = make_anchor_generator(cfg) in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS head = RPNHead(in_channels, anchor_generator.num_anchors_per_location()[0]) rpn_box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0)) box_selector_train = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=True) box_selector_test = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=False) loss_evaluator = make_rpn_loss_evaluator(cfg, rpn_box_coder) self.anchor_generator = anchor_generator self.head = head self.box_selector_train = box_selector_train self.box_selector_test = box_selector_test self.loss_evaluator = loss_evaluator def forward(self, images, features, targets=None): """ Arguments: images (ImageList): images for which we want to compute the predictions features (list[Tensor]): features computed from the images that are used for computing the predictions. Each tensor in the list correspond to different feature levels targets (list[BoxList): ground-truth boxes present in the image (optional) Returns: boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per image. losses (dict[Tensor]): the losses for the model during training. During testing, it is an empty dict. """ objectness, rpn_box_regression = self.head(features) anchors = self.anchor_generator(images, features) if self.training: return self._forward_train(anchors, objectness, rpn_box_regression, targets) else: return self._forward_test(anchors, objectness, rpn_box_regression) def _forward_train(self, anchors, objectness, rpn_box_regression, targets): if self.cfg.MODEL.RPN_ONLY: # When training an RPN-only model, the loss is determined by the # predicted objectness and rpn_box_regression values and there is # no need to transform the anchors into predicted boxes; this is an # optimization that avoids the unnecessary transformation. 
boxes = anchors else: # For end-to-end models, anchors must be transformed into boxes and # sampled into a training batch. with torch.no_grad(): boxes = self.box_selector_train( anchors, objectness, rpn_box_regression, targets ) loss_objectness, loss_rpn_box_reg = self.loss_evaluator( anchors, objectness, rpn_box_regression, targets ) losses = { "loss_objectness": loss_objectness, "loss_rpn_box_reg": loss_rpn_box_reg, } return boxes, losses def _forward_test(self, anchors, objectness, rpn_box_regression): boxes = self.box_selector_test(anchors, objectness, rpn_box_regression) if self.cfg.MODEL.RPN_ONLY: # For end-to-end models, the RPN proposals are an intermediate state # and don't bother to sort them in decreasing score order. For RPN-only # models, the proposals are the final output and we return them in # high-to-low confidence order. inds = [ box.get_field("objectness").sort(descending=True)[1] for box in boxes ] boxes = [box[ind] for box, ind in zip(boxes, inds)] return boxes, {} def build_rpn(cfg): """ This gives the gist of it. Not super important because it doesn't change as much """ return RPNModule(cfg)
5,453
37.680851
88
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/rpn/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # from .rpn import build_rpn
101
33
71
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/roi_heads.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch from .box_head.box_head import build_roi_box_head from .mask_head.mask_head import build_roi_mask_head class CombinedROIHeads(torch.nn.ModuleDict): """ Combines a set of individual heads (for box prediction or masks) into a single head. """ def __init__(self, cfg, heads): super(CombinedROIHeads, self).__init__(heads) self.cfg = cfg.clone() if cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR: self.mask.feature_extractor = self.box.feature_extractor def forward(self, features, proposals, targets=None): losses = {} # TODO rename x to roi_box_features, if it doesn't increase memory consumption x, detections, loss_box = self.box(features, proposals, targets) losses.update(loss_box) if self.cfg.MODEL.MASK_ON or self.cfg.SEQUENCE.SEQ_ON: mask_features = features # optimization: during training, if we share the feature extractor between # the box and the mask heads, # then we can reuse the features already computed if ( self.training and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR ): mask_features = x # During training, self.box() will return # the unaltered proposals as "detections" # this makes the API consistent during training and testing x, detections, loss_mask = self.mask(mask_features, detections, targets) if loss_mask is not None: losses.update(loss_mask) return x, detections, losses def build_roi_heads(cfg): # individually create the heads, that will be combined together # afterwards roi_heads = [] if not cfg.MODEL.RPN_ONLY: roi_heads.append(("box", build_roi_box_head(cfg))) if cfg.MODEL.MASK_ON or cfg.SEQUENCE.SEQ_ON: roi_heads.append(("mask", build_roi_mask_head(cfg))) # combine individual heads in a single module if roi_heads: roi_heads = CombinedROIHeads(cfg, roi_heads) return roi_heads
2,237
36.932203
86
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/__init__.py
0
0
0
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import numpy as np import torch from PIL import Image from torch import nn import cv2 from torch.nn import functional as F from maskrcnn_benchmark.structures.bounding_box import BoxList # TODO check if want to return a single BoxList or a composite # object class MaskPostProcessor(nn.Module): """ From the results of the CNN, post process the masks by taking the mask corresponding to the class with max probability (which are of fixed size and directly output by the CNN) and return the masks in the mask field of the BoxList. If a masker object is passed, it will additionally project the masks in the image according to the locations in boxes, """ def __init__(self, masker=None): super(MaskPostProcessor, self).__init__() self.masker = masker def forward(self, x, boxes): """ Arguments: x (Tensor): the mask logits boxes (list[BoxList]): bounding boxes that are used as reference, one for ech image Returns: results (list[BoxList]): one BoxList for each image, containing the extra field mask """ mask_prob = x.sigmoid() # select masks coresponding to the predicted classes num_masks = x.shape[0] labels = [bbox.get_field("labels") for bbox in boxes] labels = torch.cat(labels) index = torch.arange(num_masks, device=labels.device) mask_prob = mask_prob[index, labels][:, None] if self.masker: mask_prob = self.masker(mask_prob, boxes) boxes_per_image = [len(box) for box in boxes] mask_prob = mask_prob.split(boxes_per_image, dim=0) results = [] for prob, box in zip(mask_prob, boxes): bbox = BoxList(box.bbox, box.size, mode="xyxy") for field in box.fields(): bbox.add_field(field, box.get_field(field)) bbox.add_field("mask", prob) results.append(bbox) return results # TODO class CharMaskPostProcessor(nn.Module): """ From the results of the CNN, post process the masks by taking the mask corresponding to the class with max probability (which are of fixed size and directly output by the CNN) and return the masks in 
the mask field of the BoxList. If a masker object is passed, it will additionally project the masks in the image according to the locations in boxes, """ def __init__(self, cfg, masker=None): super(CharMaskPostProcessor, self).__init__() self.masker = masker self.cfg = cfg def forward(self, x, char_mask, boxes, seq_outputs=None, seq_scores=None, detailed_seq_scores=None): """ Arguments: x (Tensor): the mask logits char_mask (Tensor): the char mask logits boxes (list[BoxList]): bounding boxes that are used as reference, one for ech image Returns: results (list[BoxList]): one BoxList for each image, containing the extra field mask """ if x is not None: mask_prob = x.sigmoid() mask_prob = mask_prob.squeeze(dim=1)[:, None] if self.masker: mask_prob = self.masker(mask_prob, boxes) boxes_per_image = [len(box) for box in boxes] if x is not None: mask_prob = mask_prob.split(boxes_per_image, dim=0) if self.cfg.MODEL.CHAR_MASK_ON: char_mask_softmax = F.softmax(char_mask, dim=1) char_results = {'char_mask': char_mask_softmax.cpu().numpy(), 'boxes': boxes[0].bbox.cpu().numpy(), 'seq_outputs': seq_outputs, 'seq_scores': seq_scores, 'detailed_seq_scores': detailed_seq_scores} else: char_results = {'char_mask': None, 'boxes': boxes[0].bbox.cpu().numpy(), 'seq_outputs': seq_outputs, 'seq_scores': seq_scores, 'detailed_seq_scores': detailed_seq_scores} results = [] if x is not None: for prob, box in zip(mask_prob, boxes): bbox = BoxList(box.bbox, box.size, mode="xyxy") for field in box.fields(): bbox.add_field(field, box.get_field(field)) bbox.add_field("mask", prob) results.append(bbox) else: for box in boxes: bbox = BoxList(box.bbox, box.size, mode="xyxy") for field in box.fields(): bbox.add_field(field, box.get_field(field)) results.append(bbox) return [results, char_results] class MaskPostProcessorCOCOFormat(MaskPostProcessor): """ From the results of the CNN, post process the results so that the masks are pasted in the image, and additionally convert the results to COCO 
format. """ def forward(self, x, boxes): import pycocotools.mask as mask_util import numpy as np results = super(MaskPostProcessorCOCOFormat, self).forward(x, boxes) for result in results: masks = result.get_field("mask").cpu() rles = [ mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0] for mask in masks ] for rle in rles: rle["counts"] = rle["counts"].decode("utf-8") result.add_field("mask", rles) return results # the next two functions should be merged inside Masker # but are kept here for the moment while we need them # temporarily gor paste_mask_in_image def expand_boxes(boxes, scale): w_half = (boxes[:, 2] - boxes[:, 0]) * .5 h_half = (boxes[:, 3] - boxes[:, 1]) * .5 x_c = (boxes[:, 2] + boxes[:, 0]) * .5 y_c = (boxes[:, 3] + boxes[:, 1]) * .5 w_half *= scale[1] h_half *= scale[0] boxes_exp = torch.zeros_like(boxes) boxes_exp[:, 0] = x_c - w_half boxes_exp[:, 2] = x_c + w_half boxes_exp[:, 1] = y_c - h_half boxes_exp[:, 3] = y_c + h_half return boxes_exp def expand_masks(mask, padding): N = mask.shape[0] M_H = mask.shape[-2] M_W = mask.shape[-1] pad2 = 2 * padding scale = (float(M_H + pad2) / M_H, float(M_W + pad2) / M_W) padded_mask = mask.new_zeros((N, 1, M_H + pad2, M_W + pad2)) padded_mask[:, :, padding:-padding, padding:-padding] = mask return padded_mask, scale def paste_mask_in_image(mask, box, im_h, im_w, thresh=0.5, padding=1): # Need to work on the CPU, where fp16 isn't supported - cast to float to avoid this mask = mask.float() box = box.float() padded_mask, scale = expand_masks(mask[None], padding=padding) mask = padded_mask[0, 0] box = expand_boxes(box[None], scale)[0] box = box.numpy().astype(np.int32) TO_REMOVE = 1 w = box[2] - box[0] + TO_REMOVE h = box[3] - box[1] + TO_REMOVE w = max(w, 1) h = max(h, 1) mask = Image.fromarray(mask.cpu().numpy()) mask = mask.resize((w, h), resample=Image.BILINEAR) mask = np.array(mask, copy=False) if thresh >= 0: mask = np.array(mask > thresh, dtype=np.uint8) mask = torch.from_numpy(mask) else: 
# for visualization and debugging, we also # allow it to return an unmodified mask mask = torch.from_numpy(mask * 255).to(torch.bool) im_mask = torch.zeros((im_h, im_w), dtype=torch.bool) x_0 = max(box[0], 0) x_1 = min(box[2] + 1, im_w) y_0 = max(box[1], 0) y_1 = min(box[3] + 1, im_h) im_mask[y_0:y_1, x_0:x_1] = mask[ (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0]) ] return im_mask class Masker(object): """ Projects a set of masks in an image on the locations specified by the bounding boxes """ def __init__(self, threshold=0.5, padding=1): self.threshold = threshold self.padding = padding def forward_single_image(self, masks, boxes): boxes = boxes.convert("xyxy") im_w, im_h = boxes.size res = [ paste_mask_in_image(mask[0], box, im_h, im_w, self.threshold, self.padding) for mask, box in zip(masks, boxes.bbox) ] if len(res) > 0: res = torch.stack(res, dim=0)[:, None] else: res = masks.new_empty((0, 1, masks.shape[-2], masks.shape[-1])) return res def __call__(self, masks, boxes): # TODO do this properly if isinstance(boxes, BoxList): boxes = [boxes] assert len(boxes) == 1, "Only single image batch supported" result = self.forward_single_image(masks, boxes[0]) return result def make_roi_mask_post_processor(cfg): masker = None if cfg.MODEL.CHAR_MASK_ON or cfg.SEQUENCE.SEQ_ON: mask_post_processor = CharMaskPostProcessor(cfg, masker) else: mask_post_processor = MaskPostProcessor(masker) return mask_post_processor
8,971
34.184314
209
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from torch import nn from torch.nn import functional as F from ..box_head.roi_box_feature_extractors import ResNet50Conv5ROIFeatureExtractor from maskrcnn_benchmark.modeling.poolers import Pooler from maskrcnn_benchmark.layers import Conv2d class MaskRCNNFPNFeatureExtractor(nn.Module): """ Heads for FPN for classification """ def __init__(self, cfg): """ Arguments: num_classes (int): number of output classes input_size (int): number of channels of the input once it's flattened representation_size (int): size of the intermediate representation """ super(MaskRCNNFPNFeatureExtractor, self).__init__() # resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION if cfg.MODEL.CHAR_MASK_ON or cfg.SEQUENCE.SEQ_ON: resolution_h = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_H resolution_w = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_W else: resolution_h = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION resolution_w = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION scales = cfg.MODEL.ROI_MASK_HEAD.POOLER_SCALES sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO pooler = Pooler( output_size=(resolution_h, resolution_w), scales=scales, sampling_ratio=sampling_ratio, ) input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS self.pooler = pooler layers = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS next_feature = input_size self.blocks = [] for layer_idx, layer_features in enumerate(layers, 1): layer_name = "mask_fcn{}".format(layer_idx) module = Conv2d(next_feature, layer_features, 3, stride=1, padding=1) # Caffe2 implementation uses MSRAFill, which in fact # corresponds to kaiming_normal_ in PyTorch nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") nn.init.constant_(module.bias, 0) self.add_module(layer_name, module) next_feature = layer_features self.blocks.append(layer_name) def forward(self, x, proposals): x = self.pooler(x, proposals) for layer_name in self.blocks: x = F.relu(getattr(self, 
layer_name)(x)) return x _ROI_MASK_FEATURE_EXTRACTORS = { "ResNet50Conv5ROIFeatureExtractor": ResNet50Conv5ROIFeatureExtractor, "MaskRCNNFPNFeatureExtractor": MaskRCNNFPNFeatureExtractor, } def make_roi_mask_feature_extractor(cfg): func = _ROI_MASK_FEATURE_EXTRACTORS[cfg.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR] return func(cfg)
2,762
36.849315
87
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/loss.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch # from maskrcnn_benchmark.layers import smooth_l1_loss from maskrcnn_benchmark.modeling.matcher import Matcher from maskrcnn_benchmark.modeling.utils import cat from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou from torch.nn import functional as F def project_masks_on_boxes(segmentation_masks, proposals, discretization_size): """ Given segmentation masks and the bounding boxes corresponding to the location of the masks in the image, this function crops and resizes the masks in the position defined by the boxes. This prepares the masks for them to be fed to the loss computation as the targets. Arguments: segmentation_masks: an instance of SegmentationMask proposals: an instance of BoxList """ masks = [] M = discretization_size device = proposals.bbox.device proposals = proposals.convert("xyxy") assert segmentation_masks.size == proposals.size, "{}, {}".format( segmentation_masks, proposals ) # TODO put the proposals on the CPU, as the representation for the # masks is not efficient GPU-wise (possibly several small tensors for # representing a single instance mask) proposals = proposals.bbox.to(torch.device("cpu")) for segmentation_mask, proposal in zip(segmentation_masks, proposals): # crop the masks, resize them to the desired resolution and # then convert them to the tensor representation, # instead of the list representation that was used cropped_mask = segmentation_mask.crop(proposal) scaled_mask = cropped_mask.resize((M, M)) mask = scaled_mask.convert(mode="mask") masks.append(mask) if len(masks) == 0: return torch.empty(0, dtype=torch.float32, device=device) return torch.stack(masks, dim=0).to(device, dtype=torch.float32) class MaskRCNNLossComputation(object): def __init__(self, proposal_matcher, discretization_size): """ Arguments: proposal_matcher (Matcher) discretization_size (int) """ self.proposal_matcher = proposal_matcher self.discretization_size = 
discretization_size def match_targets_to_proposals(self, proposal, target): match_quality_matrix = boxlist_iou(target, proposal) matched_idxs = self.proposal_matcher(match_quality_matrix) # Mask RCNN needs "labels" and "masks "fields for creating the targets target = target.copy_with_fields(["labels", "masks"]) # get the targets corresponding GT for each proposal # NB: need to clamp the indices because we can have a single # GT in the image, and matched_idxs can be -2, which goes # out of bounds matched_targets = target[matched_idxs.clamp(min=0)] matched_targets.add_field("matched_idxs", matched_idxs) return matched_targets def prepare_targets(self, proposals, targets): labels = [] masks = [] for proposals_per_image, targets_per_image in zip(proposals, targets): matched_targets = self.match_targets_to_proposals( proposals_per_image, targets_per_image ) matched_idxs = matched_targets.get_field("matched_idxs") labels_per_image = matched_targets.get_field("labels") labels_per_image = labels_per_image.to(dtype=torch.int64) # this can probably be removed, but is left here for clarity # and completeness neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD labels_per_image[neg_inds] = 0 # mask scores are only computed on positive samples positive_inds = torch.nonzero(labels_per_image > 0).squeeze(1) segmentation_masks = matched_targets.get_field("masks") segmentation_masks = segmentation_masks[positive_inds] positive_proposals = proposals_per_image[positive_inds] masks_per_image = project_masks_on_boxes( segmentation_masks, positive_proposals, self.discretization_size ) labels.append(labels_per_image) masks.append(masks_per_image) return labels, masks def __call__(self, proposals, mask_logits, targets): """ Arguments: proposals (list[BoxList]) mask_logits (Tensor) targets (list[BoxList]) Return: mask_loss (Tensor): scalar tensor containing the loss """ labels, mask_targets = self.prepare_targets(proposals, targets) labels = cat(labels, dim=0) mask_targets = 
cat(mask_targets, dim=0) positive_inds = torch.nonzero(labels > 0).squeeze(1) labels_pos = labels[positive_inds] # torch.mean (in binary_cross_entropy_with_logits) doesn't # accept empty tensors, so handle it separately if mask_targets.numel() == 0: return mask_logits.sum() * 0 mask_loss = F.binary_cross_entropy_with_logits( mask_logits[positive_inds, labels_pos], mask_targets ) return mask_loss class CharMaskRCNNLossComputation(object): def __init__(self, use_weighted_loss=False): """ Arguments: proposal_matcher (Matcher) discretization_size (int) """ self.use_weighted_loss = use_weighted_loss def __call__( self, proposals, mask_logits, char_mask_logits, mask_targets, char_mask_targets, char_mask_weights, ): """ Arguments: proposals (list[BoxList]) mask_logits (Tensor) targets (list[BoxList]) Return: mask_loss (Tensor): scalar tensor containing the loss """ mask_targets = cat(mask_targets, dim=0) char_mask_targets = cat(char_mask_targets, dim=0) char_mask_weights = cat(char_mask_weights, dim=0) char_mask_weights = char_mask_weights.mean(dim=0) # torch.mean (in binary_cross_entropy_with_logits) doesn't # accept empty tensors, so handle it separately if mask_targets.numel() == 0 or char_mask_targets.numel() == 0: return mask_logits.sum() * 0, char_mask_targets.sum() * 0 mask_loss = F.binary_cross_entropy_with_logits( mask_logits.squeeze(dim=1), mask_targets ) if self.use_weighted_loss: char_mask_loss = F.cross_entropy( char_mask_logits, char_mask_targets, char_mask_weights, ignore_index=-1 ) else: char_mask_loss = F.cross_entropy( char_mask_logits, char_mask_targets, ignore_index=-1 ) return mask_loss, char_mask_loss class SeqMaskRCNNLossComputation(object): def __init__(self): """ Arguments: proposal_matcher (Matcher) discretization_size (int) """ def __call__( self, proposals, mask_logits, mask_targets, ): """ Arguments: proposals (list[BoxList]) mask_logits (Tensor) targets (list[BoxList]) Return: mask_loss (Tensor): scalar tensor containing the loss """ 
mask_targets = cat(mask_targets, dim=0) # torch.mean (in binary_cross_entropy_with_logits) doesn't # accept empty tensors, so handle it separately if mask_targets.numel() == 0: return mask_logits.sum() * 0 mask_loss = F.binary_cross_entropy_with_logits( mask_logits.squeeze(dim=1), mask_targets ) return mask_loss def make_roi_mask_loss_evaluator(cfg): matcher = Matcher( cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD, cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD, allow_low_quality_matches=False, ) if cfg.MODEL.CHAR_MASK_ON: loss_evaluator = CharMaskRCNNLossComputation( use_weighted_loss=cfg.MODEL.ROI_MASK_HEAD.USE_WEIGHTED_CHAR_MASK ) else: if cfg.SEQUENCE.SEQ_ON: loss_evaluator = SeqMaskRCNNLossComputation() else: loss_evaluator = MaskRCNNLossComputation( matcher, cfg.MODEL.ROI_MASK_HEAD.RESOLUTION ) return loss_evaluator
8,431
34.428571
87
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_seq_predictors.py
# Written by Minghui Liao import math import random import numpy as np import torch from maskrcnn_benchmark.utils.chars import char2num, num2char from torch import nn from torch.nn import functional as F gpu_device = torch.device("cuda") cpu_device = torch.device("cpu") def reduce_mul(l): out = 1.0 for x in l: out *= x return out def check_all_done(seqs): for seq in seqs: if not seq[-1]: return False return True # TODO class SequencePredictor(nn.Module): def __init__(self, cfg, dim_in): super(SequencePredictor, self).__init__() self.cfg = cfg if cfg.SEQUENCE.TWO_CONV: self.seq_encoder = nn.Sequential( nn.Conv2d(dim_in, dim_in, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=2, ceil_mode=True), nn.Conv2d(dim_in, 256, 3, padding=1), nn.ReLU(inplace=True), ) else: self.seq_encoder = nn.Sequential( nn.Conv2d(dim_in, 256, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=2, ceil_mode=True), ) x_onehot_size = int(cfg.SEQUENCE.RESIZE_WIDTH / 2) y_onehot_size = int(cfg.SEQUENCE.RESIZE_HEIGHT / 2) self.seq_decoder = BahdanauAttnDecoderRNN( 256, cfg.SEQUENCE.NUM_CHAR, cfg.SEQUENCE.NUM_CHAR, n_layers=1, dropout_p=0.1, onehot_size = (y_onehot_size, x_onehot_size) ) # self.criterion_seq_decoder = nn.NLLLoss(ignore_index = -1, reduce=False) self.criterion_seq_decoder = nn.NLLLoss(ignore_index=-1, reduction="none") # self.rescale = nn.Upsample(size=(16, 64), mode="bilinear", align_corners=False) self.rescale = nn.Upsample(size=(cfg.SEQUENCE.RESIZE_HEIGHT, cfg.SEQUENCE.RESIZE_WIDTH), mode="bilinear", align_corners=False) self.x_onehot = nn.Embedding(x_onehot_size, x_onehot_size) self.x_onehot.weight.data = torch.eye(x_onehot_size) self.y_onehot = nn.Embedding(y_onehot_size, y_onehot_size) self.y_onehot.weight.data = torch.eye(y_onehot_size) for name, param in self.named_parameters(): if "bias" in name: nn.init.constant_(param, 0) elif "weight" in name: # Caffe2 implementation uses MSRAFill, which in fact # corresponds to kaiming_normal_ in PyTorch 
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") def forward( self, x, decoder_targets=None, word_targets=None, use_beam_search=False ): rescale_out = self.rescale(x) seq_decoder_input = self.seq_encoder(rescale_out) x_onehot_size = int(self.cfg.SEQUENCE.RESIZE_WIDTH / 2) y_onehot_size = int(self.cfg.SEQUENCE.RESIZE_HEIGHT / 2) x_t, y_t = np.meshgrid(np.linspace(0, x_onehot_size - 1, x_onehot_size), np.linspace(0, y_onehot_size - 1, y_onehot_size)) x_t = torch.LongTensor(x_t, device=cpu_device).cuda() y_t = torch.LongTensor(y_t, device=cpu_device).cuda() x_onehot_embedding = ( self.x_onehot(x_t) .transpose(0, 2) .transpose(1, 2) .repeat(seq_decoder_input.size(0), 1, 1, 1) ) y_onehot_embedding = ( self.y_onehot(y_t) .transpose(0, 2) .transpose(1, 2) .repeat(seq_decoder_input.size(0), 1, 1, 1) ) seq_decoder_input_loc = torch.cat( [seq_decoder_input, x_onehot_embedding, y_onehot_embedding], 1 ) seq_decoder_input_reshape = ( seq_decoder_input_loc.view( seq_decoder_input_loc.size(0), seq_decoder_input_loc.size(1), -1 ) .transpose(0, 2) .transpose(1, 2) ) if self.training: bos_onehot = np.zeros( (seq_decoder_input_reshape.size(1), 1), dtype=np.int32 ) bos_onehot[:, 0] = self.cfg.SEQUENCE.BOS_TOKEN decoder_input = torch.tensor(bos_onehot.tolist(), device=gpu_device) decoder_hidden = torch.zeros( (seq_decoder_input_reshape.size(1), 256), device=gpu_device ) use_teacher_forcing = ( True if random.random() < self.cfg.SEQUENCE.TEACHER_FORCE_RATIO else False ) target_length = decoder_targets.size(1) if use_teacher_forcing: # Teacher forcing: Feed the target as the next input for di in range(target_length): decoder_output, decoder_hidden, decoder_attention = self.seq_decoder( decoder_input, decoder_hidden, seq_decoder_input_reshape ) if di == 0: loss_seq_decoder = self.criterion_seq_decoder( decoder_output, word_targets[:, di] ) else: loss_seq_decoder += self.criterion_seq_decoder( decoder_output, word_targets[:, di] ) decoder_input = decoder_targets[:, di] # 
Teacher forcing else: # Without teacher forcing: use its own predictions as the next input for di in range(target_length): decoder_output, decoder_hidden, decoder_attention = self.seq_decoder( decoder_input, decoder_hidden, seq_decoder_input_reshape ) topv, topi = decoder_output.topk(1) decoder_input = topi.squeeze( 1 ).detach() # detach from history as input if di == 0: loss_seq_decoder = self.criterion_seq_decoder( decoder_output, word_targets[:, di] ) else: loss_seq_decoder += self.criterion_seq_decoder( decoder_output, word_targets[:, di] ) loss_seq_decoder = loss_seq_decoder.sum() / loss_seq_decoder.size(0) loss_seq_decoder = 0.2 * loss_seq_decoder return loss_seq_decoder else: words = [] decoded_scores = [] detailed_decoded_scores = [] # real_length = 0 if use_beam_search: for batch_index in range(seq_decoder_input_reshape.size(1)): decoder_hidden = torch.zeros((1, 256), device=gpu_device) word = [] char_scores = [] detailed_char_scores = [] top_seqs = self.beam_search( seq_decoder_input_reshape[:, batch_index : batch_index + 1, :], decoder_hidden, beam_size=6, max_len=self.cfg.SEQUENCE.MAX_LENGTH, ) top_seq = top_seqs[0] for character in top_seq[1:]: character_index = character[0] if character_index == self.cfg.SEQUENCE.NUM_CHAR - 1: char_scores.append(character[1]) detailed_char_scores.append(character[2]) break else: if character_index == 0: word.append("~") char_scores.append(0.0) else: word.append(num2char(character_index)) char_scores.append(character[1]) detailed_char_scores.append(character[2]) words.append("".join(word)) decoded_scores.append(char_scores) detailed_decoded_scores.append(detailed_char_scores) else: for batch_index in range(seq_decoder_input_reshape.size(1)): bos_onehot = np.zeros((1, 1), dtype=np.int32) bos_onehot[:, 0] = self.cfg.SEQUENCE.BOS_TOKEN decoder_input = torch.tensor(bos_onehot.tolist(), device=gpu_device) decoder_hidden = torch.zeros((1, 256), device=gpu_device) word = [] char_scores = [] for di in 
range(self.cfg.SEQUENCE.MAX_LENGTH): decoder_output, decoder_hidden, decoder_attention = self.seq_decoder( decoder_input, decoder_hidden, seq_decoder_input_reshape[ :, batch_index : batch_index + 1, : ], ) # decoder_attentions[di] = decoder_attention.data topv, topi = decoder_output.data.topk(1) char_scores.append(topv.item()) if topi.item() == self.cfg.SEQUENCE.NUM_CHAR - 1: break else: if topi.item() == 0: word.append("~") else: word.append(num2char(topi.item())) # real_length = di decoder_input = topi.squeeze(1).detach() words.append("".join(word)) decoded_scores.append(char_scores) return words, decoded_scores, detailed_decoded_scores def beam_search_step(self, encoder_context, top_seqs, k): all_seqs = [] for seq in top_seqs: seq_score = reduce_mul([_score for _, _score, _, _ in seq]) if seq[-1][0] == self.cfg.SEQUENCE.NUM_CHAR - 1: all_seqs.append((seq, seq_score, seq[-1][2], True)) continue decoder_hidden = seq[-1][-1][0] onehot = np.zeros((1, 1), dtype=np.int32) onehot[:, 0] = seq[-1][0] decoder_input = torch.tensor(onehot.tolist(), device=gpu_device) decoder_output, decoder_hidden, decoder_attention = self.seq_decoder( decoder_input, decoder_hidden, encoder_context ) detailed_char_scores = decoder_output.cpu().numpy() # print(decoder_output.shape) scores, candidates = decoder_output.data[:, 1:].topk(k) for i in range(k): character_score = scores[:, i] character_index = candidates[:, i] score = seq_score * character_score.item() char_score = seq_score * detailed_char_scores rs_seq = seq + [ ( character_index.item() + 1, character_score.item(), char_score, [decoder_hidden], ) ] done = character_index.item() + 1 == self.cfg.SEQUENCE.NUM_CHAR - 1 all_seqs.append((rs_seq, score, char_score, done)) all_seqs = sorted(all_seqs, key=lambda seq: seq[1], reverse=True) topk_seqs = [seq for seq, _, _, _ in all_seqs[:k]] all_done = check_all_done(all_seqs[:k]) return topk_seqs, all_done def beam_search(self, encoder_context, decoder_hidden, beam_size=6, max_len=32): 
char_score = np.zeros(self.cfg.SEQUENCE.NUM_CHAR) top_seqs = [[(self.cfg.SEQUENCE.BOS_TOKEN, 1.0, char_score, [decoder_hidden])]] # loop for _ in range(max_len): top_seqs, all_done = self.beam_search_step( encoder_context, top_seqs, beam_size ) if all_done: break return top_seqs class Attn(nn.Module): def __init__(self, method, hidden_size, embed_size, onehot_size): super(Attn, self).__init__() self.method = method self.hidden_size = hidden_size self.embed_size = embed_size self.attn = nn.Linear(2 * self.hidden_size + onehot_size, hidden_size) # self.attn = nn.Linear(hidden_size, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.normal_(mean=0, std=stdv) def forward(self, hidden, encoder_outputs): """ :param hidden: previous hidden state of the decoder, in shape (B, hidden_size) :param encoder_outputs: encoder outputs from Encoder, in shape (H*W, B, hidden_size) :return attention energies in shape (B, H*W) """ max_len = encoder_outputs.size(0) # this_batch_size = encoder_outputs.size(1) H = hidden.repeat(max_len, 1, 1).transpose(0, 1) # (B, H*W, hidden_size) encoder_outputs = encoder_outputs.transpose(0, 1) # (B, H*W, hidden_size) attn_energies = self.score( H, encoder_outputs ) # compute attention score (B, H*W) return F.softmax(attn_energies, dim=1).unsqueeze( 1 ) # normalize with softmax (B, 1, H*W) def score(self, hidden, encoder_outputs): energy = torch.tanh( self.attn(torch.cat([hidden, encoder_outputs], 2)) ) # (B, H*W, 2*hidden_size+H+W)->(B, H*W, hidden_size) energy = energy.transpose(2, 1) # (B, hidden_size, H*W) v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze( 1 ) # (B, 1, hidden_size) energy = torch.bmm(v, energy) # (B, 1, H*W) return energy.squeeze(1) # (B, H*W) class BahdanauAttnDecoderRNN(nn.Module): def __init__( self, hidden_size, embed_size, output_size, n_layers=1, dropout_p=0, bidirectional=False, onehot_size = (8, 32) ): super(BahdanauAttnDecoderRNN, self).__init__() # 
Define parameters self.hidden_size = hidden_size self.embed_size = embed_size self.output_size = output_size self.n_layers = n_layers self.dropout_p = dropout_p # Define layers self.embedding = nn.Embedding(output_size, embed_size) self.embedding.weight.data = torch.eye(embed_size) # self.dropout = nn.Dropout(dropout_p) self.word_linear = nn.Linear(embed_size, hidden_size) self.attn = Attn("concat", hidden_size, embed_size, onehot_size[0] + onehot_size[1]) self.rnn = nn.GRUCell(2 * hidden_size + onehot_size[0] + onehot_size[1], hidden_size) self.out = nn.Linear(hidden_size, output_size) def forward(self, word_input, last_hidden, encoder_outputs): """ :param word_input: word input for current time step, in shape (B) :param last_hidden: last hidden stat of the decoder, in shape (layers*direction*B, hidden_size) :param encoder_outputs: encoder outputs in shape (H*W, B, C) :return decoder output """ # Get the embedding of the current input word (last output word) word_embedded_onehot = self.embedding(word_input).view( 1, word_input.size(0), -1 ) # (1,B,embed_size) word_embedded = self.word_linear(word_embedded_onehot) # (1, B, hidden_size) attn_weights = self.attn(last_hidden, encoder_outputs) # (B, 1, H*W) context = attn_weights.bmm( encoder_outputs.transpose(0, 1) ) # (B, 1, H*W) * (B, H*W, C) = (B,1,C) context = context.transpose(0, 1) # (1,B,C) # Combine embedded input word and attended context, run through RNN # 2 * hidden_size + W + H: 256 + 256 + 32 + 8 = 552 rnn_input = torch.cat((word_embedded, context), 2) last_hidden = last_hidden.view(last_hidden.size(0), -1) rnn_input = rnn_input.view(word_input.size(0), -1) hidden = self.rnn(rnn_input, last_hidden) if not self.training: output = F.softmax(self.out(hidden), dim=1) else: output = F.log_softmax(self.out(hidden), dim=1) # Return final output, hidden state # print(output.shape) return output, hidden, attn_weights def make_roi_seq_predictor(cfg, dim_in): return SequencePredictor(cfg, dim_in)
16,629
42.534031
134
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from maskrcnn_benchmark.layers import Conv2d, ConvTranspose2d
from torch import nn
from torch.nn import functional as F

from .roi_seq_predictors import make_roi_seq_predictor


def _num_mask_head_inputs(cfg, dim_reduced, loose_attention_match=False):
    """Return the number of input channels expected by ``conv5_mask``.

    With an FPN backbone the count depends on how the segmentation-proposal
    mask is mixed into the RoI features (MIX_OPTION):
      * 'CAT'                       -> one extra mask channel;
      * 'MIX' / 'ATTENTION_CHANNEL' -> features are doubled;
      * anything else               -> plain ``dim_reduced``.
    Without FPN, the C4 head is fed res5 features
    (RES2_OUT_CHANNELS * 2**(stage_index - 1)).

    loose_attention_match: when True, any option *containing*
    'ATTENTION_CHANNEL' (e.g. the *_SPLIT variants) also doubles the
    channels. This preserves the historical per-class behaviour of
    SeqCharMaskRCNNC4Predictor, which used a substring test where the
    other predictors used exact equality.
    """
    if cfg.MODEL.ROI_HEADS.USE_FPN:
        mix_option = cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION
        if mix_option == 'CAT':
            return dim_reduced + 1
        if loose_attention_match:
            is_attention = 'ATTENTION_CHANNEL' in mix_option
        else:
            is_attention = mix_option == 'ATTENTION_CHANNEL'
        if mix_option == 'MIX' or is_attention:
            return dim_reduced * 2
        return dim_reduced
    stage_index = 4
    stage2_relative_factor = 2 ** (stage_index - 1)
    res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    return res2_out_channels * stage2_relative_factor


def _init_predictor_params(module):
    """Initialise every parameter of ``module`` (including submodules such
    as the sequence predictor): zero biases, Kaiming-normal weights.

    # Caffe2 implementation uses MSRAFill, which in fact
    # corresponds to kaiming_normal_ in PyTorch
    """
    for name, param in module.named_parameters():
        if "bias" in name:
            nn.init.constant_(param, 0)
        elif "weight" in name:
            nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")


class MaskRCNNC4Predictor(nn.Module):
    """Plain Mask R-CNN predictor: 2x deconv upsample + 1x1 mask classifier."""

    def __init__(self, cfg):
        super(MaskRCNNC4Predictor, self).__init__()
        num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
        num_inputs = _num_mask_head_inputs(cfg, dim_reduced)
        self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
        self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
        _init_predictor_params(self)

    def forward(self, x):
        x = F.relu(self.conv5_mask(x))
        return self.mask_fcn_logits(x)


class CharMaskRCNNC4Predictor(nn.Module):
    """Mask predictor with an extra per-character segmentation branch."""

    def __init__(self, cfg):
        super(CharMaskRCNNC4Predictor, self).__init__()
        # num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        num_classes = 1  # class-agnostic text/non-text mask
        char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
        dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
        num_inputs = _num_mask_head_inputs(cfg, dim_reduced)
        self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
        self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
        if cfg.MODEL.CHAR_MASK_ON:
            self.char_mask_fcn_logits = Conv2d(dim_reduced, char_num_classes, 1, 1, 0)
        # NOTE(review): forward() unconditionally uses char_mask_fcn_logits,
        # so this predictor only works with MODEL.CHAR_MASK_ON enabled
        # (same latent AttributeError as the original code).
        _init_predictor_params(self)

    def forward(self, x):
        x = F.relu(self.conv5_mask(x))
        return self.mask_fcn_logits(x), self.char_mask_fcn_logits(x)


class SeqCharMaskRCNNC4Predictor(nn.Module):
    """Char-mask predictor plus an attentional sequence decoder."""

    def __init__(self, cfg):
        super(SeqCharMaskRCNNC4Predictor, self).__init__()
        num_classes = 1  # class-agnostic text/non-text mask
        char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
        dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
        # loose match kept for compatibility: this predictor historically
        # doubled the inputs for *any* ATTENTION_CHANNEL_* option.
        num_inputs = _num_mask_head_inputs(cfg, dim_reduced, loose_attention_match=True)
        self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
        self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
        if cfg.MODEL.CHAR_MASK_ON:
            self.char_mask_fcn_logits = Conv2d(dim_reduced, char_num_classes, 1, 1, 0)
            self.seq = make_roi_seq_predictor(cfg, dim_reduced)
        # NOTE(review): forward() requires char_mask_fcn_logits and seq,
        # i.e. MODEL.CHAR_MASK_ON must be enabled (as in the original).
        _init_predictor_params(self)

    def forward(self, x, decoder_targets=None, word_targets=None):
        x = F.relu(self.conv5_mask(x))
        if self.training:
            loss_seq_decoder = self.seq(
                x, decoder_targets=decoder_targets, word_targets=word_targets
            )
            return (
                self.mask_fcn_logits(x),
                self.char_mask_fcn_logits(x),
                loss_seq_decoder,
            )
        decoded_chars, decoded_scores, detailed_decoded_scores = self.seq(
            x, use_beam_search=True
        )
        return (
            self.mask_fcn_logits(x),
            self.char_mask_fcn_logits(x),
            decoded_chars,
            decoded_scores,
            detailed_decoded_scores,
        )


class SeqMaskRCNNC4Predictor(nn.Module):
    """Mask predictor plus sequence decoder (no char-mask branch)."""

    def __init__(self, cfg):
        super(SeqMaskRCNNC4Predictor, self).__init__()
        num_classes = 1  # class-agnostic text/non-text mask
        dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
        num_inputs = _num_mask_head_inputs(cfg, dim_reduced)
        self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
        self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
        if cfg.SEQUENCE.SEQ_ON:
            self.seq = make_roi_seq_predictor(cfg, dim_reduced)
        # NOTE(review): forward() requires self.seq, i.e. SEQUENCE.SEQ_ON
        # must be enabled (as in the original).
        _init_predictor_params(self)

    def forward(self, x, decoder_targets=None, word_targets=None):
        x = F.relu(self.conv5_mask(x))
        if self.training:
            loss_seq_decoder = self.seq(
                x, decoder_targets=decoder_targets, word_targets=word_targets
            )
            return (
                self.mask_fcn_logits(x),
                loss_seq_decoder,
            )
        decoded_chars, decoded_scores, detailed_decoded_scores = self.seq(
            x, use_beam_search=True
        )
        return (
            self.mask_fcn_logits(x),
            decoded_chars,
            decoded_scores,
            detailed_decoded_scores,
        )


class SeqRCNNC4Predictor(nn.Module):
    """Sequence-decoder-only predictor; the mask slot of the output tuple
    is always None."""

    def __init__(self, cfg):
        super(SeqRCNNC4Predictor, self).__init__()
        dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
        num_inputs = _num_mask_head_inputs(cfg, dim_reduced)
        self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
        if cfg.SEQUENCE.SEQ_ON:
            self.seq = make_roi_seq_predictor(cfg, dim_reduced)
        _init_predictor_params(self)

    def forward(self, x, decoder_targets=None, word_targets=None):
        x = F.relu(self.conv5_mask(x))
        if self.training:
            loss_seq_decoder = self.seq(
                x, decoder_targets=decoder_targets, word_targets=word_targets
            )
            return (
                None,
                loss_seq_decoder,
            )
        decoded_chars, decoded_scores, detailed_decoded_scores = self.seq(
            x, use_beam_search=True
        )
        return (
            None,
            decoded_chars,
            decoded_scores,
            detailed_decoded_scores,
        )


_ROI_MASK_PREDICTOR = {
    "MaskRCNNC4Predictor": MaskRCNNC4Predictor,
    "CharMaskRCNNC4Predictor": CharMaskRCNNC4Predictor,
    "SeqCharMaskRCNNC4Predictor": SeqCharMaskRCNNC4Predictor,
    "SeqMaskRCNNC4Predictor": SeqMaskRCNNC4Predictor,
    "SeqRCNNC4Predictor": SeqRCNNC4Predictor,
}


def make_roi_mask_predictor(cfg):
    """Instantiate the predictor named by MODEL.ROI_MASK_HEAD.PREDICTOR."""
    func = _ROI_MASK_PREDICTOR[cfg.MODEL.ROI_MASK_HEAD.PREDICTOR]
    return func(cfg)
11,124
40.356877
122
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/__init__.py
0
0
0
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn

from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.modeling.utils import cat
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou

from .inference import make_roi_mask_post_processor
from .loss import make_roi_mask_loss_evaluator
from .roi_mask_feature_extractors import make_roi_mask_feature_extractor
from .roi_mask_predictors import make_roi_mask_predictor
from maskrcnn_benchmark.layers import Conv2d
import math


def conv3x3(in_planes, out_planes, stride=1, has_bias=False):
    """3x3 convolution with padding."""
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=has_bias
    )


def conv3x3_bn_relu(in_planes, out_planes, stride=1, has_bias=False):
    """3x3 conv -> BatchNorm -> ReLU block."""
    return nn.Sequential(
        conv3x3(in_planes, out_planes, stride),
        nn.BatchNorm2d(out_planes),
        nn.ReLU(inplace=True),
    )


def keep_only_positive_boxes(boxes, batch_size_per_im):
    """
    Given a set of BoxList containing the `labels` field,
    return a set of BoxList for which `labels > 0`, keeping at most
    ``batch_size_per_im`` positives per image.

    Arguments:
        boxes (list of BoxList)
        batch_size_per_im (int): cap on positives kept per image

    Returns:
        positive_boxes (list of BoxList): the kept boxes
        positive_inds (list of BoolTensor): per-image boolean masks over
            the original proposals marking the kept ones
    """
    assert isinstance(boxes, (list, tuple))
    assert isinstance(boxes[0], BoxList)
    assert boxes[0].has_field("labels")
    positive_boxes = []
    positive_inds = []
    for boxes_per_image in boxes:
        labels = boxes_per_image.get_field("labels")
        inds_mask = labels > 0
        inds = inds_mask.nonzero().squeeze(1)
        if len(inds) > batch_size_per_im:
            # keep the first batch_size_per_im positives, unmark the rest
            new_inds = inds[:batch_size_per_im]
            inds_mask[inds[batch_size_per_im:]] = 0
        else:
            new_inds = inds
        positive_boxes.append(boxes_per_image[new_inds])
        positive_inds.append(inds_mask)
    return positive_boxes, positive_inds


def project_char_masks_on_boxes(
    segmentation_masks, segmentation_char_masks, proposals, discretization_size
):
    """
    Given segmentation masks and the bounding boxes corresponding
    to the location of the masks in the image, this function
    crops and resizes the masks in the position defined by the
    boxes. This prepares the masks for them to be fed to the
    loss computation as the targets.

    Arguments:
        segmentation_masks: an instance of SegmentationMask
        segmentation_char_masks: per-character masks aligned one-to-one
            with ``segmentation_masks``
        proposals: an instance of BoxList
        discretization_size: (H, W) resolution of the produced targets

    Returns a 5-tuple of stacked tensors:
        masks (float32), char_masks (long), char_mask_weights (float32),
        decoder_targets (long), word_targets (long)
    """
    masks = []
    char_masks = []
    char_mask_weights = []
    decoder_targets = []
    word_targets = []
    M_H, M_W = discretization_size[0], discretization_size[1]
    device = proposals.bbox.device
    proposals = proposals.convert("xyxy")
    assert segmentation_masks.size == proposals.size, "{}, {}".format(
        segmentation_masks, proposals
    )
    assert segmentation_char_masks.size == proposals.size, "{}, {}".format(
        segmentation_char_masks, proposals
    )
    # TODO put the proposals on the CPU, as the representation for the
    # masks is not efficient GPU-wise (possibly several small tensors for
    # representing a single instance mask)
    proposals = proposals.bbox.to(torch.device("cpu"))
    for segmentation_mask, segmentation_char_mask, proposal in zip(
        segmentation_masks, segmentation_char_masks, proposals
    ):
        # crop the masks, resize them to the desired resolution and
        # then convert them to the tensor representation,
        # instead of the list representation that was used
        cropped_mask = segmentation_mask.crop(proposal)
        scaled_mask = cropped_mask.resize((M_W, M_H))
        mask = scaled_mask.convert(mode="mask")
        masks.append(mask)
        cropped_char_mask = segmentation_char_mask.crop(proposal)
        scaled_char_mask = cropped_char_mask.resize((M_W, M_H))
        char_mask, char_mask_weight, decoder_target, word_target = scaled_char_mask.convert(
            mode="seq_char_mask"
        )
        char_masks.append(char_mask)
        char_mask_weights.append(char_mask_weight)
        decoder_targets.append(decoder_target)
        word_targets.append(word_target)
    if len(masks) == 0:
        # BUGFIX: this branch previously returned only 4 tensors while the
        # non-empty path returns 5 (word_targets was missing), which broke
        # the 5-way unpacking in ROIMaskHead.prepare_targets.
        return (
            torch.empty(0, dtype=torch.float32, device=device),
            torch.empty(0, dtype=torch.long, device=device),
            torch.empty(0, dtype=torch.float32, device=device),
            torch.empty(0, dtype=torch.long, device=device),
            torch.empty(0, dtype=torch.long, device=device),
        )
    return (
        torch.stack(masks, dim=0).to(device, dtype=torch.float32),
        torch.stack(char_masks, dim=0).to(device, dtype=torch.long),
        torch.stack(char_mask_weights, dim=0).to(device, dtype=torch.float32),
        torch.stack(decoder_targets, dim=0).to(device, dtype=torch.long),
        torch.stack(word_targets, dim=0).to(device, dtype=torch.long),
    )


class ROIMaskHead(torch.nn.Module):
    """RoI mask head: extracts RoI features, optionally fuses the
    segmentation-proposal soft mask into them (one of several MIX_OPTION
    strategies), then runs the mask / char-mask / sequence predictors and
    their losses or post-processing."""

    def __init__(self, cfg, proposal_matcher, discretization_size):
        super(ROIMaskHead, self).__init__()
        self.proposal_matcher = proposal_matcher
        self.discretization_size = discretization_size
        self.cfg = cfg.clone()
        self.feature_extractor = make_roi_mask_feature_extractor(cfg)
        self.predictor = make_roi_mask_predictor(cfg)
        self.post_processor = make_roi_mask_post_processor(cfg)
        self.loss_evaluator = make_roi_mask_loss_evaluator(cfg)
        # Optional feature/mask fusion modules, selected by MIX_OPTION.
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION':
            # spatial attention map predicted from [features, mask]
            self.mask_attention = nn.Sequential(
                conv3x3_bn_relu(cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] + 1, 32),
                conv3x3(32, 1),
                nn.Sigmoid()
            )
            self.mask_attention.apply(self.weights_init)
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_DOWN':
            # like ATTENTION but predicted at 1/4 resolution, then upsampled
            self.mask_attention = nn.Sequential(
                conv3x3_bn_relu(cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] + 1, 32, stride=2),
                conv3x3(32, 1, stride=2),
                nn.Upsample(scale_factor=4, mode='nearest'),
                nn.Sigmoid()
            )
            self.mask_attention.apply(self.weights_init)
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
            # channel attention over [features, masked features]
            num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] * 2
            self.channel_attention = nn.Sequential(
                nn.MaxPool2d(2),
                conv3x3_bn_relu(num_channel, num_channel, stride=2),
                conv3x3(num_channel, num_channel, stride=2),
                nn.AdaptiveAvgPool2d((1, 1)),
                nn.Sigmoid()
            )
            self.channel_attention.apply(self.weights_init)
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_SPLIT' or self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_SPLIT_BINARY':
            # two softmax-normalised scalar weights, one for the plain half
            # and one for the masked half of the concatenated features
            num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] * 2
            self.channel_attention = nn.Sequential(
                nn.MaxPool2d(2),
                conv3x3_bn_relu(num_channel, int(num_channel / 4), stride=2),
                conv3x3(int(num_channel / 4), 2, stride=2),
                nn.AdaptiveAvgPool2d((1, 1)),
                nn.Softmax(dim=1)
            )
            self.channel_attention.apply(self.weights_init)
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_2':
            # SE-style attention computed from global average pooling only
            num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] * 2
            self.channel_attention_2 = nn.Sequential(
                nn.AdaptiveAvgPool2d((1, 1)),
                nn.Conv2d(
                    num_channel, num_channel, kernel_size=1, stride=1, padding=0
                ),
                nn.Conv2d(
                    num_channel, num_channel, kernel_size=1, stride=1, padding=0
                ),
                nn.Softmax(dim=1)
            )
            self.channel_attention_2.apply(self.weights_init)
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_TANH':
            # additive (tanh) channel attention; feature_dim presumably
            # equals 2*H*W of the pooled maps -- TODO confirm against the
            # configured pooler resolution
            feature_dim = 128
            num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] * 2
            self.mask_pooler = nn.Sequential(
                nn.MaxPool2d(2),
                conv3x3_bn_relu(num_channel, num_channel, stride=2),
            )
            self.attn = nn.Linear(feature_dim, feature_dim)
            self.v = nn.Parameter(torch.rand(feature_dim))
            stdv = 1.0 / math.sqrt(self.v.size(0))
            self.v.data.normal_(mean=0, std=stdv)
            self.mask_pooler.apply(self.weights_init)
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'NEW_CAT':
            # dilated convs enlarging the receptive field of fused features
            # (attribute name spelling kept: it is part of checkpoints)
            num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
            self.enlarge_recepitve_field = nn.Sequential(
                nn.Conv2d(
                    2 * num_channel, num_channel, kernel_size=3, stride=1, padding=2, dilation=2
                ),
                nn.Conv2d(
                    num_channel, num_channel, kernel_size=3, stride=1, padding=2, dilation=2
                ),
            )
            self.enlarge_recepitve_field.apply(self.weights_init)
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'NEW_MASK':
            # re-predict a soft mask from the fused features, gate x with it
            num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
            self.new_mask = nn.Sequential(
                nn.Conv2d(
                    2 * num_channel, num_channel, kernel_size=3, stride=1, padding=2, dilation=2
                ),
                nn.Conv2d(
                    num_channel, 32, kernel_size=3, stride=1, padding=2, dilation=2
                ),
                nn.Conv2d(
                    32, 1, kernel_size=3, stride=1, padding=2, dilation=2
                ),
                nn.Sigmoid()
            )
            self.new_mask.apply(self.weights_init)

    def weights_init(self, m):
        """Kaiming init for conv weights; ones / small-eps for batch norms."""
        classname = m.__class__.__name__
        if classname.find("Conv") != -1:
            nn.init.kaiming_normal_(m.weight.data)
        elif classname.find("BatchNorm") != -1:
            m.weight.data.fill_(1.0)
            m.bias.data.fill_(1e-4)

    def step_function(self, x):
        """Smooth (sigmoid-like) approximation of a 0/1 step at x = 0.5."""
        return torch.reciprocal(1 + torch.exp(-50 * (x - 0.5)))

    def channel_attention_tanh(self, feature, mask):
        """
        Additive attention over channels.

        :param feature: pooled feature maps, shape (B, C, H, W)
        :param mask: pooled soft mask, shape (B, 1, H, W)
        :return: attention weights of shape (B, C, 1, 1), softmax over C
        """
        feature = feature.reshape((feature.shape[0], feature.shape[1], -1))  # (B, C, H*W)
        masks = mask.reshape((mask.shape[0], mask.shape[1], -1)).repeat(1, feature.shape[1], 1)  # (B, C, H*W)
        fuse_feature = torch.cat([feature, masks], 2)
        energy = torch.tanh(self.attn(fuse_feature))  # (B, C, 2*H*W)->(B, C, 2*H*W)
        energy = energy.transpose(2, 1)  # (B, 2*H*W, C)
        v = self.v.repeat(feature.shape[0], 1).unsqueeze(1)  # (B, 1, 2*H*W)
        energy = torch.bmm(v, energy)  # (B, 1, C)
        energy = energy.squeeze(1)  # (B, C)
        return nn.functional.softmax(energy, dim=1).unsqueeze(2).unsqueeze(3)

    def match_targets_to_proposals(self, proposal, target):
        """Match each proposal to a ground-truth box by IoU."""
        match_quality_matrix = boxlist_iou(target, proposal)
        matched_idxs = self.proposal_matcher(match_quality_matrix)
        # Mask RCNN needs "labels" and "masks" fields for creating the targets
        target = target.copy_with_fields(["labels", "masks", "char_masks"])
        # get the targets corresponding GT for each proposal
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)
        return matched_targets

    def prepare_targets(self, proposals, targets):
        """Build per-image mask / char-mask / sequence training targets."""
        masks = []
        char_masks = []
        char_mask_weights = []
        decoder_targets = []
        word_targets = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            matched_targets = self.match_targets_to_proposals(
                proposals_per_image, targets_per_image
            )
            matched_idxs = matched_targets.get_field("matched_idxs")
            labels_per_image = matched_targets.get_field("labels")
            labels_per_image = labels_per_image.to(dtype=torch.int64)
            # this can probably be removed, but is left here for clarity
            # and completeness
            neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
            labels_per_image[neg_inds] = 0
            # mask scores are only computed on positive samples
            positive_inds = torch.nonzero(labels_per_image > 0).squeeze(1)
            segmentation_masks = matched_targets.get_field("masks")
            segmentation_masks = segmentation_masks[positive_inds]
            char_segmentation_masks = matched_targets.get_field("char_masks")
            char_segmentation_masks = char_segmentation_masks[positive_inds]
            positive_proposals = proposals_per_image[positive_inds]
            masks_per_image, char_masks_per_image, char_masks_weight_per_image, \
                decoder_targets_per_image, word_targets_per_image = project_char_masks_on_boxes(
                    segmentation_masks,
                    char_segmentation_masks,
                    positive_proposals,
                    self.discretization_size,
                )
            masks.append(masks_per_image)
            char_masks.append(char_masks_per_image)
            char_mask_weights.append(char_masks_weight_per_image)
            decoder_targets.append(decoder_targets_per_image)
            word_targets.append(word_targets_per_image)
        return masks, char_masks, char_mask_weights, decoder_targets, word_targets

    def feature_mask(self, x, proposals):
        """Fuse each proposal's soft segmentation mask into its RoI features
        ``x`` according to MIX_OPTION (hard/soft masking, concatenation, or
        one of the attention variants)."""
        masks = []
        for proposal in proposals:
            segmentation_masks = proposal.get_field("masks")
            boxes = proposal.bbox.to(torch.device("cpu"))
            for segmentation_mask, box in zip(segmentation_masks, boxes):
                cropped_mask = segmentation_mask.crop(box)
                scaled_mask = cropped_mask.resize(
                    (self.cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_W,
                     self.cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_H)
                )
                mask = scaled_mask.convert(mode="mask")
                masks.append(mask)
        if len(masks) == 0:
            # no proposals: keep channel counts consistent with the fused
            # variants so the predictor still receives the expected shape
            if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
                x = cat([x, torch.ones((x.shape[0], 1, x.shape[2], x.shape[3]), device=x.device)], dim=1)
            if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or 'ATTENTION_CHANNEL' in self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION:
                x = cat([x, x], dim=1)
            return x
        masks = torch.stack(masks, dim=0).to(x.device, dtype=torch.float32)
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
            x = cat([x, masks.unsqueeze(1)], dim=1)
            return x
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'NEW_CAT':
            cat_x = cat([x, x * masks.unsqueeze(1)], dim=1)
            out_x = self.enlarge_recepitve_field(cat_x)
            return out_x
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'NEW_MASK':
            cat_x = cat([x, x * masks.unsqueeze(1)], dim=1)
            new_mask = self.new_mask(cat_x)
            out_x = x * new_mask
            return out_x
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION' or self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_DOWN':
            x_cat = cat([x, masks.unsqueeze(1)], dim=1)
            attention = self.mask_attention(x_cat)
            x = x * attention
            return x
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX':
            mask_x = x * masks.unsqueeze(1)
            cat_x = cat([x, mask_x], dim=1)
            return cat_x
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
            mask_x = x * masks.unsqueeze(1)
            cat_x = cat([x, mask_x], dim=1)
            channel_attention = self.channel_attention(cat_x)
            attentioned_x = cat_x * channel_attention
            return attentioned_x
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_2':
            mask_x = x * masks.unsqueeze(1)
            cat_x = cat([x, mask_x], dim=1)
            channel_attention = self.channel_attention_2(cat_x)
            attentioned_x = cat_x * channel_attention
            return attentioned_x
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_SPLIT':
            mask_x = x * masks.unsqueeze(1)
            cat_x = cat([x, mask_x], dim=1)
            channel_attention = self.channel_attention(cat_x)
            # debug print removed (the *_BINARY sibling branch already had
            # it commented out)
            attentioned_x = cat([x * channel_attention[:, 0:1, :, :], mask_x * channel_attention[:, 1:, :, :]], dim=1)
            return attentioned_x
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_SPLIT_BINARY':
            mask_x = x * masks.unsqueeze(1)
            cat_x = cat([x, mask_x], dim=1)
            channel_attention = self.step_function(self.channel_attention(cat_x))
            attentioned_x = cat([x * channel_attention[:, 0:1, :, :], mask_x * channel_attention[:, 1:, :, :]], dim=1)
            return attentioned_x
        if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_TANH':
            mask_x = x * masks.unsqueeze(1)
            cat_x = cat([x, mask_x], dim=1)
            pooler_x = self.mask_pooler(cat_x)
            pooler_mask = nn.functional.interpolate(masks.unsqueeze(1), scale_factor=0.25, mode='bilinear')
            channel_attention = self.channel_attention_tanh(pooler_x, pooler_mask)
            attentioned_x = cat_x * channel_attention
            return attentioned_x
        # default: (soft-)multiply the features by the mask
        soft_ratio = self.cfg.MODEL.ROI_MASK_HEAD.SOFT_MASKED_FEATURE_RATIO
        if soft_ratio > 0:
            if soft_ratio < 1.0:
                x = x * (soft_ratio + (1 - soft_ratio) * masks.unsqueeze(1))
            else:
                x = x * (1.0 + soft_ratio * masks.unsqueeze(1))
        else:
            x = x * masks.unsqueeze(1)
        return x

    def forward(self, features, proposals, targets=None):
        """
        Arguments:
            features (list[Tensor]): feature-maps from possibly several levels
            proposals (list[BoxList]): proposal boxes
            targets (list[BoxList], optional): the ground-truth targets.

        Returns:
            x (Tensor): the result of the feature extractor
            proposals (list[BoxList]): during training, the original proposals
                are returned. During testing, the predicted boxlists are
                returned with the `mask` field set
            losses (dict[Tensor]): During training, returns the losses for the
                head. During testing, returns an empty dict.
        """
        if self.training:
            # during training, only focus on positive boxes
            all_proposals = proposals
            proposals, positive_inds = keep_only_positive_boxes(
                proposals, self.cfg.MODEL.ROI_MASK_HEAD.MASK_BATCH_SIZE_PER_IM
            )
            if all(len(proposal) == 0 for proposal in proposals):
                return None, None, None
        if self.training and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            x = features
            x = x[torch.cat(positive_inds, dim=0)]
        else:
            x = self.feature_extractor(features, proposals)
        if self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE:
            x = self.feature_mask(x, proposals)
        if self.training:
            mask_targets, char_mask_targets, char_mask_weights, \
                decoder_targets, word_targets = self.prepare_targets(
                    proposals, targets
                )
            decoder_targets = cat(decoder_targets, dim=0)
            word_targets = cat(word_targets, dim=0)
        if self.cfg.MODEL.CHAR_MASK_ON:
            if self.cfg.SEQUENCE.SEQ_ON:
                if not self.training:
                    if x.numel() > 0:
                        mask_logits, char_mask_logits, seq_outputs, seq_scores, \
                            detailed_seq_scores = self.predictor(x)
                        result = self.post_processor(
                            mask_logits,
                            char_mask_logits,
                            proposals,
                            seq_outputs=seq_outputs,
                            seq_scores=seq_scores,
                            detailed_seq_scores=detailed_seq_scores,
                        )
                        return x, result, {}
                    else:
                        return None, None, {}
                mask_logits, char_mask_logits, seq_outputs = self.predictor(
                    x, decoder_targets=decoder_targets, word_targets=word_targets
                )
                loss_mask, loss_char_mask = self.loss_evaluator(
                    proposals,
                    mask_logits,
                    char_mask_logits,
                    mask_targets,
                    char_mask_targets,
                    char_mask_weights,
                )
                return (
                    x,
                    all_proposals,
                    dict(
                        loss_mask=loss_mask,
                        loss_char_mask=loss_char_mask,
                        loss_seq=seq_outputs,
                    ),
                )
            else:
                mask_logits, char_mask_logits = self.predictor(x)
                if not self.training:
                    result = self.post_processor(
                        mask_logits, char_mask_logits, proposals
                    )
                    return x, result, {}
                loss_mask, loss_char_mask = self.loss_evaluator(
                    proposals,
                    mask_logits,
                    char_mask_logits,
                    mask_targets,
                    char_mask_targets,
                    char_mask_weights,
                )
                return (
                    x,
                    all_proposals,
                    dict(loss_mask=loss_mask, loss_char_mask=loss_char_mask),
                )
        else:
            if self.cfg.SEQUENCE.SEQ_ON:
                if self.cfg.MODEL.MASK_ON:
                    if not self.training:
                        if x.numel() > 0:
                            mask_logits, seq_outputs, seq_scores, \
                                detailed_seq_scores = self.predictor(x)
                            result = self.post_processor(
                                mask_logits,
                                None,
                                proposals,
                                seq_outputs=seq_outputs,
                                seq_scores=seq_scores,
                                detailed_seq_scores=detailed_seq_scores,
                            )
                            return x, result, {}
                        else:
                            return None, None, {}
                    mask_logits, seq_outputs = self.predictor(
                        x, decoder_targets=decoder_targets, word_targets=word_targets
                    )
                    loss_mask = self.loss_evaluator(
                        proposals,
                        mask_logits,
                        mask_targets,
                    )
                    return (
                        x,
                        all_proposals,
                        dict(
                            loss_mask=loss_mask,
                            loss_seq=seq_outputs,
                        ),
                    )
                else:
                    if not self.training:
                        if x.numel() > 0:
                            _, seq_outputs, seq_scores, \
                                detailed_seq_scores = self.predictor(x)
                            result = self.post_processor(
                                None,
                                None,
                                proposals,
                                seq_outputs=seq_outputs,
                                seq_scores=seq_scores,
                                detailed_seq_scores=detailed_seq_scores,
                            )
                            return x, result, {}
                        else:
                            return None, None, {}
                    _, seq_outputs = self.predictor(
                        x, decoder_targets=decoder_targets, word_targets=word_targets
                    )
                    return (
                        x,
                        all_proposals,
                        dict(
                            loss_seq=seq_outputs,
                        ),
                    )
            else:
                mask_logits = self.predictor(x)
                if not self.training:
                    result = self.post_processor(mask_logits, proposals)
                    return x, result, {}
                # NOTE(review): this branch passes the raw ground-truth
                # `targets` (not the prepared `mask_targets`) to the loss
                # evaluator, unlike the branches above -- verify against the
                # corresponding loss evaluator's signature.
                loss_mask = self.loss_evaluator(proposals, mask_logits, targets)
                return x, all_proposals, dict(loss_mask=loss_mask)


def build_roi_mask_head(cfg):
    """Construct the RoI mask head with the FG/BG matcher and the target
    discretization resolution taken from the config."""
    matcher = Matcher(
        cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
        cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
        allow_low_quality_matches=False,
    )
    return ROIMaskHead(
        cfg,
        matcher,
        (cfg.MODEL.ROI_MASK_HEAD.RESOLUTION_H, cfg.MODEL.ROI_MASK_HEAD.RESOLUTION_W),
    )
27,041
44.679054
160
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn

from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.modeling.box_coder import BoxCoder


class PostProcessor(nn.Module):
    """
    From a set of classification scores, box regression and proposals,
    computes the post-processed boxes, and applies NMS to obtain the
    final results
    """

    def __init__(
        self, score_thresh=0.05, nms=0.5, detections_per_img=100, box_coder=None, cfg=None
    ):
        """
        Arguments:
            score_thresh (float): minimum class probability kept in
                filter_results
            nms (float): IoU threshold for the per-class NMS
            detections_per_img (int): cap on detections kept per image
            box_coder (BoxCoder): decodes regression deltas; only created and
                used when MODEL.ROI_BOX_HEAD.USE_REGRESSION is enabled
            cfg: full config node (read for USE_REGRESSION and the
                masked-feature options)
        """
        super(PostProcessor, self).__init__()
        self.cfg = cfg
        self.score_thresh = score_thresh
        self.nms = nms
        self.detections_per_img = detections_per_img
        if cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
            if box_coder is None:
                box_coder = BoxCoder(weights=(10., 10., 5., 5.))
            self.box_coder = box_coder

    def forward(self, x, boxes):
        """
        Arguments:
            x (tuple[tensor, tensor]): x contains the class logits
                and the box_regression from the model.
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for ech image

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra fields labels and scores
        """
        class_logits, box_regression = x
        class_prob = F.softmax(class_logits, -1)

        # TODO think about a representation of batch of boxes
        image_shapes = [box.size for box in boxes]
        boxes_per_image = [len(box) for box in boxes]
        # carry the segmentation-proposal masks through post-processing when
        # a downstream head needs masked features
        if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE:
            masks = [box.get_field('masks') for box in boxes]
        if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
            concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)
            proposals = self.box_coder.decode(
                box_regression.view(sum(boxes_per_image), -1), concat_boxes
            )
            proposals = proposals.split(boxes_per_image, dim=0)
        else:
            # no regression head: the proposals are scored as-is
            proposals = boxes
        num_classes = class_prob.shape[1]
        class_prob = class_prob.split(boxes_per_image, dim=0)
        results = []
        if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE:
            for prob, boxes_per_img, image_shape, mask in zip(
                class_prob, proposals, image_shapes, masks
            ):
                boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape, mask)
                if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
                    boxlist = boxlist.clip_to_image(remove_empty=False)
                    boxlist = self.filter_results(boxlist, num_classes)
                results.append(boxlist)
        else:
            for prob, boxes_per_img, image_shape in zip(
                class_prob, proposals, image_shapes
            ):
                boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
                if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
                    boxlist = boxlist.clip_to_image(remove_empty=False)
                    boxlist = self.filter_results(boxlist, num_classes)
                results.append(boxlist)
        return results

    def prepare_boxlist(self, boxes, scores, image_shape, mask=None):
        """
        Returns BoxList from `boxes` and adds probability scores information
        as an extra field.

        `boxes` has shape (#detections, 4 * #classes), where each row
        represents a list of predicted bounding boxes for each of the
        object classes in the dataset (including the background class).
        The detections in each row originate from the same object proposal.

        `scores` has shape (#detection, #classes), where each row represents
        a list of object detection confidence scores for each of the object
        classes in the dataset (including the background class).
        `scores[i, j]` corresponds to the box at `boxes[i, j * 4:(j + 1) * 4]`.

        When the regression head is disabled, `boxes` is already a BoxList
        and only the flattened scores are attached to it.
        """
        if not self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
            scores = scores.reshape(-1)
            boxes.add_field("scores", scores)
            return boxes
        boxes = boxes.reshape(-1, 4)
        scores = scores.reshape(-1)
        boxlist = BoxList(boxes, image_shape, mode="xyxy")
        boxlist.add_field("scores", scores)
        if mask is not None:
            boxlist.add_field('masks', mask)
        return boxlist

    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores
        and applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)
        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, self.nms, score_field="scores"
            )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
            )
            if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE:
                # NOTE(review): the full 'masks' field is attached without
                # indexing by the threshold/NMS keep -- its length may not
                # match the surviving boxes; verify downstream consumers
                # expect this.
                boxlist_for_class.add_field('masks', boxlist.get_field('masks'))
            result.append(boxlist_for_class)
        result = cat_boxlist(result)
        number_of_detections = len(result)
        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            # kthvalue picks the score threshold that keeps exactly
            # detections_per_img boxes
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result


def make_roi_box_post_processor(cfg):
    """Build a PostProcessor from the ROI_HEADS config values."""
    # use_fpn = cfg.MODEL.ROI_HEADS.USE_FPN
    bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
    box_coder = BoxCoder(weights=bbox_reg_weights)
    score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH
    nms_thresh = cfg.MODEL.ROI_HEADS.NMS
    detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG
    postprocessor = PostProcessor(
        score_thresh, nms_thresh, detections_per_img, box_coder, cfg
    )
    return postprocessor
7,693
42.468927
148
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F

from maskrcnn_benchmark.modeling.backbone import resnet
from maskrcnn_benchmark.modeling.poolers import Pooler
from maskrcnn_benchmark.modeling.utils import cat
from maskrcnn_benchmark.layers import Conv2d


def conv3x3(in_planes, out_planes, stride=1, has_bias=False):
    "3x3 convolution with padding"
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=has_bias
    )


def conv3x3_bn_relu(in_planes, out_planes, stride=1, has_bias=False):
    # Conv -> BN -> ReLU building block used by the (currently disabled)
    # attention branch below.
    return nn.Sequential(
        conv3x3(in_planes, out_planes, stride),
        nn.BatchNorm2d(out_planes),
        nn.ReLU(inplace=True),
    )


class ResNet50Conv5ROIFeatureExtractor(nn.Module):
    """ROI feature extractor that pools proposals and runs them through the
    ResNet conv5 (stage 4) head."""

    def __init__(self, config):
        super(ResNet50Conv5ROIFeatureExtractor, self).__init__()

        resolution = config.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = config.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = config.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
        )
        # Stage 4 of the ResNet acts as the per-ROI head.
        stage = resnet.StageSpec(index=4, block_count=3, return_features=False)
        head = resnet.ResNetHead(
            block_module=config.MODEL.RESNETS.TRANS_FUNC,
            stages=(stage,),
            num_groups=config.MODEL.RESNETS.NUM_GROUPS,
            width_per_group=config.MODEL.RESNETS.WIDTH_PER_GROUP,
            stride_in_1x1=config.MODEL.RESNETS.STRIDE_IN_1X1,
            stride_init=None,
            res2_out_channels=config.MODEL.RESNETS.RES2_OUT_CHANNELS,
        )

        self.pooler = pooler
        self.head = head

    def forward(self, x, proposals):
        # Pool per-proposal features, then apply the ResNet head.
        x = self.pooler(x, proposals)
        x = self.head(x)
        return x


class FPN2MLPFeatureExtractor(nn.Module):
    """
    Heads for FPN for classification
    """

    def __init__(self, cfg):
        super(FPN2MLPFeatureExtractor, self).__init__()
        self.cfg = cfg
        resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
        )
        # 'CAT' concatenates a 1-channel mask plane to the pooled features,
        # so the flattened MLP input grows by one channel.
        if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'CAT':
            input_size = (cfg.MODEL.BACKBONE.OUT_CHANNELS + 1) * resolution ** 2
        else:
            input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS * resolution ** 2
        representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
        self.pooler = pooler
        self.fc6 = nn.Linear(input_size, representation_size)
        self.fc7 = nn.Linear(representation_size, representation_size)
        # Disabled experimental attention variants, kept for reference:
        # if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'ATTENTION':
        #     self.attention = nn.Sequential(
        #         conv3x3_bn_relu(cfg.MODEL.BACKBONE.OUT_CHANNELS + 1, 32),
        #         conv3x3(32, 1),
        #         nn.Sigmoid()
        #     )
        #     self.attention.apply(self.weights_init)
        # if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'ATTENTION':
        #     self.attention = nn.Sequential(
        #         Conv2d(cfg.MODEL.BACKBONE.OUT_CHANNELS + 1, 1, 1, 1, 0),
        #         nn.Sigmoid()
        #     )
        #     for name, param in self.named_parameters():
        #         if "bias" in name:
        #             nn.init.constant_(param, 0)
        #         elif "weight" in name:
        #             # Caffe2 implementation uses MSRAFill, which in fact
        #             # corresponds to kaiming_normal_ in PyTorch
        #             nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")

        for l in [self.fc6, self.fc7]:
            # Caffe2 implementation uses XavierFill, which in fact
            # corresponds to kaiming_uniform_ in PyTorch
            nn.init.kaiming_uniform_(l.weight, a=1)
            nn.init.constant_(l.bias, 0)

    def weights_init(self, m):
        # Init helper used by the (disabled) attention module above.
        classname = m.__class__.__name__
        if classname.find("Conv") != -1:
            nn.init.kaiming_normal_(m.weight.data)
        elif classname.find("BatchNorm") != -1:
            m.weight.data.fill_(1.0)
            m.bias.data.fill_(1e-4)

    def feature_mask(self, x, proposals):
        """Modulate pooled ROI features `x` with per-proposal segmentation
        masks, according to cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION."""
        masks = []
        for proposal in proposals:
            segmentation_masks = proposal.get_field("masks")
            boxes = proposal.bbox.to(torch.device("cpu"))
            for segmentation_mask, box in zip(segmentation_masks, boxes):
                # Crop each mask to its box and rescale to the pooler resolution
                # so it aligns with the pooled feature map.
                cropped_mask = segmentation_mask.crop(box)
                scaled_mask = cropped_mask.resize((self.cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION, self.cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION))
                mask = scaled_mask.convert(mode="mask")
                masks.append(mask)
        if len(masks) == 0:
            # No proposals: for 'CAT' still append an all-ones plane so the
            # channel count matches fc6's expected input size.
            if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'CAT':
                x = cat([x, torch.ones((x.shape[0], 1, x.shape[2], x.shape[3]), device=x.device)], dim=1)
            return x
        masks = torch.stack(masks, dim=0).to(x.device, dtype=torch.float32)
        if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'CAT':
            # Concatenate the mask as an extra feature channel.
            x = cat([x, masks.unsqueeze(1)], dim=1)
            return x
        if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'ATTENTION':
            # Attention variant disabled: currently a no-op pass-through.
            # x_cat = cat([x, masks.unsqueeze(1)], dim=1)
            # attention = self.attention(x_cat)
            # x = x * attention
            return x
        # Default: multiplicative (soft) masking of the features.
        soft_ratio = self.cfg.MODEL.ROI_BOX_HEAD.SOFT_MASKED_FEATURE_RATIO
        if soft_ratio > 0:
            if soft_ratio < 1.0:
                # Blend: background keeps `soft_ratio` of its activation.
                x = x * (soft_ratio + (1 - soft_ratio) * masks.unsqueeze(1))
            else:
                # Boost: foreground amplified by `soft_ratio`.
                x = x * (1.0 + soft_ratio * masks.unsqueeze(1))
        else:
            # Hard masking: zero out background activations.
            x = x * masks.unsqueeze(1)
        return x

    def forward(self, x, proposals):
        x = self.pooler(x, proposals)
        if self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE:
            x = self.feature_mask(x, proposals)
        x = x.view(x.size(0), -1)

        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))

        return x


_ROI_BOX_FEATURE_EXTRACTORS = {
    "ResNet50Conv5ROIFeatureExtractor": ResNet50Conv5ROIFeatureExtractor,
    "FPN2MLPFeatureExtractor": FPN2MLPFeatureExtractor,
}


def make_roi_box_feature_extractor(cfg):
    # Look up the configured extractor class and instantiate it.
    func = _ROI_BOX_FEATURE_EXTRACTORS[cfg.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR]
    return func(cfg)
6,713
38.263158
145
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch

from .inference import make_roi_box_post_processor
from .loss import make_roi_box_loss_evaluator
from .roi_box_feature_extractors import make_roi_box_feature_extractor
from .roi_box_predictors import make_roi_box_predictor


class ROIBoxHead(torch.nn.Module):
    """
    Generic Box Head class: pooled ROI features -> classifier/regressor.
    """

    def __init__(self, cfg):
        super(ROIBoxHead, self).__init__()
        self.cfg = cfg
        self.feature_extractor = make_roi_box_feature_extractor(cfg)
        self.predictor = make_roi_box_predictor(cfg)
        self.post_processor = make_roi_box_post_processor(cfg)
        self.loss_evaluator = make_roi_box_loss_evaluator(cfg)

    def forward(self, features, proposals, targets=None):
        """
        Arguments:
            features (list[Tensor]): feature-maps from possibly several levels
            proposals (list[BoxList]): proposal boxes
            targets (list[BoxList], optional): the ground-truth targets.

        Returns:
            x (Tensor): output of the feature extractor
            proposals (list[BoxList]): subsampled proposals during training,
                predicted boxlists during testing
            losses (dict[Tensor]): the head losses during training, an empty
                dict during testing
        """
        if self.training:
            # Faster R-CNN subsamples the proposals with a fixed
            # positive/negative ratio; matching needs no gradients.
            with torch.no_grad():
                proposals = self.loss_evaluator.subsample(proposals, targets)

        # Pooler + heads turn the feature maps into per-ROI features,
        # which the predictor maps to logits and (optional) box deltas.
        roi_features = self.feature_extractor(features, proposals)
        class_logits, box_regression = self.predictor(roi_features)

        if not self.training:
            if not self.cfg.MODEL.ROI_BOX_HEAD.INFERENCE_USE_BOX:
                # Box branch disabled at inference: forward the proposals.
                return roi_features, proposals, {}
            detections = self.post_processor((class_logits, box_regression), proposals)
            # print(detections[0].get_field('masks'))
            return roi_features, detections, {}

        loss_classifier, loss_box_reg = self.loss_evaluator(
            [class_logits], [box_regression]
        )
        losses = dict(loss_classifier=loss_classifier)
        if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
            losses["loss_box_reg"] = loss_box_reg
        return roi_features, proposals, losses


def build_roi_box_head(cfg):
    """
    Constructs a new box head.
    By default, uses ROIBoxHead, but if it turns out not to be enough, just
    register a new class and make it a parameter in the config
    """
    return ROIBoxHead(cfg)
3,104
35.529412
87
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (
    BalancedPositiveNegativeSampler,
)
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.modeling.utils import cat
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from torch.nn import functional as F


class FastRCNNLossComputation(object):
    """
    Computes the loss for Faster R-CNN.
    Also supports FPN
    """

    def __init__(self, proposal_matcher, fg_bg_sampler, box_coder, cfg=None):
        """
        Arguments:
            proposal_matcher (Matcher)
            fg_bg_sampler (BalancedPositiveNegativeSampler)
            box_coder (BoxCoder)
            cfg: config; MODEL.ROI_BOX_HEAD.USE_REGRESSION toggles the box loss
        """
        self.proposal_matcher = proposal_matcher
        self.fg_bg_sampler = fg_bg_sampler
        self.box_coder = box_coder
        self.cfg = cfg

    def match_targets_to_proposals(self, proposal, target):
        # IoU between every ground-truth box and every proposal drives the
        # matching.
        match_quality_matrix = boxlist_iou(target, proposal)
        matched_idxs = self.proposal_matcher(match_quality_matrix)
        # Fast RCNN only need "labels" field for selecting the targets
        target = target.copy_with_fields("labels")
        # get the targets corresponding GT for each proposal
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)
        return matched_targets

    def prepare_targets(self, proposals, targets):
        # Builds, per image: a class label per proposal and the encoded
        # regression targets towards the matched ground truth.
        labels = []
        regression_targets = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            matched_targets = self.match_targets_to_proposals(
                proposals_per_image, targets_per_image
            )

            matched_idxs = matched_targets.get_field("matched_idxs")

            labels_per_image = matched_targets.get_field("labels")
            labels_per_image = labels_per_image.to(dtype=torch.int64)

            # Label background (below the low threshold)
            bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
            labels_per_image[bg_inds] = 0

            # Label ignore proposals (between low and high thresholds)
            ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
            labels_per_image[ignore_inds] = -1  # -1 is ignored by sampler

            # compute regression targets
            regression_targets_per_image = self.box_coder.encode(
                matched_targets.bbox, proposals_per_image.bbox
            )

            labels.append(labels_per_image)
            regression_targets.append(regression_targets_per_image)

        return labels, regression_targets

    def subsample(self, proposals, targets):
        """
        This method performs the positive/negative sampling, and return
        the sampled proposals.
        Note: this function keeps a state.

        Arguments:
            proposals (list[BoxList])
            targets (list[BoxList])
        """

        labels, regression_targets = self.prepare_targets(proposals, targets)
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
        # print('sampled_pos_inds:', sampled_pos_inds[0].sum())
        # print('sampled_neg_inds:', sampled_neg_inds[0].sum())

        proposals = list(proposals)
        # add corresponding label and regression_targets information to the bounding boxes
        for labels_per_image, regression_targets_per_image, proposals_per_image in zip(
            labels, regression_targets, proposals
        ):
            proposals_per_image.add_field("labels", labels_per_image)
            proposals_per_image.add_field(
                "regression_targets", regression_targets_per_image
            )

        # distributed sampled proposals, that were obtained on all feature maps
        # concatenated via the fg_bg_sampler, into individual feature map levels
        for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
            zip(sampled_pos_inds, sampled_neg_inds)
        ):
            img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
            proposals_per_image = proposals[img_idx][img_sampled_inds]
            proposals[img_idx] = proposals_per_image

        # Stashed for __call__; subsample() must run before the loss.
        self._proposals = proposals
        return proposals

    def __call__(self, class_logits, box_regression):
        """
        Computes the loss for Faster R-CNN.
        This requires that the subsample method has been called beforehand.

        Arguments:
            class_logits (list[Tensor])
            box_regression (list[Tensor])

        Returns:
            classification_loss (Tensor)
            box_loss (Tensor)
        """

        class_logits = cat(class_logits, dim=0)
        if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
            box_regression = cat(box_regression, dim=0)
        device = class_logits.device

        if not hasattr(self, "_proposals"):
            raise RuntimeError("subsample needs to be called before")

        proposals = self._proposals

        labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
        if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
            regression_targets = cat(
                [proposal.get_field("regression_targets") for proposal in proposals],
                dim=0
            )

        classification_loss = F.cross_entropy(class_logits, labels)

        if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
            # get indices that correspond to the regression targets for
            # the corresponding ground truth labels, to be used with
            # advanced indexing
            sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
            labels_pos = labels[sampled_pos_inds_subset]
            # Each class owns 4 consecutive columns of box_regression.
            map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3], device=device)

            box_loss = smooth_l1_loss(
                box_regression[sampled_pos_inds_subset[:, None], map_inds],
                regression_targets[sampled_pos_inds_subset],
                size_average=False,
                beta=1,
            )
            # Normalized over all sampled proposals, not only positives.
            box_loss = box_loss / labels.numel()
        else:
            box_loss = 0

        return classification_loss, box_loss


def make_roi_box_loss_evaluator(cfg):
    # Factory: wires matcher, sampler and box coder from the config.
    matcher = Matcher(
        cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
        cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
        allow_low_quality_matches=False,
    )

    bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
    box_coder = BoxCoder(weights=bbox_reg_weights)

    fg_bg_sampler = BalancedPositiveNegativeSampler(
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
    )

    loss_evaluator = FastRCNNLossComputation(matcher, fg_bg_sampler, box_coder, cfg)

    return loss_evaluator
7,135
37.572973
91
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn


class FastRCNNPredictor(nn.Module):
    """Classification + box-regression head for C4-style ResNet ROI features."""

    def __init__(self, config, pretrained=None):
        super(FastRCNNPredictor, self).__init__()
        # Stage 4 of the ResNet multiplies the res2 channel count by 2**3.
        expansion = 2 ** (4 - 1)
        in_channels = config.MODEL.RESNETS.RES2_OUT_CHANNELS * expansion
        n_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES

        self.avgpool = nn.AvgPool2d(kernel_size=7, stride=7)
        self.cls_score = nn.Linear(in_channels, n_classes)
        self.bbox_pred = nn.Linear(in_channels, n_classes * 4)

        # Narrow Gaussian init for both heads, zero biases.
        nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)
        nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
        nn.init.constant_(self.bbox_pred.bias, 0)

    def forward(self, x):
        # Global-average-pool the 7x7 map, flatten, then score and regress.
        pooled = self.avgpool(x)
        flat = pooled.view(pooled.size(0), -1)
        return self.cls_score(flat), self.bbox_pred(flat)


class FPNPredictor(nn.Module):
    """Classification (and optional box-regression) head on FPN MLP features."""

    def __init__(self, cfg):
        super(FPNPredictor, self).__init__()
        self.cfg = cfg
        head_cfg = cfg.MODEL.ROI_BOX_HEAD
        self.cls_score = nn.Linear(head_cfg.MLP_HEAD_DIM, head_cfg.NUM_CLASSES)
        # The regression branch only exists when the config asks for it.
        if head_cfg.USE_REGRESSION:
            self.bbox_pred = nn.Linear(head_cfg.MLP_HEAD_DIM, head_cfg.NUM_CLASSES * 4)
            nn.init.normal_(self.bbox_pred.weight, std=0.001)
            nn.init.constant_(self.bbox_pred.bias, 0)
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)

    def forward(self, x):
        # Box deltas are None when regression is disabled.
        if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
            deltas = self.bbox_pred(x)
        else:
            deltas = None
        return self.cls_score(x), deltas


_ROI_BOX_PREDICTOR = {
    "FastRCNNPredictor": FastRCNNPredictor,
    "FPNPredictor": FPNPredictor,
}


def make_roi_box_predictor(cfg):
    # Look up the configured predictor class and instantiate it.
    return _ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR](cfg)
2,501
33.273973
76
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/__init__.py
0
0
0
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/structures/image_list.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch


class ImageList(object):
    """
    Holds a batch of images (of possibly varying sizes) as a single padded
    tensor, together with the original size of every image.
    """

    def __init__(self, tensors, image_sizes):
        """
        Arguments:
            tensors (tensor): the padded batch tensor
            image_sizes (list[tuple[int, int]]): original (h, w) per image
        """
        self.tensors = tensors
        self.image_sizes = image_sizes

    def to(self, *args, **kwargs):
        # Device/dtype casts produce a new ImageList; sizes are unchanged.
        return ImageList(self.tensors.to(*args, **kwargs), self.image_sizes)

    def get_sizes(self):
        return self.image_sizes


def _padded_batch_shape(images, size_divisible):
    # Largest extent along every axis across the batch; optionally round the
    # spatial dims up to a multiple of `size_divisible`.
    # TODO Ideally, just remove this and let the model handle arbitrary input sizes
    max_size = list(max(dims) for dims in zip(*[img.shape for img in images]))
    if size_divisible > 0:
        import math

        stride = size_divisible
        max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
        max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
    return (len(images),) + tuple(max_size)


def to_image_list(tensors, size_divisible=0):
    """
    tensors can be an ImageList, a torch.Tensor or
    an iterable of Tensors. It can't be a numpy array.
    When tensors is an iterable of Tensors, it pads
    the Tensors with zeros so that they have the same
    shape
    """
    if isinstance(tensors, torch.Tensor) and size_divisible > 0:
        # A single image with a divisibility constraint goes through the
        # padding path below.
        tensors = [tensors]

    if isinstance(tensors, ImageList):
        return tensors

    if isinstance(tensors, torch.Tensor):
        # single tensor shape can be inferred
        assert tensors.dim() == 4
        return ImageList(tensors, [t.shape[-2:] for t in tensors])

    if isinstance(tensors, (tuple, list)):
        batched = tensors[0].new(*_padded_batch_shape(tensors, size_divisible)).zero_()
        for src, dst in zip(tensors, batched):
            dst[: src.shape[0], : src.shape[1], : src.shape[2]].copy_(src)
        return ImageList(batched, [im.shape[-2:] for im in tensors])

    raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))


def to_image_target_list(tensors, size_divisible=0, targets=None):
    """
    Same as to_image_list, but for the list-of-tensors path it also resizes
    the given targets to the padded canvas and returns (ImageList, targets).
    """
    if isinstance(tensors, torch.Tensor) and size_divisible > 0:
        tensors = [tensors]

    if isinstance(tensors, ImageList):
        return tensors

    if isinstance(tensors, torch.Tensor):
        # single tensor shape can be inferred
        assert tensors.dim() == 4
        return ImageList(tensors, [t.shape[-2:] for t in tensors])

    if isinstance(tensors, (tuple, list)):
        batched = tensors[0].new(*_padded_batch_shape(tensors, size_divisible)).zero_()
        if targets is None:
            for src, dst in zip(tensors, batched):
                dst[: src.shape[0], : src.shape[1], : src.shape[2]].copy_(src)
        else:
            for src, dst, target in zip(tensors, batched, targets):
                dst[: src.shape[0], : src.shape[1], : src.shape[2]].copy_(src)
                if target is not None:
                    # Targets track the padded canvas as (width, height).
                    target.set_size((dst.shape[2], dst.shape[1]))
        return ImageList(batched, [im.shape[-2:] for im in tensors]), targets

    raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
4,459
35.859504
87
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/structures/segmentation_mask.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import cv2 import numpy as np import pycocotools.mask as mask_utils import torch from maskrcnn_benchmark.utils.chars import char2num import pyclipper # from PIL import Image from shapely import affinity from shapely.geometry import Polygon as ShapePolygon # transpose FLIP_LEFT_RIGHT = 0 FLIP_TOP_BOTTOM = 1 def convert_2d_tuple(t): a = [] for i in t: a.extend(list(i)) return a class Mask(object): """ This class is unfinished and not meant for use yet It is supposed to contain the mask for an object as a 2d tensor """ def __init__(self, masks, size, mode): self.masks = masks self.size = size self.mode = mode def transpose(self, method): if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM): raise NotImplementedError( "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented" ) width, height = self.size if method == FLIP_LEFT_RIGHT: dim = width # idx = 2 elif method == FLIP_TOP_BOTTOM: dim = height # idx = 1 flip_idx = list(range(dim)[::-1]) flipped_masks = self.masks.index_select(dim, flip_idx) return Mask(flipped_masks, self.size, self.mode) def crop(self, box): w, h = box[2] - box[0], box[3] - box[1] cropped_masks = self.masks[:, box[1] : box[3], box[0] : box[2]] return Mask(cropped_masks, size=(w, h), mode=self.mode) def resize(self, size, *args, **kwargs): pass class SegmentationMask(object): """ This class stores the segmentations for all objects in the image """ def __init__(self, polygons, size, mode=None): """ Arguments: polygons: a list of list of lists of numbers. The first level of the list correspond to individual instances, the second level to all the polygons that compose the object, and the third level to the polygon coordinates. 
""" assert isinstance(polygons, list) self.polygons = [Polygons(p, size, mode) for p in polygons] self.size = size self.mode = mode def transpose(self, method): if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM): raise NotImplementedError( "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented" ) flipped = [] for polygon in self.polygons: flipped.append(polygon.transpose(method)) return SegmentationMask(flipped, size=self.size, mode=self.mode) def crop(self, box, keep_ind=None): w, h = box[2] - box[0], box[3] - box[1] if keep_ind is not None: self.polygons = np.array(self.polygons) self.polygons = self.polygons[keep_ind] cropped = [] for polygon in self.polygons: cropped.append(polygon.crop(box)) return SegmentationMask(cropped, size=(w, h), mode=self.mode) def rotate(self, angle, r_c, start_h, start_w): rotated = [] for polygon in self.polygons: rotated.append(polygon.rotate(angle, r_c, start_h, start_w)) return SegmentationMask(rotated, size=(r_c[0] * 2, r_c[1] * 2), mode=self.mode) def resize(self, size, *args, **kwargs): scaled = [] for polygon in self.polygons: scaled.append(polygon.resize(size, *args, **kwargs)) return SegmentationMask(scaled, size=size, mode=self.mode) def set_size(self, size): self.size = size for polygon in self.polygons: polygon.set_size(size) def to(self, *args, **kwargs): return self def __getitem__(self, item): if isinstance(item, (int, slice)): selected_polygons = [self.polygons[item]] else: # advanced indexing on a single dimension selected_polygons = [] if isinstance(item, torch.Tensor) and item.dtype == torch.bool: item = item.nonzero() item = item.squeeze(1) if item.numel() > 0 else item item = item.tolist() for i in item: selected_polygons.append(self.polygons[i]) return SegmentationMask(selected_polygons, size=self.size, mode=self.mode) def __iter__(self): return iter(self.polygons) def __repr__(self): s = self.__class__.__name__ + "(" s += "num_instances={}, ".format(len(self.polygons)) s += "image_width={}, 
".format(self.size[0]) s += "image_height={})".format(self.size[1]) return s def size(self): return self.size def get_polygons(self): return self.polygons def to_np_polygon(self): np_polygons = [] for polygon in self.polygons: polys = polygon.get_polygons() for poly in polys: np_poly = poly.numpy() np_polygons.append(np_poly) return np_polygons def convert_seg_map(self, labels, shrink_ratio, seg_size, ignore_difficult=True): # width, height = self.size # assert self.size[0] == seg_size[1] # assert self.size[1] == seg_size[0] height, width = seg_size[0], seg_size[1] seg_map = np.zeros((1, height, width), dtype=np.uint8) training_mask = np.ones((height, width), dtype=np.uint8) for poly, label in zip(self.polygons, labels): poly = poly.get_polygons()[0] poly = poly.reshape((-1, 2)).numpy() if ignore_difficult and label.item() == -1: cv2.fillPoly(training_mask, poly.astype(np.int32)[np.newaxis, :, :], 0) continue if poly.shape[0] < 4: continue p = ShapePolygon(poly) if p.length == 0: continue try: d = p.area * (1 - np.power(shrink_ratio, 2)) / p.length except: continue subj = [tuple(s) for s in poly] pco = pyclipper.PyclipperOffset() pco.AddPath(subj, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) s = pco.Execute(-d) if s == []: cv2.fillPoly(training_mask, poly.astype(np.int32)[np.newaxis, :, :], 0) continue out = convert_2d_tuple(s[0]) out = np.array(out).reshape(-1, 2) cv2.fillPoly(seg_map[0, :, :], [out.astype(np.int32)], 1) return seg_map, training_mask class Polygons(object): """ This class holds a set of polygons that represents a single instance of an object mask. 
The object can be represented as a set of polygons """ def __init__(self, polygons, size, mode): # assert isinstance(polygons, list), '{}'.format(polygons) if isinstance(polygons, list): polygons = [torch.as_tensor(p, dtype=torch.float32) for p in polygons] elif isinstance(polygons, Polygons): polygons = polygons.polygons self.polygons = polygons self.size = size self.mode = mode def transpose(self, method): if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM): raise NotImplementedError( "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented" ) flipped_polygons = [] width, height = self.size if method == FLIP_LEFT_RIGHT: dim = width idx = 0 elif method == FLIP_TOP_BOTTOM: dim = height idx = 1 for poly in self.polygons: p = poly.clone() TO_REMOVE = 1 p[idx::2] = dim - poly[idx::2] - TO_REMOVE flipped_polygons.append(p) return Polygons(flipped_polygons, size=self.size, mode=self.mode) def rotate(self, angle, r_c, start_h, start_w): poly = self.polygons[0].numpy().reshape(-1, 2) poly[:, 0] += start_w poly[:, 1] += start_h polys = ShapePolygon(poly) r_polys = list(affinity.rotate(polys, angle, r_c).boundary.coords[:-1]) p = [] for r in r_polys: p += list(r) return Polygons([p], size=self.size, mode=self.mode) def crop(self, box): w, h = box[2] - box[0], box[3] - box[1] # TODO chck if necessary w = max(w, 1) h = max(h, 1) cropped_polygons = [] for poly in self.polygons: p = poly.clone() p[0::2] = p[0::2] - box[0] # .clamp(min=0, max=w) p[1::2] = p[1::2] - box[1] # .clamp(min=0, max=h) cropped_polygons.append(p) return Polygons(cropped_polygons, size=(w, h), mode=self.mode) def resize(self, size, *args, **kwargs): ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size)) if ratios[0] == ratios[1]: ratio = ratios[0] scaled_polys = [p * ratio for p in self.polygons] return Polygons(scaled_polys, size, mode=self.mode) ratio_w, ratio_h = ratios scaled_polygons = [] for poly in self.polygons: p = poly.clone() p[0::2] *= ratio_w p[1::2] *= ratio_h 
scaled_polygons.append(p) return Polygons(scaled_polygons, size=size, mode=self.mode) def convert(self, mode): width, height = self.size if mode == "mask": # print([p.numpy() for p in self.polygons]) try: rles = mask_utils.frPyObjects( [p.numpy() for p in self.polygons], height, width ) except: print([p.numpy() for p in self.polygons]) mask = torch.ones((height, width), dtype=torch.uint8) return mask rle = mask_utils.merge(rles) mask = mask_utils.decode(rle) mask = torch.from_numpy(mask) # TODO add squeeze? return mask def set_size(self, size): self.size = size def get_polygons(self): return self.polygons def __repr__(self): s = self.__class__.__name__ + "(" s += "num_polygons={}, ".format(len(self.polygons)) s += "image_width={}, ".format(self.size[0]) s += "image_height={}, ".format(self.size[1]) s += "mode={})".format(self.mode) return s class CharPolygons(object): """ This class holds a set of polygons that represents a single instance of an object mask. The object can be represented as a set of polygons """ def __init__( self, char_boxes, word=None, use_char_ann=False, char_classes=None, size=None, mode=None, char_num_classes=37, ): if isinstance(char_boxes, CharPolygons): if char_classes is None: char_classes = char_boxes.char_classes self.word = char_boxes.word char_boxes = char_boxes.char_boxes else: if char_classes is None: char_classes = [ torch.as_tensor(p[8], dtype=torch.float32) for p in char_boxes ] char_boxes = [ torch.as_tensor(p[:8], dtype=torch.float32) for p in char_boxes ] self.word = word self.char_boxes = char_boxes self.char_classes = char_classes self.size = size self.mode = mode self.use_char_ann = use_char_ann self.char_num_classes = char_num_classes def transpose(self, method): if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM): raise NotImplementedError( "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented" ) flipped_polygons = [] width, height = self.size if method == FLIP_LEFT_RIGHT: dim = width idx = 0 elif method == 
FLIP_TOP_BOTTOM: dim = height idx = 1 for char_box in self.char_boxes: p = char_box.clone() TO_REMOVE = 1 p[idx::2] = dim - char_box[idx::2] - TO_REMOVE flipped_polygons.append(p) return CharPolygons( flipped_polygons, word=self.word, use_char_ann=self.use_char_ann, char_classes=self.char_classes, size=self.size, mode=self.mode, char_num_classes=self.char_num_classes, ) def crop(self, box): w, h = box[2] - box[0], box[3] - box[1] # TODO chck if necessary w = max(w, 1) h = max(h, 1) cropped_polygons = [] for char_box in self.char_boxes: p = char_box.clone() p[0::2] = p[0::2] - box[0] # .clamp(min=0, max=w) p[1::2] = p[1::2] - box[1] # .clamp(min=0, max=h) cropped_polygons.append(p) return CharPolygons( cropped_polygons, word=self.word, use_char_ann=self.use_char_ann, char_classes=self.char_classes, size=(w, h), mode=self.mode, char_num_classes=self.char_num_classes, ) def rotate(self, angle, r_c, start_h, start_w): r_polys = [] for poly in self.char_boxes: poly = poly.numpy() poly[0::2] += start_w poly[1::2] += start_h poly = ShapePolygon(np.array(poly).reshape(4, 2)) r_poly = np.array( list(affinity.rotate(poly, angle, r_c).boundary.coords[:-1]) ).reshape(-1, 8) r_polys.append(r_poly[0]) return CharPolygons( r_polys, word=self.word, use_char_ann=self.use_char_ann, char_classes=self.char_classes, size=(r_c[0] * 2, r_c[1] * 2), mode=self.mode, char_num_classes=self.char_num_classes, ) def resize(self, size, *args, **kwargs): ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size)) if ratios[0] == ratios[1]: ratio = ratios[0] scaled_polys = [p * ratio for p in self.char_boxes] return CharPolygons( scaled_polys, word=self.word, use_char_ann=self.use_char_ann, char_classes=self.char_classes, size=size, mode=self.mode, char_num_classes=self.char_num_classes, ) ratio_w, ratio_h = ratios scaled_polygons = [] for poly in self.char_boxes: p = poly.clone() p[0::2] *= ratio_w p[1::2] *= ratio_h scaled_polygons.append(p) return CharPolygons( 
scaled_polygons, word=self.word, use_char_ann=self.use_char_ann, char_classes=self.char_classes, size=size, mode=self.mode, char_num_classes=self.char_num_classes, ) def set_size(self, size): self.size = size def convert(self, mode): width, height = self.size if mode == "char_mask": if not self.use_char_ann: char_map = -np.ones((height, width)) char_map_weight = np.zeros((self.char_num_classes,)) else: char_map = np.zeros((height, width)) char_map_weight = np.ones((self.char_num_classes,)) for i, p in enumerate(self.char_boxes): poly = p.numpy().reshape(4, 2) poly = shrink_poly(poly, 0.25) cv2.fillPoly( char_map, [poly.astype(np.int32)], int(self.char_classes[i]) ) pos_index = np.where(char_map > 0) pos_num = pos_index[0].size if pos_num > 0: pos_weight = 1.0 * (height * width - pos_num) / pos_num char_map_weight[1:] = pos_weight return torch.from_numpy(char_map), torch.from_numpy(char_map_weight) elif mode == "seq_char_mask": decoder_target = self.char_num_classes * np.ones((32,)) word_target = -np.ones((32,)) if not self.use_char_ann: char_map = -np.ones((height, width)) char_map_weight = np.zeros((self.char_num_classes,)) for i, char in enumerate(self.word): if i > 31: break decoder_target[i] = char2num(char) word_target[i] = char2num(char) end_point = min(max(1, len(self.word)), 31) word_target[end_point] = self.char_num_classes else: char_map = np.zeros((height, width)) char_map_weight = np.ones((self.char_num_classes,)) word_length = 0 for i, p in enumerate(self.char_boxes): poly = p.numpy().reshape(4, 2) if i < 32: decoder_target[i] = int(self.char_classes[i]) word_target[i] = int(self.char_classes[i]) word_length += 1 poly = shrink_poly(poly, 0.25) cv2.fillPoly( char_map, [poly.astype(np.int32)], int(self.char_classes[i]) ) end_point = min(max(1, word_length), 31) word_target[end_point] = self.char_num_classes pos_index = np.where(char_map > 0) pos_num = pos_index[0].size if pos_num > 0: pos_weight = 1.0 * (height * width - pos_num) / pos_num 
char_map_weight[1:] = pos_weight return ( torch.from_numpy(char_map), torch.from_numpy(char_map_weight), torch.from_numpy(decoder_target), torch.from_numpy(word_target), ) def creat_color_map(self, n_class, width): splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3))) maps = [] for i in range(splits): r = int(i * width * 1.0 / (splits - 1)) for j in range(splits): g = int(j * width * 1.0 / (splits - 1)) for k in range(splits - 1): b = int(k * width * 1.0 / (splits - 1)) maps.append([r, g, b]) return np.array(maps) def __repr__(self): s = self.__class__.__name__ + "(" s += "num_char_boxes={}, ".format(len(self.char_boxes)) s += "num_char_classes={}, ".format(len(self.char_classes)) s += "image_width={}, ".format(self.size[0]) s += "image_height={}, ".format(self.size[1]) s += "mode={})".format(self.mode) return s class SegmentationCharMask(object): def __init__( self, chars_boxes, words=None, use_char_ann=True, size=None, mode=None, char_num_classes=37 ): # self.chars_boxes=[CharPolygons(char_boxes, word=word, use_char_ann=use_char_ann, size=size, mode=mode) for char_boxes, word in zip(chars_boxes, words)] if words is None: self.chars_boxes = [ CharPolygons( char_boxes, word=None, use_char_ann=use_char_ann, size=size, mode=mode, char_num_classes=char_num_classes, ) for char_boxes in chars_boxes ] else: self.chars_boxes = [ CharPolygons( char_boxes, word=words[i], use_char_ann=use_char_ann, size=size, mode=mode, char_num_classes=char_num_classes, ) for i, char_boxes in enumerate(chars_boxes) ] self.size = size self.mode = mode self.use_char_ann = use_char_ann self.char_num_classes = char_num_classes def transpose(self, method): if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM): raise NotImplementedError( "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented" ) flipped = [] for char_boxes in self.chars_boxes: flipped.append(char_boxes.transpose(method)) return SegmentationCharMask( flipped, use_char_ann=self.use_char_ann, size=self.size, mode=self.mode, 
char_num_classes=self.char_num_classes ) def crop(self, box, keep_ind): cropped = [] w, h = box[2] - box[0], box[3] - box[1] if keep_ind is not None: self.chars_boxes = np.array(self.chars_boxes) self.chars_boxes = self.chars_boxes[keep_ind] for char_boxes in self.chars_boxes: cropped.append(char_boxes.crop(box)) return SegmentationCharMask( cropped, use_char_ann=self.use_char_ann, size=(w, h), mode=self.mode ) def resize(self, size, *args, **kwargs): scaled = [] for char_boxes in self.chars_boxes: scaled.append(char_boxes.resize(size, *args, **kwargs)) return SegmentationCharMask( scaled, use_char_ann=self.use_char_ann, size=size, mode=self.mode, char_num_classes=self.char_num_classes ) def set_size(self, size): self.size = size for char_box in self.chars_boxes: char_box.set_size(size) def rotate(self, angle, r_c, start_h, start_w): rotated = [] for char_boxes in self.chars_boxes: rotated.append(char_boxes.rotate(angle, r_c, start_h, start_w)) return SegmentationCharMask( rotated, use_char_ann=self.use_char_ann, size=(r_c[0] * 2, r_c[1] * 2), mode=self.mode, char_num_classes=self.char_num_classes, ) def __iter__(self): return iter(self.chars_boxes) def __getitem__(self, item): if isinstance(item, (int, slice)): selected_chars_boxes = [self.chars_boxes[item]] else: # advanced indexing on a single dimension selected_chars_boxes = [] if isinstance(item, torch.Tensor) and item.dtype == torch.bool: item = item.nonzero() item = item.squeeze(1) if item.numel() > 0 else item item = item.tolist() for i in item: if i >= len(self.chars_boxes): print(i) print("chars_boxes.shape: ", len(self.chars_boxes)) input() selected_chars_boxes.append(self.chars_boxes[i]) return SegmentationCharMask( selected_chars_boxes, use_char_ann=self.use_char_ann, size=self.size, mode=self.mode, char_num_classes=self.char_num_classes, ) def __repr__(self): s = self.__class__.__name__ + "(" s += "num_char_boxes={}, ".format(len(self.chars_boxes)) s += "image_width={}, ".format(self.size[0]) s += 
"image_height={})".format(self.size[1]) return s def shrink_poly(poly, shrink): # shrink ratio R = shrink r = [None, None, None, None] for i in range(4): r[i] = min( np.linalg.norm(poly[i] - poly[(i + 1) % 4]), np.linalg.norm(poly[i] - poly[(i - 1) % 4]), ) # find the longer pair if np.linalg.norm(poly[0] - poly[1]) + np.linalg.norm( poly[2] - poly[3] ) > np.linalg.norm(poly[0] - poly[3]) + np.linalg.norm(poly[1] - poly[2]): # first move (p0, p1), (p2, p3), then (p0, p3), (p1, p2) ## p0, p1 theta = np.arctan2((poly[1][1] - poly[0][1]), (poly[1][0] - poly[0][0])) poly[0][0] += R * r[0] * np.cos(theta) poly[0][1] += R * r[0] * np.sin(theta) poly[1][0] -= R * r[1] * np.cos(theta) poly[1][1] -= R * r[1] * np.sin(theta) ## p2, p3 theta = np.arctan2((poly[2][1] - poly[3][1]), (poly[2][0] - poly[3][0])) poly[3][0] += R * r[3] * np.cos(theta) poly[3][1] += R * r[3] * np.sin(theta) poly[2][0] -= R * r[2] * np.cos(theta) poly[2][1] -= R * r[2] * np.sin(theta) ## p0, p3 theta = np.arctan2((poly[3][0] - poly[0][0]), (poly[3][1] - poly[0][1])) poly[0][0] += R * r[0] * np.sin(theta) poly[0][1] += R * r[0] * np.cos(theta) poly[3][0] -= R * r[3] * np.sin(theta) poly[3][1] -= R * r[3] * np.cos(theta) ## p1, p2 theta = np.arctan2((poly[2][0] - poly[1][0]), (poly[2][1] - poly[1][1])) poly[1][0] += R * r[1] * np.sin(theta) poly[1][1] += R * r[1] * np.cos(theta) poly[2][0] -= R * r[2] * np.sin(theta) poly[2][1] -= R * r[2] * np.cos(theta) else: ## p0, p3 # print poly theta = np.arctan2((poly[3][0] - poly[0][0]), (poly[3][1] - poly[0][1])) poly[0][0] += R * r[0] * np.sin(theta) poly[0][1] += R * r[0] * np.cos(theta) poly[3][0] -= R * r[3] * np.sin(theta) poly[3][1] -= R * r[3] * np.cos(theta) ## p1, p2 theta = np.arctan2((poly[2][0] - poly[1][0]), (poly[2][1] - poly[1][1])) poly[1][0] += R * r[1] * np.sin(theta) poly[1][1] += R * r[1] * np.cos(theta) poly[2][0] -= R * r[2] * np.sin(theta) poly[2][1] -= R * r[2] * np.cos(theta) ## p0, p1 theta = np.arctan2((poly[1][1] - poly[0][1]), 
(poly[1][0] - poly[0][0])) poly[0][0] += R * r[0] * np.cos(theta) poly[0][1] += R * r[0] * np.sin(theta) poly[1][0] -= R * r[1] * np.cos(theta) poly[1][1] -= R * r[1] * np.sin(theta) ## p2, p3 theta = np.arctan2((poly[2][1] - poly[3][1]), (poly[2][0] - poly[3][0])) poly[3][0] += R * r[3] * np.cos(theta) poly[3][1] += R * r[3] * np.sin(theta) poly[2][0] -= R * r[2] * np.cos(theta) poly[2][1] -= R * r[2] * np.sin(theta) return poly def shrink_rect(poly, shrink): xmin = min(poly[:, 0]) xmax = max(poly[:, 0]) ymin = min(poly[:, 1]) ymax = max(poly[:, 1]) # assert xmax > xmin and ymax > ymin xc = (xmax + xmin) / 2 yc = (ymax + ymin) / 2 w = xmax - xmin h = ymax - ymin sxmin = xc - w / 2 * shrink sxmax = xc + w / 2 * shrink symin = yc - h / 2 * shrink symax = yc + h / 2 * shrink return np.array([sxmin, symin, sxmax, symin, sxmax, symax, sxmin, symax]).reshape( (4, 2) ) def is_poly_inbox(poly, height, width): min_x = min(poly[:, 0]) min_y = min(poly[:, 1]) max_x = max(poly[:, 0]) max_y = max(poly[:, 1]) if (max_x < 0 and max_y < 0) or (min_x > width and min_y > height): return False else: return True
27,175
34.431551
161
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/structures/bounding_box.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import numpy as np import torch # from shapely import affinity # from shapely.geometry import box # transpose FLIP_LEFT_RIGHT = 0 FLIP_TOP_BOTTOM = 1 class BoxList(object): """ This class represents a set of bounding boxes. The bounding boxes are represented as a Nx4 Tensor. In order ot uniquely determine the bounding boxes with respect to an image, we also store the corresponding image dimensions. They can contain extra information that is specific to each bounding box, such as labels. """ def __init__(self, bbox, image_size, mode="xyxy", use_char_ann=True, is_fake=False): device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu") bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device) if bbox.ndimension() != 2: raise ValueError( "bbox should have 2 dimensions, got {}".format(bbox.ndimension()) ) if bbox.size(-1) != 4: raise ValueError( "last dimenion of bbox should have a " "size of 4, got {}".format(bbox.size(-1)) ) if mode not in ("xyxy", "xywh"): raise ValueError("mode should be 'xyxy' or 'xywh'") self.bbox = bbox self.size = image_size # (image_width, image_height) self.mode = mode self.extra_fields = {} self.use_char_ann = use_char_ann def set_size(self, size): self.size = size bbox = BoxList( self.bbox, size, mode=self.mode, use_char_ann=self.use_char_ann ) for k, v in self.extra_fields.items(): if not isinstance(v, torch.Tensor): v = v.set_size(size) bbox.add_field(k, v) return bbox.convert(self.mode) def add_field(self, field, field_data): self.extra_fields[field] = field_data def get_field(self, field): return self.extra_fields[field] def has_field(self, field): return field in self.extra_fields def fields(self): return list(self.extra_fields.keys()) def _copy_extra_fields(self, bbox): for k, v in bbox.extra_fields.items(): self.extra_fields[k] = v def convert(self, mode): if mode not in ("xyxy", "xywh"): raise ValueError("mode should be 'xyxy' or 
'xywh'") if mode == self.mode: return self # we only have two modes, so don't need to check # self.mode xmin, ymin, xmax, ymax = self._split_into_xyxy() if mode == "xyxy": bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1) bbox = BoxList(bbox, self.size, mode=mode, use_char_ann=self.use_char_ann) else: TO_REMOVE = 1 bbox = torch.cat( (xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1 ) bbox = BoxList(bbox, self.size, mode=mode, use_char_ann=self.use_char_ann) bbox._copy_extra_fields(self) return bbox def _split_into_xyxy(self): if self.mode == "xyxy": xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1) return xmin, ymin, xmax, ymax elif self.mode == "xywh": TO_REMOVE = 1 xmin, ymin, w, h = self.bbox.split(1, dim=-1) return ( xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0), ) else: raise RuntimeError("Should not be here") def resize(self, size, *args, **kwargs): """ Returns a resized copy of this bounding box :param size: The requested size in pixels, as a 2-tuple: (width, height). 
""" ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size)) if ratios[0] == ratios[1]: ratio = ratios[0] scaled_box = self.bbox * ratio bbox = BoxList( scaled_box, size, mode=self.mode, use_char_ann=self.use_char_ann ) # bbox._copy_extra_fields(self) for k, v in self.extra_fields.items(): if not isinstance(v, torch.Tensor): v = v.resize(size, *args, **kwargs) bbox.add_field(k, v) return bbox ratio_width, ratio_height = ratios xmin, ymin, xmax, ymax = self._split_into_xyxy() scaled_xmin = xmin * ratio_width scaled_xmax = xmax * ratio_width scaled_ymin = ymin * ratio_height scaled_ymax = ymax * ratio_height scaled_box = torch.cat( (scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1 ) bbox = BoxList(scaled_box, size, mode="xyxy", use_char_ann=self.use_char_ann) # bbox._copy_extra_fields(self) for k, v in self.extra_fields.items(): if not isinstance(v, torch.Tensor): v = v.resize(size, *args, **kwargs) bbox.add_field(k, v) return bbox.convert(self.mode) def poly2box(self, poly): xmin = min(poly[0::2]) xmax = max(poly[0::2]) ymin = min(poly[1::2]) ymax = max(poly[1::2]) return [xmin, ymin, xmax, ymax] def rotate(self, angle, r_c, start_h, start_w): masks = self.extra_fields["masks"] masks = masks.rotate(angle, r_c, start_h, start_w) polys = masks.polygons boxes = [] for poly in polys: box = self.poly2box(poly.polygons[0].numpy()) boxes.append(box) self.size = (r_c[0] * 2, r_c[1] * 2) bbox = BoxList(boxes, self.size, mode="xyxy", use_char_ann=self.use_char_ann) for k, v in self.extra_fields.items(): if k == "masks": v = masks else: if self.use_char_ann: if not isinstance(v, torch.Tensor): v = v.rotate(angle, r_c, start_h, start_w) else: if not isinstance(v, torch.Tensor) and k != "char_masks": v = v.rotate(angle, r_c, start_h, start_w) bbox.add_field(k, v) return bbox.convert(self.mode) def transpose(self, method): """ Transpose bounding box (flip or rotate in 90 degree steps) :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`, 
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`, :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`, :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`. """ if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM): raise NotImplementedError( "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented" ) image_width, image_height = self.size xmin, ymin, xmax, ymax = self._split_into_xyxy() if method == FLIP_LEFT_RIGHT: TO_REMOVE = 1 transposed_xmin = image_width - xmax - TO_REMOVE transposed_xmax = image_width - xmin - TO_REMOVE transposed_ymin = ymin transposed_ymax = ymax elif method == FLIP_TOP_BOTTOM: transposed_xmin = xmin transposed_xmax = xmax transposed_ymin = image_height - ymax transposed_ymax = image_height - ymin transposed_boxes = torch.cat( (transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1 ) bbox = BoxList( transposed_boxes, self.size, mode="xyxy", use_char_ann=self.use_char_ann ) # bbox._copy_extra_fields(self) for k, v in self.extra_fields.items(): if not isinstance(v, torch.Tensor): v = v.transpose(method) bbox.add_field(k, v) return bbox.convert(self.mode) def crop(self, box): """ Cropss a rectangular region from this bounding box. The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate. 
""" xmin, ymin, xmax, ymax = self._split_into_xyxy() w, h = box[2] - box[0], box[3] - box[1] cropped_xmin = (xmin - box[0]).clamp(min=0, max=w) cropped_ymin = (ymin - box[1]).clamp(min=0, max=h) cropped_xmax = (xmax - box[0]).clamp(min=0, max=w) cropped_ymax = (ymax - box[1]).clamp(min=0, max=h) keep_ind = None not_empty = np.where( (cropped_xmin != cropped_xmax) & (cropped_ymin != cropped_ymax) )[0] if len(not_empty) > 0: keep_ind = not_empty cropped_box = torch.cat( (cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1 ) cropped_box = cropped_box[not_empty] bbox = BoxList(cropped_box, (w, h), mode="xyxy", use_char_ann=self.use_char_ann) # bbox._copy_extra_fields(self) for k, v in self.extra_fields.items(): if self.use_char_ann: if not isinstance(v, torch.Tensor): v = v.crop(box, keep_ind) else: if not isinstance(v, torch.Tensor) and k != "char_masks": v = v.crop(box, keep_ind) bbox.add_field(k, v) return bbox.convert(self.mode) # Tensor-like methods def to(self, device): bbox = BoxList(self.bbox.to(device), self.size, self.mode, self.use_char_ann) for k, v in self.extra_fields.items(): if hasattr(v, "to"): v = v.to(device) bbox.add_field(k, v) return bbox def __getitem__(self, item): bbox = BoxList(self.bbox[item], self.size, self.mode, self.use_char_ann) for k, v in self.extra_fields.items(): bbox.add_field(k, v[item]) return bbox def __len__(self): return self.bbox.shape[0] def clip_to_image(self, remove_empty=True): TO_REMOVE = 1 self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE) self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE) self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE) self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE) if remove_empty: box = self.bbox keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0]) return self[keep] return self def area(self): TO_REMOVE = 1 box = self.bbox area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE) return area def copy_with_fields(self, 
fields): bbox = BoxList(self.bbox, self.size, self.mode, self.use_char_ann) if not isinstance(fields, (list, tuple)): fields = [fields] for field in fields: bbox.add_field(field, self.get_field(field)) return bbox def __repr__(self): s = self.__class__.__name__ + "(" s += "num_boxes={}, ".format(len(self)) s += "image_width={}, ".format(self.size[0]) s += "image_height={}, ".format(self.size[1]) s += "mode={})".format(self.mode) return s if __name__ == "__main__": bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10)) s_bbox = bbox.resize((5, 5)) print(s_bbox) print(s_bbox.bbox) t_bbox = bbox.transpose(0) print(t_bbox) print(t_bbox.bbox)
11,570
35.617089
88
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/structures/boxlist_ops.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch from maskrcnn_benchmark.layers import nms as _box_nms from .bounding_box import BoxList from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask import numpy as np import shapely from shapely.geometry import Polygon,MultiPoint def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="score"): """ Performs non-maximum suppression on a boxlist, with scores specified in a boxlist field via score_field. Arguments: boxlist(BoxList) nms_thresh (float) max_proposals (int): if > 0, then only the top max_proposals are kept after non-maxium suppression score_field (str) """ if nms_thresh <= 0: return boxlist mode = boxlist.mode boxlist = boxlist.convert("xyxy") boxes = boxlist.bbox score = boxlist.get_field(score_field) keep = _box_nms(boxes, score, nms_thresh) if max_proposals > 0: keep = keep[:max_proposals] boxlist = boxlist[keep] return boxlist.convert(mode) def remove_small_boxes(boxlist, min_size): """ Only keep boxes with both sides >= min_size Arguments: boxlist (Boxlist) min_size (int) """ # TODO maybe add an API for querying the ws / hs xywh_boxes = boxlist.convert("xywh").bbox _, _, ws, hs = xywh_boxes.unbind(dim=1) keep = ((ws >= min_size) & (hs >= min_size)).nonzero().squeeze(1) return boxlist[keep] # implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py # with slight modifications def boxlist_iou(boxlist1, boxlist2): """Compute the intersection over union of two set of boxes. The box order must be (xmin, ymin, xmax, ymax). Arguments: box1: (BoxList) bounding boxes, sized [N,4]. box2: (BoxList) bounding boxes, sized [M,4]. Returns: (tensor) iou, sized [N,M]. 
Reference: https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py """ if boxlist1.size != boxlist2.size: raise RuntimeError( "boxlists should have same image size, got {}, {}".format( boxlist1, boxlist2 ) ) # N = len(boxlist1) # M = len(boxlist2) area1 = boxlist1.area() area2 = boxlist2.area() box1, box2 = boxlist1.bbox, boxlist2.bbox lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2] rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2] TO_REMOVE = 1 wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2] inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] iou = inter / (area1[:, None] + area2 - inter) return iou # def boxlist_polygon_iou(target, proposal): # """Compute the intersection over union of two set of boxes. # The box order must be (xmin, ymin, xmax, ymax). # Arguments: # box1: (BoxList) bounding boxes, sized [N,4]. # box2: (BoxList) bounding boxes, sized [M,4]. # Returns: # (tensor) iou, sized [N,M]. # Reference: # https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py # """ # if target.size != proposal.size: # raise RuntimeError( # "boxlists should have same image size, got {}, {}".format( # target, proposal # ) # ) # target_polygon = target.get_field("masks").to_np_polygon() # proposal_polygon = proposal.get_field("masks").to_np_polygon() # print(target_polygon) # print(proposal_polygon) # polygon_points1 = target_polygon[0].reshape(-1, 2) # poly1 = Polygon(polygon_points1).convex_hull # polygon_points2 = proposal_polygon[0].reshape(-1, 2) # poly2 = Polygon(polygon_points2).convex_hull # union_poly = np.concatenate((polygon_points1, polygon_points2)) # if not poly1.intersects(poly2): # this test is fast and can accelerate calculation # iou = 0 # else: # try: # inter_area = poly1.intersection(poly2).area # #union_area = poly1.area + poly2.area - inter_area # union_area = MultiPoint(union_poly).convex_hull.area # if union_area == 0: # return 0 # iou = float(inter_area) / union_area # except 
shapely.geos.TopologicalError: # print('shapely.geos.TopologicalError occured, iou set to 0') # iou = 0 # return iou # TODO redundant, remove def _cat(tensors, dim=0): """ Efficient version of torch.cat avoids a copy if there is only a single element in a list """ assert isinstance(tensors, (list, tuple)) if len(tensors) == 1: return tensors[0] return torch.cat(tensors, dim) def _cat_mask(masks): polygons_cat = [] size = masks[0].size for mask in masks: polygons = mask.get_polygons() polygons_cat.extend(polygons) masks_cat = SegmentationMask(polygons_cat, size) return masks_cat def cat_boxlist(bboxes): """ Concatenates a list of BoxList (having the same image size) into a single BoxList Arguments: bboxes (list[BoxList]) """ # if bboxes is None: # return None # if bboxes[0] is None: # bboxes = [bboxes[1] assert isinstance(bboxes, (list, tuple)) assert all(isinstance(bbox, BoxList) for bbox in bboxes) size = bboxes[0].size assert all(bbox.size == size for bbox in bboxes) mode = bboxes[0].mode assert all(bbox.mode == mode for bbox in bboxes) fields = set(bboxes[0].fields()) assert all(set(bbox.fields()) == fields for bbox in bboxes) cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode) for field in fields: if field == 'masks': data = _cat_mask([bbox.get_field(field) for bbox in bboxes]) else: data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0) cat_boxes.add_field(field, data) return cat_boxes def cat_boxlist_gt(bboxes): """ Concatenates a list of BoxList (having the same image size) into a single BoxList Arguments: bboxes (list[BoxList]) """ assert isinstance(bboxes, (list, tuple)) assert all(isinstance(bbox, BoxList) for bbox in bboxes) size = bboxes[0].size # bboxes[1].set_size(size) assert all(bbox.size == size for bbox in bboxes) mode = bboxes[0].mode assert all(bbox.mode == mode for bbox in bboxes) fields = set(bboxes[0].fields()) assert all(set(bbox.fields()) == fields for bbox in bboxes) if bboxes[0].bbox.sum().item() == 0: 
cat_boxes = BoxList(bboxes[1].bbox, size, mode) else: cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode) for field in fields: if bboxes[0].bbox.sum().item() == 0: if field == 'masks': data = _cat_mask([bbox.get_field(field) for bbox in bboxes[1:]]) else: data = _cat([bbox.get_field(field) for bbox in bboxes[1:]], dim=0) else: if field == 'masks': data = _cat_mask([bbox.get_field(field) for bbox in bboxes]) else: data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0) cat_boxes.add_field(field, data) return cat_boxes
7,393
30.46383
90
py
MaskTextSpotterV3
MaskTextSpotterV3-master/maskrcnn_benchmark/structures/__init__.py
0
0
0
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/weighted_editdistance.py
def char2num(char):
    """Map a character to its row index in the score matrix.

    Digits occupy rows 0-9 and letters (case-insensitive) rows 10-35.
    Any other symbol aborts the program.
    """
    if char in '0123456789':
        num = ord(char) - ord('0') + 1
    elif char in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
        num = ord(char.lower()) - ord('a') + 11
    else:
        print('error symbol', char)
        exit()
    return num - 1


def ed_delect_cost(j, i, word1, word2, scores):
    """Cost of deleting word1[j]: the confidence of that character."""
    return scores[char2num(word1[j])][j]


def ed_insert_cost(i, j, word1, word2, scores):
    """Cost of inserting word2[j] next to position i of word1.

    Uses the mean confidence of the two neighbouring word1 characters,
    or just the last character's confidence at the word end.
    """
    row = char2num(word1[i])
    if i < len(word1) - 1:
        next_row = char2num(word1[i + 1])
        return (scores[row][i] + scores[next_row][i + 1]) / 2
    return scores[row][i]


def ed_replace_cost(i, j, word1, word2, scores):
    """Cost of replacing word1[i] with word2[j].

    The cost shrinks (down to 0) as the replacement character's own
    confidence at position i approaches the current character's.
    """
    cur_row = char2num(word1[i])
    new_row = char2num(word2[j])
    return max(1 - scores[new_row][i] / scores[cur_row][i] * 5, 0)


def weighted_edit_distance(word1, word2, scores):
    """Edit distance between word1 and word2 with confidence-weighted costs.

    ``scores[c][p]`` is the confidence of character class ``c`` at
    position ``p`` of word1.  Returns the minimal total cost (float).
    """
    cols = len(word1)
    rows = len(word2)
    # dp[r][c]: cost of matching word2[:r] against word1[:c]
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    dp[0] = list(range(cols + 1))
    for r in range(1, rows + 1):
        dp[r][0] = r
    for r in range(1, rows + 1):          # index into word2
        for c in range(1, cols + 1):      # index into word1
            del_cost = ed_delect_cost(c - 1, r - 1, word1, word2, scores)
            ins_cost = ed_insert_cost(c - 1, r - 1, word1, word2, scores)
            rep_cost = (0 if word1[c - 1] == word2[r - 1]
                        else ed_replace_cost(c - 1, r - 1, word1, word2, scores))
            dp[r][c] = min(dp[r - 1][c] + ins_cost,
                           dp[r][c - 1] + del_cost,
                           dp[r - 1][c - 1] + rep_cost)
    return dp[rows][cols]
1,813
31.981818
107
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/rotated_icdar2013/e2e/rrc_evaluation_funcs.py
#!/usr/bin/env python2
#encoding: UTF-8

# ICDAR "Robust Reading Competition" shared evaluation helpers: ZIP
# loading, ground-truth/detection line parsing & validation, and the main
# evaluation/validation command-line entry points.
import json
import sys;sys.path.append('./')
import zipfile
import re
import os
import codecs
import importlib
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO


def print_help():
    """Print command-line usage and exit with status 2."""
    sys.stdout.write('Usage: python %s.py -g=<gtFile> -s=<submFile> [-o=<outputFolder> -p=<jsonParams>]' %sys.argv[0])
    sys.exit(2)


def load_zip_file_keys(file,fileNameRegExp=''):
    """
    Returns an array with the entries of the ZIP file that match with the regular expression.
    The keys are the names of the file or the capturing group defined in the fileNameRegExp
    """
    try:
        archive=zipfile.ZipFile(file, mode='r', allowZip64=True)
    except Exception:
        raise Exception('Error loading the ZIP archive.')

    pairs = []

    for name in archive.namelist():
        addFile = True
        keyName = name
        if fileNameRegExp!="":
            m = re.match(fileNameRegExp,name)
            if m == None:
                addFile = False
            else:
                if len(m.groups())>0:
                    keyName = m.group(1)

        if addFile:
            pairs.append( keyName )

    return pairs


def load_zip_file(file,fileNameRegExp='',allEntries=False):
    """
    Returns a dict with the contents (filtered by fileNameRegExp) of a ZIP file.
    The keys are the names of the file or the capturing group defined in the fileNameRegExp
    allEntries validates that all entries in the ZIP file pass the fileNameRegExp
    """
    try:
        archive=zipfile.ZipFile(file, mode='r', allowZip64=True)
    except Exception:
        raise Exception('Error loading the ZIP archive')

    pairs = []
    for name in archive.namelist():
        addFile = True
        keyName = name
        if fileNameRegExp!="":
            m = re.match(fileNameRegExp,name)
            if m == None:
                addFile = False
            else:
                if len(m.groups())>0:
                    keyName = m.group(1)

        if addFile:
            pairs.append( [ keyName , archive.read(name)] )
        else:
            if allEntries:
                raise Exception('ZIP entry not valid: %s' %name)

    return dict(pairs)


def decode_utf8(raw):
    """
    Returns a Unicode object on success, or None on failure
    """
    try:
        raw = codecs.decode(raw,'utf-8', 'replace')
        #extracts BOM if exists
        raw = raw.encode('utf8')
        if raw.startswith(codecs.BOM_UTF8):
            raw = raw.replace(codecs.BOM_UTF8, '', 1)
        return raw.decode('utf-8')
    except Exception:
        return None


def validate_lines_in_file(fileName,file_contents,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
    """
    This function validates that all lines of the file calling the Line validation function for each line
    """
    utf8File = decode_utf8(file_contents)
    if (utf8File is None) :
        raise Exception("The file %s is not UTF-8" %fileName)

    lines = utf8File.split( "\r\n" if CRLF else "\n" )
    for line in lines:
        line = line.replace("\r","").replace("\n","")
        if(line != ""):
            try:
                validate_tl_line(line,LTRB,withTranscription,withConfidence,imWidth,imHeight)
            except Exception as e:
                raise Exception(("Line in sample not valid. Sample: %s Line: %s Error: %s" %(fileName,line,str(e))).encode('utf-8', 'replace'))


def validate_tl_line(line,LTRB=True,withTranscription=True,withConfidence=True,imWidth=0,imHeight=0):
    """
    Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Posible values are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    """
    get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight)


def get_tl_line_values(line,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
    """
    Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Posible values are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    Returns values from a textline. Points , [Confidences], [Transcriptions]
    """
    confidence = 0.0
    transcription = ""
    points = []

    if LTRB:
        numPoints = 4

        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
            if m == None :
                # Fixed: the original re-ran the identical re.match here
                # before raising; the duplicate call was dead code.
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax")

        xmin = int(m.group(1))
        ymin = int(m.group(2))
        xmax = int(m.group(3))
        ymax = int(m.group(4))
        if(xmax<xmin):
            raise Exception("Xmax value (%s) not valid (Xmax < Xmin)." %(xmax))
        if(ymax<ymin):
            raise Exception("Ymax value (%s)  not valid (Ymax < Ymin)." %(ymax))

        points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]

        if (imWidth>0 and imHeight>0):
            validate_point_inside_bounds(xmin,ymin,imWidth,imHeight)
            validate_point_inside_bounds(xmax,ymax,imWidth,imHeight)

    else:
        numPoints = 8

        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4")

        points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]

        validate_clockwise_points(points)

        if (imWidth>0 and imHeight>0):
            validate_point_inside_bounds(points[0],points[1],imWidth,imHeight)
            validate_point_inside_bounds(points[2],points[3],imWidth,imHeight)
            validate_point_inside_bounds(points[4],points[5],imWidth,imHeight)
            validate_point_inside_bounds(points[6],points[7],imWidth,imHeight)

    if withConfidence:
        try:
            confidence = float(m.group(numPoints+1))
        except ValueError:
            raise Exception("Confidence value must be a float")

    if withTranscription:
        posTranscription = numPoints + (2 if withConfidence else 1)
        transcription = m.group(posTranscription)
        m2 = re.match(r'^\s*\"(.*)\"\s*$',transcription)
        if m2 != None : #Transcription with double quotes, we extract the value and replace escaped characters
            transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"")

    return points,confidence,transcription


def validate_point_inside_bounds(x,y,imWidth,imHeight):
    """Raise if the point (x, y) lies outside the image rectangle."""
    # Fixed: the original referenced undefined names (xmin/ymin) and the
    # second message had five %s placeholders with only three arguments,
    # so any out-of-bounds point crashed with NameError/TypeError instead
    # of reporting the real validation error.
    if(x<0 or x>imWidth):
        raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" %(x,imWidth,imHeight))
    if(y<0 or y>imHeight):
        raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s)" %(y,imWidth,imHeight))


def validate_clockwise_points(points):
    """
    Validates that the 4 points that delimit a polygon are in clockwise order.
    """
    if len(points) != 8:
        raise Exception("Points list not valid." + str(len(points)))

    point = [
                [int(points[0]) , int(points[1])],
                [int(points[2]) , int(points[3])],
                [int(points[4]) , int(points[5])],
                [int(points[6]) , int(points[7])]
            ]
    # Shoelace-style edge sum: positive means counter-clockwise in image
    # coordinates (y axis pointing down), which is rejected.
    edge = [
                ( point[1][0] - point[0][0])*( point[1][1] + point[0][1]),
                ( point[2][0] - point[1][0])*( point[2][1] + point[1][1]),
                ( point[3][0] - point[2][0])*( point[3][1] + point[2][1]),
                ( point[0][0] - point[3][0])*( point[0][1] + point[3][1])
    ]

    summatory = edge[0] + edge[1] + edge[2] + edge[3]
    if summatory>0:
        raise Exception("Points are not clockwise. The coordinates of bounding quadrilaterals have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.")


def get_tl_line_values_from_file_contents(content,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0,sort_by_confidences=True):
    """
    Returns all points, confindences and transcriptions of a file in lists. Valid line formats:
    xmin,ymin,xmax,ymax,[confidence],[transcription]
    x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]
    """
    pointsList = []
    transcriptionsList = []
    confidencesList = []

    lines = content.split( "\r\n" if CRLF else "\n" )
    for line in lines:
        line = line.replace("\r","").replace("\n","")
        if(line != "") :
            points, confidence, transcription = get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight)
            pointsList.append(points)
            transcriptionsList.append(transcription)
            confidencesList.append(confidence)

    if withConfidence and len(confidencesList)>0 and sort_by_confidences:
        import numpy as np
        # Sort detections by decreasing confidence (needed for AP).
        sorted_ind = np.argsort(-np.array(confidencesList))
        confidencesList = [confidencesList[i] for i in sorted_ind]
        pointsList = [pointsList[i] for i in sorted_ind]
        transcriptionsList = [transcriptionsList[i] for i in sorted_ind]

    return pointsList,confidencesList,transcriptionsList


def main_evaluation(p,default_evaluation_params_fn,validate_data_fn,evaluate_method_fn,show_result=True,per_sample=True):
    """
    This process validates a method, evaluates it and if it succeeds generates a ZIP file with a JSON entry for each sample.
    Params:
    p: Dictionary of parameters with the GT/submission locations. If None is passed, the parameters sent by the system are used.
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
    evaluate_method_fn: points to a function that evaluates the submission and returns a Dictionary with the results
    """
    if (p == None):
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
        if(len(sys.argv)<3):
            print_help()

    evalParams = default_evaluation_params_fn()
    if 'p' in p.keys():
        evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) )

    resDict={'calculated':True,'Message':'','method':'{}','per_sample':'{}'}
    try:
        validate_data_fn(p['g'], p['s'], evalParams)
        evalData = evaluate_method_fn(p['g'], p['s'], evalParams)
        resDict.update(evalData)
    except Exception as e:
        resDict['Message']= str(e)
        resDict['calculated']=False

    if 'o' in p:
        if not os.path.exists(p['o']):
            os.makedirs(p['o'])

        resultsOutputname = p['o'] + '/results.zip'

        outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True)

        del resDict['per_sample']
        if 'output_items' in resDict.keys():
            del resDict['output_items']

        outZip.writestr('method.json',json.dumps(resDict))

    if not resDict['calculated']:
        if show_result:
            sys.stderr.write('Error!\n'+ resDict['Message']+'\n\n')
        if 'o' in p:
            outZip.close()
        return resDict

    if 'o' in p:
        if per_sample == True:
            for k,v in evalData['per_sample'].items():
                outZip.writestr( k + '.json',json.dumps(v))

            if 'output_items' in evalData.keys():
                for k, v in evalData['output_items'].items():
                    outZip.writestr( k,v)

        outZip.close()

    if show_result:
        sys.stdout.write("Calculated!")
        sys.stdout.write(json.dumps(resDict['method']))

    return resDict


def main_validation(default_evaluation_params_fn,validate_data_fn):
    """
    This process validates a method
    Params:
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
    """
    try:
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
        evalParams = default_evaluation_params_fn()
        if 'p' in p.keys():
            evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) )

        validate_data_fn(p['g'], p['s'], evalParams)
        print('SUCCESS')
        sys.exit(0)
    except Exception as e:
        print(str(e))
        sys.exit(101)
15,410
40.764228
359
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/rotated_icdar2013/e2e/script.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf8
# End-to-end evaluation script for rotated ICDAR2013: IoU-matches detected
# polygons against ground truth, compares transcriptions, and reports
# precision / recall / hmean (and AP when confidences are available).
from collections import namedtuple
import rrc_evaluation_funcs
import importlib
from prepare_results import prepare_results_for_evaluation

def evaluation_imports():
    """
    evaluation_imports: Dictionary ( key = module name , value = alias  )  with python modules used in the evaluation.
    """
    return {
            'Polygon':'plg',
            'numpy':'np'
            }

def default_evaluation_params():
    """
    default_evaluation_params: Default parameters to use for the validation and evaluation.
    """
    return {
                'IOU_CONSTRAINT' :0.5,
                'AREA_PRECISION_CONSTRAINT' :0.5,
                'WORD_SPOTTING' :False,
                'MIN_LENGTH_CARE_WORD' :3,
                'GT_SAMPLE_NAME_2_ID':'gt_img_([0-9]+).txt',
                'DET_SAMPLE_NAME_2_ID':'res_img_([0-9]+).txt',
                'LTRB':False, #LTRB:2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4)
                'CRLF':False, # Lines are delimited by Windows CRLF format
                'CONFIDENCES':False, #Detections must include confidence value. MAP and MAR will be calculated,
                'SPECIAL_CHARACTERS':'!?.:,*"()·[]/\'',
                'ONLY_REMOVE_FIRST_LAST_CHARACTER' : True
            }

def validate_data(gtFilePath, submFilePath, evaluationParams):
    """
    Method validate_data: validates that all files in the results folder are correct (have the correct name contents).
                            Validates also that there are no missing files in the folder.
                            If some error detected, the method raises the error
    """
    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])

    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    #Validate format of GroundTruth
    for k in gt:
        rrc_evaluation_funcs.validate_lines_in_file(k,gt[k],evaluationParams['CRLF'],evaluationParams['LTRB'],True)

    #Validate format of results
    for k in subm:
        if (k in gt) == False :
            raise Exception("The sample %s not present in GT" %k)

        rrc_evaluation_funcs.validate_lines_in_file(k,subm[k],evaluationParams['CRLF'],evaluationParams['LTRB'],True,evaluationParams['CONFIDENCES'])

def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """
    Method evaluate_method: evaluate method and returns the results
        Results. Dictionary with the following values:
        - method (required)  Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 }
        - samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 }
    """
    # Inject the geometry/numeric modules (Polygon as plg, numpy as np)
    # into module globals so the nested helpers below can use them.
    for module,alias in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def polygon_from_points(points,correctOffset=False):
        """
        Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
        """
        if correctOffset: #this will substract 1 from the coordinates that correspond to the xmax and ymax
            points[2] -= 1
            points[4] -= 1
            points[5] -= 1
            points[7] -= 1
        # Column 0 holds the four x coordinates, column 1 the four y's
        # after the reshape([2,4]).T below.
        resBoxes=np.empty([1,8],dtype='int32')
        resBoxes[0,0]=int(points[0])
        resBoxes[0,4]=int(points[1])
        resBoxes[0,1]=int(points[2])
        resBoxes[0,5]=int(points[3])
        resBoxes[0,2]=int(points[4])
        resBoxes[0,6]=int(points[5])
        resBoxes[0,3]=int(points[6])
        resBoxes[0,7]=int(points[7])
        pointMat = resBoxes[0].reshape([2,4]).T
        return plg.Polygon( pointMat)

    def rectangle_to_polygon(rect):
        # Convert an axis-aligned Rectangle namedtuple into a 4-corner polygon.
        resBoxes=np.empty([1,8],dtype='int32')
        resBoxes[0,0]=int(rect.xmin)
        resBoxes[0,4]=int(rect.ymax)
        resBoxes[0,1]=int(rect.xmin)
        resBoxes[0,5]=int(rect.ymin)
        resBoxes[0,2]=int(rect.xmax)
        resBoxes[0,6]=int(rect.ymin)
        resBoxes[0,3]=int(rect.xmax)
        resBoxes[0,7]=int(rect.ymax)
        pointMat = resBoxes[0].reshape([2,4]).T
        return plg.Polygon( pointMat)

    def rectangle_to_points(rect):
        # Rectangle corners as a flat [x,y,...] list, clockwise from bottom-left.
        points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)]
        return points

    def get_union(pD,pG):
        # |A| + |B| - |A ∩ B|
        areaA = pD.area();
        areaB = pG.area();
        return areaA + areaB - get_intersection(pD, pG);

    def get_intersection_over_union(pD,pG):
        try:
            return get_intersection(pD, pG) / get_union(pD, pG);
        except:
            # Degenerate polygons (zero union) yield IoU 0.
            return 0

    def get_intersection(pD,pG):
        pInt = pD & pG
        if len(pInt) == 0:
            return 0
        return pInt.area()

    def compute_ap(confList, matchList,numGtCare):
        # Average precision over detections sorted by decreasing confidence.
        correct = 0
        AP = 0
        if len(confList)>0:
            confList = np.array(confList)
            matchList = np.array(matchList)
            sorted_ind = np.argsort(-confList)
            confList = confList[sorted_ind]
            matchList = matchList[sorted_ind]
            for n in range(len(confList)):
                match = matchList[n]
                if match:
                    correct += 1
                    AP += float(correct)/(n + 1)

            if numGtCare>0:
                AP /= numGtCare

        return AP

    def transcription_match(transGt,transDet,specialCharacters='!?.:,*"()·[]/\'',onlyRemoveFirstLastCharacterGT=True):
        # Compare transcriptions, tolerating special characters at the
        # boundaries of the GT (and optionally of the detection too).
        if onlyRemoveFirstLastCharacterGT:
            #special characters in GT are allowed only at initial or final position
            if (transGt==transDet):
                return True

            if specialCharacters.find(transGt[0])>-1:
                if transGt[1:]==transDet:
                    return True

            if specialCharacters.find(transGt[-1])>-1:
                if transGt[0:len(transGt)-1]==transDet:
                    return True

            if specialCharacters.find(transGt[0])>-1 and specialCharacters.find(transGt[-1])>-1:
                if transGt[1:len(transGt)-1]==transDet:
                    return True
            return False
        else:
            #Special characters are removed from the begining and the end of both Detection and GroundTruth
            while len(transGt)>0 and specialCharacters.find(transGt[0])>-1:
                transGt = transGt[1:]

            while len(transDet)>0 and specialCharacters.find(transDet[0])>-1:
                transDet = transDet[1:]

            while len(transGt)>0 and specialCharacters.find(transGt[-1])>-1 :
                transGt = transGt[0:len(transGt)-1]

            while len(transDet)>0 and specialCharacters.find(transDet[-1])>-1:
                transDet = transDet[0:len(transDet)-1]

            return transGt == transDet

    def include_in_dictionary(transcription):
        """
        Function used in Word Spotting that finds if the Ground Truth transcription meets the rules to enter into the dictionary. If not, the transcription will be cared as don't care
        """
        #special case 's at final
        if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S":
            transcription = transcription[0:len(transcription)-2]

        #hypens at init or final of the word
        transcription = transcription.strip('-');

        specialCharacters = "'!?.:,*\"()·[]/";
        for character in specialCharacters:
            transcription = transcription.replace(character,' ')

        transcription = transcription.strip()

        # Reject multi-word entries (any inner whitespace remains after strip).
        if len(transcription) != len(transcription.replace(" ","")) :
            return False;

        if len(transcription) < evaluationParams['MIN_LENGTH_CARE_WORD']:
            return False;

        notAllowed = "×÷·";

        # Accepted codepoint ranges: basic Latin letters, Latin supplements
        # and Greek, plus the hyphen.
        range1 = [ ord(u'a'), ord(u'z') ]
        range2 = [ ord(u'A'), ord(u'Z') ]
        range3 = [ ord(u'À'), ord(u'ƿ') ]
        range4 = [ ord(u'DŽ'), ord(u'ɿ') ]
        range5 = [ ord(u'Ά'), ord(u'Ͽ') ]
        range6 = [ ord(u'-'), ord(u'-') ]

        for char in transcription :
            charCode = ord(char)
            if(notAllowed.find(char) != -1):
                return False

            valid = ( charCode>=range1[0] and charCode<=range1[1] ) or ( charCode>=range2[0] and charCode<=range2[1] ) or ( charCode>=range3[0] and charCode<=range3[1] ) or ( charCode>=range4[0] and charCode<=range4[1] ) or ( charCode>=range5[0] and charCode<=range5[1] ) or ( charCode>=range6[0] and charCode<=range6[1] )
            if valid == False:
                return False

        return True

    def include_in_dictionary_transcription(transcription):
        """
        Function applied to the Ground Truth transcriptions used in Word Spotting. It removes special characters or terminations
        """
        #special case 's at final
        if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S":
            transcription = transcription[0:len(transcription)-2]

        #hypens at init or final of the word
        transcription = transcription.strip('-');

        specialCharacters = "'!?.:,*\"()·[]/";
        for character in specialCharacters:
            transcription = transcription.replace(character,' ')

        transcription = transcription.strip()

        return transcription

    perSampleMetrics = {}

    matchedSum = 0

    Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')

    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath,evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath,evaluationParams['DET_SAMPLE_NAME_2_ID'],True)

    numGlobalCareGt = 0;
    numGlobalCareDet = 0;

    arrGlobalConfidences = [];
    arrGlobalMatches = [];

    # Evaluate each ground-truth sample independently, accumulating the
    # global counters used for the dataset-level metrics at the end.
    for resFile in gt:

        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        if (gtFile is None) :
            raise Exception("The file %s is not UTF-8" %resFile)

        recall = 0
        precision = 0
        hmean = 0
        detCorrect = 0
        iouMat = np.empty([1,1])
        gtPols = []
        detPols = []
        gtTrans = []
        detTrans = []
        gtPolPoints = []
        detPolPoints = []
        gtDontCarePolsNum = [] #Array of Ground Truth Polygons' keys marked as don't Care
        detDontCarePolsNum = [] #Array of Detected Polygons' matched with a don't Care GT
        detMatchedNums = []
        pairs = []
        arrSampleConfidences = [];
        arrSampleMatch = [];
        sampleAP = 0;
        evaluationLog = ""

        pointsList,_,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,False)
        for n in range(len(pointsList)):
            points = pointsList[n]
            transcription = transcriptionsList[n]
            dontCare = transcription == "###"
            if evaluationParams['LTRB']:
                gtRect = Rectangle(*points)
                gtPol = rectangle_to_polygon(gtRect)
            else:
                gtPol = polygon_from_points(points)
            gtPols.append(gtPol)
            gtPolPoints.append(points)

            #On word spotting we will filter some transcriptions with special characters
            if evaluationParams['WORD_SPOTTING'] :
                if dontCare == False :
                    if include_in_dictionary(transcription) == False :
                        dontCare = True
                    else:
                        transcription = include_in_dictionary_transcription(transcription)

            gtTrans.append(transcription)
            if dontCare:
                gtDontCarePolsNum.append( len(gtPols)-1 )

        evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum)>0 else "\n")

        if resFile in subm:

            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])

            pointsList,confidencesList,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,evaluationParams['CONFIDENCES'])

            for n in range(len(pointsList)):
                points = pointsList[n]
                transcription = transcriptionsList[n]

                if evaluationParams['LTRB']:
                    detRect = Rectangle(*points)
                    detPol = rectangle_to_polygon(detRect)
                else:
                    detPol = polygon_from_points(points)
                detPols.append(detPol)
                detPolPoints.append(points)
                detTrans.append(transcription)

                # A detection mostly covered by a don't-care GT region is
                # itself flagged as don't-care and excluded from counting.
                if len(gtDontCarePolsNum)>0 :
                    for dontCarePol in gtDontCarePolsNum:
                        dontCarePol = gtPols[dontCarePol]
                        intersected_area = get_intersection(dontCarePol,detPol)
                        pdDimensions = detPol.area()
                        precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
                        if (precision > evaluationParams['AREA_PRECISION_CONSTRAINT'] ):
                            detDontCarePolsNum.append( len(detPols)-1 )
                            break

            evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum)>0 else "\n")

            if len(gtPols)>0 and len(detPols)>0:
                #Calculate IoU and precision matrixs
                outputShape=[len(gtPols),len(detPols)]
                iouMat = np.empty(outputShape)
                gtRectMat = np.zeros(len(gtPols),np.int8)
                detRectMat = np.zeros(len(detPols),np.int8)
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        pG = gtPols[gtNum]
                        pD = detPols[detNum]
                        iouMat[gtNum,detNum] = get_intersection_over_union(pD,pG)

                # Greedy one-to-one matching: first unmatched pair above
                # the IoU threshold wins.
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum :
                            if iouMat[gtNum,detNum]>evaluationParams['IOU_CONSTRAINT']:
                                gtRectMat[gtNum] = 1
                                detRectMat[detNum] = 1
                                #detection matched only if transcription is equal
                                if evaluationParams['WORD_SPOTTING']:
                                    correct = gtTrans[gtNum].upper() == detTrans[detNum].upper()
                                else:
                                    correct = transcription_match(gtTrans[gtNum].upper(),detTrans[detNum].upper(),evaluationParams['SPECIAL_CHARACTERS'],evaluationParams['ONLY_REMOVE_FIRST_LAST_CHARACTER'])==True
                                detCorrect += (1 if correct else 0)
                                if correct:
                                    detMatchedNums.append(detNum)
                                pairs.append({'gt':gtNum,'det':detNum,'correct':correct})
                                evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + " trans. correct: " + str(correct) + "\n"

            if evaluationParams['CONFIDENCES']:
                for detNum in range(len(detPols)):
                    if detNum not in detDontCarePolsNum :
                        #we exclude the don't care detections
                        match = detNum in detMatchedNums

                        arrSampleConfidences.append(confidencesList[detNum])
                        arrSampleMatch.append(match)

                        arrGlobalConfidences.append(confidencesList[detNum]);
                        arrGlobalMatches.append(match);

        numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
        numDetCare = (len(detPols) - len(detDontCarePolsNum))
        if numGtCare == 0:
            # No countable GT: recall is 1 by convention, precision
            # depends on whether spurious detections exist.
            recall = float(1)
            precision = float(0) if numDetCare >0 else float(1)
            sampleAP = precision
        else:
            recall = float(detCorrect) / numGtCare
            precision = 0 if numDetCare==0 else float(detCorrect) / numDetCare
            if evaluationParams['CONFIDENCES']:
                sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare )

        hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)

        matchedSum += detCorrect
        numGlobalCareGt += numGtCare
        numGlobalCareDet += numDetCare

        perSampleMetrics[resFile] = {
                                        'precision':precision,
                                        'recall':recall,
                                        'hmean':hmean,
                                        'pairs':pairs,
                                        'AP':sampleAP,
                                        'iouMat':[] if len(detPols)>100 else iouMat.tolist(),
                                        'gtPolPoints':gtPolPoints,
                                        'detPolPoints':detPolPoints,
                                        'gtTrans':gtTrans,
                                        'detTrans':detTrans,
                                        'gtDontCare':gtDontCarePolsNum,
                                        'detDontCare':detDontCarePolsNum,
                                        'evaluationParams': evaluationParams,
                                        'evaluationLog': evaluationLog
                                    }

    # Compute AP
    AP = 0
    if evaluationParams['CONFIDENCES']:
        AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt)

    methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum)/numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum)/numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)

    methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean, 'AP': AP  }

    resDict = {'calculated':True,'Message':'','method': methodMetrics,'per_sample': perSampleMetrics}

    return resDict;

if __name__=='__main__':
    '''
    results_dir: result directory
    score_det: score of detection bounding box
    score_rec: score of the mask recognition branch
    score_rec_seq: score of the sequence recognition branch
    lexicon_type: 1 for generic; 2 for weak; 3 for strong
    '''
    angle = 45
    results_dir = '../../../output/mixtrain/inference/rotated_ic13_test_' + str(angle) + '/model_0250000_1000_results/'
    score_rec_seq = 0.9
    score_rec = 0.4
    score_det = 0.1
    # Filter/NMS the raw results into a ZIP suitable for the evaluator.
    evaluate_result_path = prepare_results_for_evaluation(results_dir,
                use_lexicon=False, cache_dir='./cache_files',
                score_det=score_det, score_rec=score_rec, score_rec_seq=score_rec_seq)
    p = {
        'g': '../gt/gt_'+str(angle)+'.zip',
        's': evaluate_result_path
    }
    rrc_evaluation_funcs.main_evaluation(p,default_evaluation_params,validate_data,evaluate_method)
20,045
42.578261
322
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/rotated_icdar2013/e2e/prepare_results.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys import os sys.path.append('./') import shapely from shapely.geometry import Polygon,MultiPoint import numpy as np import editdistance sys.path.append('../../') from weighted_editdistance import weighted_edit_distance from tqdm import tqdm try: import pickle except ImportError: import cPickle as pickle def list_from_str(st): line = st.split(',') # box[0:4], polygon[4:12], word, seq_word, detection_score, rec_socre, seq_score, char_score_path new_line = [float(a) for a in line[4:12]]+[float(line[-4])]+[line[-5]]+[line[-6]]+[float(line[-3])]+[float(line[-2])] + [line[-1]] return new_line def polygon_from_list(line): """ Create a shapely polygon object from gt or dt line. """ polygon_points = np.array(line).reshape(4, 2) polygon = Polygon(polygon_points).convex_hull return polygon def polygon_iou(list1, list2): """ Intersection over union between two shapely polygons. """ polygon_points1 = np.array(list1).reshape(4, 2) poly1 = Polygon(polygon_points1).convex_hull polygon_points2 = np.array(list2).reshape(4, 2) poly2 = Polygon(polygon_points2).convex_hull union_poly = np.concatenate((polygon_points1,polygon_points2)) if not poly1.intersects(poly2): # this test is fast and can accelerate calculation iou = 0 else: try: inter_area = poly1.intersection(poly2).area #union_area = poly1.area + poly2.area - inter_area union_area = MultiPoint(union_poly).convex_hull.area iou = float(inter_area) / (union_area+1e-6) except shapely.geos.TopologicalError: print('shapely.geos.TopologicalError occured, iou set to 0') iou = 0 return iou def nms(boxes,overlap): rec_scores = [b[-2] for b in boxes] indices = sorted(range(len(rec_scores)), key=lambda k: -rec_scores[k]) box_num = len(boxes) nms_flag = [True]*box_num for i in range(box_num): ii = indices[i] if not nms_flag[ii]: continue for j in range(box_num): jj = indices[j] if j == i: continue if not nms_flag[jj]: continue box1 = boxes[ii] box2 = boxes[jj] box1_score = 
rec_scores[ii] box2_score = rec_scores[jj] str1 = box1[9] str2 = box2[9] box_i = [box1[0],box1[1],box1[4],box1[5]] box_j = [box2[0],box2[1],box2[4],box2[5]] poly1 = polygon_from_list(box1[0:8]) poly2 = polygon_from_list(box2[0:8]) iou = polygon_iou(box1[0:8],box2[0:8]) thresh = overlap if iou > thresh: if box1_score > box2_score: nms_flag[jj] = False if box1_score == box2_score and poly1.area > poly2.area: nms_flag[jj] = False if box1_score == box2_score and poly1.area<=poly2.area: nms_flag[ii] = False break return nms_flag def packing(save_dir, cache_dir, pack_name): files = os.listdir(save_dir) if not os.path.exists(cache_dir): os.mkdir(cache_dir) os.system('zip -r -q -j '+os.path.join(cache_dir, pack_name+'.zip')+' '+save_dir+'/*') def test_single(results_dir,lexicon_type=3,cache_dir='./cache_dir',score_det=0.5,score_rec=0.5,score_rec_seq=0.5,overlap=0.2, use_lexicon=True, weighted_ed=True, use_seq=False, use_char=False, mix=False): ''' results_dir: result directory score_det: score of detection bounding box score_rec: score of the mask recognition branch socre_rec_seq: score of the sequence recognition branch overlap: overlap threshold used for nms lexicon_type: 1 for generic; 2 for weak; 3 for strong use_seq: use the recognition result of sequence branch use_mix: use both the recognition result of the mask and sequence branches, selected by score ''' print('score_det:', 'score_det:', score_det, 'score_rec:', score_rec, 'score_rec_seq:', score_rec_seq, 'lexicon_type:', lexicon_type, 'weighted_ed:', weighted_ed, 'use_seq:', use_seq, 'use_char:', use_char, 'mix:', mix) if not os.path.exists(cache_dir): os.mkdir(cache_dir) nms_dir = os.path.join(cache_dir,str(score_det)+'_'+str(score_rec)+'_'+str(score_rec_seq)) if not os.path.exists(nms_dir): os.mkdir(nms_dir) if lexicon_type==1: # generic lexicon lexicon_path = '../../lexicons/ic13/GenericVocabulary_new.txt' lexicon_fid=open(lexicon_path, 'r') pair_list = 
open('../../lexicons/ic13/GenericVocabulary_pair_list.txt', 'r') pairs = dict() for line in pair_list.readlines(): line=line.strip() word = line.split(' ')[0].upper() word_gt = line[len(word)+1:] pairs[word] = word_gt lexicon_fid=open(lexicon_path, 'r') lexicon=[] for line in lexicon_fid.readlines(): line=line.strip() lexicon.append(line) if lexicon_type==2: # weak lexicon lexicon_path = '../../lexicons/ic13/ch4_test_vocabulary_new.txt' lexicon_fid=open(lexicon_path, 'r') pair_list = open('../../lexicons/ic13/ch4_test_vocabulary_pair_list.txt', 'r') pairs = dict() for line in pair_list.readlines(): line=line.strip() word = line.split(' ')[0].upper() word_gt = line[len(word)+1:] pairs[word] = word_gt lexicon_fid=open(lexicon_path, 'r') lexicon=[] for line in lexicon_fid.readlines(): line=line.strip() lexicon.append(line) for i in tqdm(range(1,234)): img = 'img_'+str(i)+'.jpg' gt_img = 'gt_img_'+str(i)+'.txt' if lexicon_type==3: # weak lexicon_path = '../../lexicons/ic13/new_strong_lexicon/new_voc_img_' + str(i) + '.txt' lexicon_fid=open(lexicon_path, 'r') pair_list = open('../../lexicons/ic13/new_strong_lexicon/pair_voc_img_' + str(i) + '.txt', 'r') pairs = dict() for line in pair_list.readlines(): line=line.strip() word = line.split(' ')[0].upper() word_gt = line[len(word)+1:] pairs[word] = word_gt lexicon_fid=open(lexicon_path, 'r') lexicon=[] for line in lexicon_fid.readlines(): line=line.strip() lexicon.append(line) result_path = os.path.join(results_dir,'res_img_'+str(i)+'.txt') if os.path.isfile(result_path): with open(result_path,'r') as f: dt_lines = [a.strip() for a in f.readlines()] dt_lines = [list_from_str(dt) for dt in dt_lines] else: dt_lines = [] dt_lines = [dt for dt in dt_lines if dt[-2]>score_rec_seq and dt[-3]>score_rec and dt[-6]>score_det] nms_flag = nms(dt_lines,overlap) boxes = [] for k in range(len(dt_lines)): dt = dt_lines[k] if nms_flag[k]: if dt not in boxes: boxes.append(dt) with open(os.path.join(nms_dir,'res_img_'+str(i)+'.txt'),'w') as 
f: for g in boxes: gt_coors = [int(b) for b in g[0:8]] with open('../../../' + g[-1], "rb") as input_file: # with open(g[-1], "rb") as input_file: dict_scores = pickle.load(input_file) if use_char and use_seq: if g[-2]>g[-3]: word = g[-5] scores = dict_scores['seq_char_scores'][:,1:-1].swapaxes(0,1) else: word = g[-4] scores = dict_scores['seg_char_scores'] elif use_seq: word = g[-5] scores = dict_scores['seq_char_scores'][:,1:-1].swapaxes(0,1) else: word = g[-4] scores = dict_scores['seg_char_scores'] if not use_lexicon: match_word = word match_dist = 0. else: match_word, match_dist = find_match_word(word, lexicon, pairs, scores, use_lexicon, weighted_ed) if match_dist<1.5 or lexicon_type==1: gt_coor_strs = [str(a) for a in gt_coors]+ [match_word] f.write(','.join(gt_coor_strs)+'\r\n') pack_name = str(score_det)+'_'+str(score_rec)+'_over'+str(overlap) packing(nms_dir,cache_dir,pack_name) submit_file_path = os.path.join(cache_dir, pack_name+'.zip') return submit_file_path def find_match_word(rec_str, lexicon, pairs, scores_numpy, use_ed = True, weighted_ed = False): if not use_ed: return rec_str rec_str = rec_str.upper() dist_min = 100 dist_min_pre = 100 match_word = '' match_dist = 100 if not weighted_ed: for word in lexicon: word = word.upper() ed = editdistance.eval(rec_str, word) length_dist = abs(len(word) - len(rec_str)) # dist = ed + length_dist dist = ed if dist<dist_min: dist_min = dist match_word = pairs[word] match_dist = dist return match_word, match_dist else: small_lexicon_dict = dict() for word in lexicon: word = word.upper() ed = editdistance.eval(rec_str, word) small_lexicon_dict[word] = ed dist = ed if dist<dist_min_pre: dist_min_pre = dist small_lexicon = [] for word in small_lexicon_dict: if small_lexicon_dict[word]<=dist_min_pre+2: small_lexicon.append(word) for word in small_lexicon: word = word.upper() ed = weighted_edit_distance(rec_str, word, scores_numpy) dist = ed if dist<dist_min: dist_min = dist match_word = pairs[word] match_dist = 
dist return match_word, match_dist def prepare_results_for_evaluation(results_dir, use_lexicon, cache_dir, score_det, score_rec, score_rec_seq): if not os.path.isdir(cache_dir): os.mkdir(cache_dir) result_path = test_single(results_dir,score_det=score_det,score_rec=score_rec,score_rec_seq=score_rec_seq,overlap=0.2,cache_dir=cache_dir,lexicon_type=3, use_lexicon=use_lexicon, weighted_ed=True, use_seq=True, use_char=True, mix=True) return result_path
10,790
39.41573
239
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/icdar2015/e2e/rrc_evaluation_funcs.py
#!/usr/bin/env python2 #encoding: UTF-8 import json import sys;sys.path.append('./') import zipfile import re import sys import os import codecs import importlib try: from StringIO import StringIO except ImportError: from io import StringIO def print_help(): sys.stdout.write('Usage: python %s.py -g=<gtFile> -s=<submFile> [-o=<outputFolder> -p=<jsonParams>]' %sys.argv[0]) sys.exit(2) def load_zip_file_keys(file,fileNameRegExp=''): """ Returns an array with the entries of the ZIP file that match with the regular expression. The key's are the names or the file or the capturing group definied in the fileNameRegExp """ try: archive=zipfile.ZipFile(file, mode='r', allowZip64=True) except : raise Exception('Error loading the ZIP archive.') pairs = [] for name in archive.namelist(): addFile = True keyName = name if fileNameRegExp!="": m = re.match(fileNameRegExp,name) if m == None: addFile = False else: if len(m.groups())>0: keyName = m.group(1) if addFile: pairs.append( keyName ) return pairs def load_zip_file(file,fileNameRegExp='',allEntries=False): """ Returns an array with the contents (filtered by fileNameRegExp) of a ZIP file. 
The key's are the names or the file or the capturing group definied in the fileNameRegExp allEntries validates that all entries in the ZIP file pass the fileNameRegExp """ try: archive=zipfile.ZipFile(file, mode='r', allowZip64=True) except : raise Exception('Error loading the ZIP archive') pairs = [] for name in archive.namelist(): addFile = True keyName = name if fileNameRegExp!="": m = re.match(fileNameRegExp,name) if m == None: addFile = False else: if len(m.groups())>0: keyName = m.group(1) if addFile: pairs.append( [ keyName , archive.read(name)] ) else: if allEntries: raise Exception('ZIP entry not valid: %s' %name) return dict(pairs) def decode_utf8(raw): """ Returns a Unicode object on success, or None on failure """ try: raw = codecs.decode(raw,'utf-8', 'replace') #extracts BOM if exists raw = raw.encode('utf8') if raw.startswith(codecs.BOM_UTF8): raw = raw.replace(codecs.BOM_UTF8, '', 1) return raw.decode('utf-8') except: return None def validate_lines_in_file(fileName,file_contents,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0): """ This function validates that all lines of the file calling the Line validation function for each line """ utf8File = decode_utf8(file_contents) if (utf8File is None) : raise Exception("The file %s is not UTF-8" %fileName) lines = utf8File.split( "\r\n" if CRLF else "\n" ) for line in lines: line = line.replace("\r","").replace("\n","") if(line != ""): try: validate_tl_line(line,LTRB,withTranscription,withConfidence,imWidth,imHeight) except Exception as e: raise Exception(("Line in sample not valid. Sample: %s Line: %s Error: %s" %(fileName,line,str(e))).encode('utf-8', 'replace')) def validate_tl_line(line,LTRB=True,withTranscription=True,withConfidence=True,imWidth=0,imHeight=0): """ Validate the format of the line. If the line is not valid an exception will be raised. If maxWidth and maxHeight are specified, all points must be inside the imgage bounds. 
Posible values are: LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription] LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription] """ get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight) def get_tl_line_values(line,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0): """ Validate the format of the line. If the line is not valid an exception will be raised. If maxWidth and maxHeight are specified, all points must be inside the imgage bounds. Posible values are: LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription] LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription] Returns values from a textline. Points , [Confidences], [Transcriptions] """ confidence = 0.0 transcription = ""; points = [] numPoints = 4; if LTRB: numPoints = 4; if withTranscription and withConfidence: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line) if m == None : m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line) raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription") elif withConfidence: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line) if m == None : raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence") elif withTranscription: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$',line) if m == None : raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription") else: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$',line) if m == None : raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax") xmin = int(m.group(1)) ymin = int(m.group(2)) xmax = int(m.group(3)) ymax = int(m.group(4)) if(xmax<xmin): raise Exception("Xmax value (%s) not valid (Xmax < Xmin)." 
%(xmax)) if(ymax<ymin): raise Exception("Ymax value (%s) not valid (Ymax < Ymin)." %(ymax)) points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ] if (imWidth>0 and imHeight>0): validate_point_inside_bounds(xmin,ymin,imWidth,imHeight); validate_point_inside_bounds(xmax,ymax,imWidth,imHeight); else: numPoints = 8; if withTranscription and withConfidence: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line) if m == None : raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription") elif withConfidence: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line) if m == None : raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence") elif withTranscription: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$',line) if m == None : raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription") else: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$',line) if m == None : raise Exception("Format incorrect. 
Should be: x1,y1,x2,y2,x3,y3,x4,y4") points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ] validate_clockwise_points(points) if (imWidth>0 and imHeight>0): validate_point_inside_bounds(points[0],points[1],imWidth,imHeight); validate_point_inside_bounds(points[2],points[3],imWidth,imHeight); validate_point_inside_bounds(points[4],points[5],imWidth,imHeight); validate_point_inside_bounds(points[6],points[7],imWidth,imHeight); if withConfidence: try: confidence = float(m.group(numPoints+1)) except ValueError: raise Exception("Confidence value must be a float") if withTranscription: posTranscription = numPoints + (2 if withConfidence else 1) transcription = m.group(posTranscription) m2 = re.match(r'^\s*\"(.*)\"\s*$',transcription) if m2 != None : #Transcription with double quotes, we extract the value and replace escaped characters transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"") return points,confidence,transcription def validate_point_inside_bounds(x,y,imWidth,imHeight): if(x<0 or x>imWidth): raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" %(xmin,imWidth,imHeight)) if(y<0 or y>imHeight): raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s) Sample: %s Line:%s" %(ymin,imWidth,imHeight)) def validate_clockwise_points(points): """ Validates that the points that the 4 points that dlimite a polygon are in clockwise order. """ if len(points) != 8: raise Exception("Points list not valid." + str(len(points))) point = [ [int(points[0]) , int(points[1])], [int(points[2]) , int(points[3])], [int(points[4]) , int(points[5])], [int(points[6]) , int(points[7])] ] edge = [ ( point[1][0] - point[0][0])*( point[1][1] + point[0][1]), ( point[2][0] - point[1][0])*( point[2][1] + point[1][1]), ( point[3][0] - point[2][0])*( point[3][1] + point[2][1]), ( point[0][0] - point[3][0])*( point[0][1] + point[3][1]) ] summatory = edge[0] + edge[1] + edge[2] + edge[3]; if summatory>0: raise Exception("Points are not clockwise. 
The coordinates of bounding quadrilaterals have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.") def get_tl_line_values_from_file_contents(content,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0,sort_by_confidences=True): """ Returns all points, confindences and transcriptions of a file in lists. Valid line formats: xmin,ymin,xmax,ymax,[confidence],[transcription] x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription] """ pointsList = [] transcriptionsList = [] confidencesList = [] lines = content.split( "\r\n" if CRLF else "\n" ) for line in lines: line = line.replace("\r","").replace("\n","") if(line != "") : points, confidence, transcription = get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight); pointsList.append(points) transcriptionsList.append(transcription) confidencesList.append(confidence) if withConfidence and len(confidencesList)>0 and sort_by_confidences: import numpy as np sorted_ind = np.argsort(-np.array(confidencesList)) confidencesList = [confidencesList[i] for i in sorted_ind] pointsList = [pointsList[i] for i in sorted_ind] transcriptionsList = [transcriptionsList[i] for i in sorted_ind] return pointsList,confidencesList,transcriptionsList def main_evaluation(p,default_evaluation_params_fn,validate_data_fn,evaluate_method_fn,show_result=True,per_sample=True): """ This process validates a method, evaluates it and if it succed generates a ZIP file with a JSON entry for each sample. Params: p: Dictionary of parmeters with the GT/submission locations. If None is passed, the parameters send by the system are used. 
default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation validate_data_fn: points to a method that validates the corrct format of the submission evaluate_method_fn: points to a function that evaluated the submission and return a Dictionary with the results """ if (p == None): p = dict([s[1:].split('=') for s in sys.argv[1:]]) if(len(sys.argv)<3): print_help() evalParams = default_evaluation_params_fn() if 'p' in p.keys(): evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) ) resDict={'calculated':True,'Message':'','method':'{}','per_sample':'{}'} try: validate_data_fn(p['g'], p['s'], evalParams) evalData = evaluate_method_fn(p['g'], p['s'], evalParams) resDict.update(evalData) except Exception as e: resDict['Message']= str(e) resDict['calculated']=False if 'o' in p: if not os.path.exists(p['o']): os.makedirs(p['o']) resultsOutputname = p['o'] + '/results.zip' outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True) del resDict['per_sample'] if 'output_items' in resDict.keys(): del resDict['output_items'] outZip.writestr('method.json',json.dumps(resDict)) if not resDict['calculated']: if show_result: sys.stderr.write('Error!\n'+ resDict['Message']+'\n\n') if 'o' in p: outZip.close() return resDict if 'o' in p: if per_sample == True: for k,v in evalData['per_sample'].items(): outZip.writestr( k + '.json',json.dumps(v)) if 'output_items' in evalData.keys(): for k, v in evalData['output_items'].items(): outZip.writestr( k,v) outZip.close() if show_result: sys.stdout.write("Calculated!") sys.stdout.write(json.dumps(resDict['method'])) return resDict def main_validation(default_evaluation_params_fn,validate_data_fn): """ This process validates a method Params: default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation validate_data_fn: points to a method that validates the corrct format 
of the submission """ try: p = dict([s[1:].split('=') for s in sys.argv[1:]]) evalParams = default_evaluation_params_fn() if 'p' in p.keys(): evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) ) validate_data_fn(p['g'], p['s'], evalParams) print('SUCCESS') sys.exit(0) except Exception as e: print(str(e)) sys.exit(101)
15,410
40.764228
359
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/icdar2015/e2e/script.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # encoding=utf8 from collections import namedtuple import rrc_evaluation_funcs import importlib from prepare_results import prepare_results_for_evaluation def evaluation_imports(): """ evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation. """ return { 'Polygon':'plg', 'numpy':'np' } def default_evaluation_params(): """ default_evaluation_params: Default parameters to use for the validation and evaluation. """ return { 'IOU_CONSTRAINT' :0.5, 'AREA_PRECISION_CONSTRAINT' :0.5, 'WORD_SPOTTING' :False, 'MIN_LENGTH_CARE_WORD' :3, 'GT_SAMPLE_NAME_2_ID':'gt_img_([0-9]+).txt', 'DET_SAMPLE_NAME_2_ID':'res_img_([0-9]+).txt', 'LTRB':False, #LTRB:2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4) 'CRLF':False, # Lines are delimited by Windows CRLF format 'CONFIDENCES':False, #Detections must include confidence value. MAP and MAR will be calculated, 'SPECIAL_CHARACTERS':'!?.:,*"()·[]/\'', 'ONLY_REMOVE_FIRST_LAST_CHARACTER' : True } def validate_data(gtFilePath, submFilePath, evaluationParams): """ Method validate_data: validates that all files in the results folder are correct (have the correct name contents). Validates also that there are no missing files in the folder. 
If some error detected, the method raises the error """ gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID']) subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True) #Validate format of GroundTruth for k in gt: rrc_evaluation_funcs.validate_lines_in_file(k,gt[k],evaluationParams['CRLF'],evaluationParams['LTRB'],True) #Validate format of results for k in subm: if (k in gt) == False : raise Exception("The sample %s not present in GT" %k) rrc_evaluation_funcs.validate_lines_in_file(k,subm[k],evaluationParams['CRLF'],evaluationParams['LTRB'],True,evaluationParams['CONFIDENCES']) def evaluate_method(gtFilePath, submFilePath, evaluationParams): """ Method evaluate_method: evaluate method and returns the results Results. Dictionary with the following values: - method (required) Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 } - samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 } """ for module,alias in evaluation_imports().items(): globals()[alias] = importlib.import_module(module) def polygon_from_points(points,correctOffset=False): """ Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4 """ if correctOffset: #this will substract 1 from the coordinates that correspond to the xmax and ymax points[2] -= 1 points[4] -= 1 points[5] -= 1 points[7] -= 1 resBoxes=np.empty([1,8],dtype='int32') resBoxes[0,0]=int(points[0]) resBoxes[0,4]=int(points[1]) resBoxes[0,1]=int(points[2]) resBoxes[0,5]=int(points[3]) resBoxes[0,2]=int(points[4]) resBoxes[0,6]=int(points[5]) resBoxes[0,3]=int(points[6]) resBoxes[0,7]=int(points[7]) pointMat = resBoxes[0].reshape([2,4]).T return plg.Polygon( pointMat) def rectangle_to_polygon(rect): resBoxes=np.empty([1,8],dtype='int32') resBoxes[0,0]=int(rect.xmin) resBoxes[0,4]=int(rect.ymax) resBoxes[0,1]=int(rect.xmin) 
resBoxes[0,5]=int(rect.ymin) resBoxes[0,2]=int(rect.xmax) resBoxes[0,6]=int(rect.ymin) resBoxes[0,3]=int(rect.xmax) resBoxes[0,7]=int(rect.ymax) pointMat = resBoxes[0].reshape([2,4]).T return plg.Polygon( pointMat) def rectangle_to_points(rect): points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)] return points def get_union(pD,pG): areaA = pD.area(); areaB = pG.area(); return areaA + areaB - get_intersection(pD, pG); def get_intersection_over_union(pD,pG): try: return get_intersection(pD, pG) / get_union(pD, pG); except: return 0 def get_intersection(pD,pG): pInt = pD & pG if len(pInt) == 0: return 0 return pInt.area() def compute_ap(confList, matchList,numGtCare): correct = 0 AP = 0 if len(confList)>0: confList = np.array(confList) matchList = np.array(matchList) sorted_ind = np.argsort(-confList) confList = confList[sorted_ind] matchList = matchList[sorted_ind] for n in range(len(confList)): match = matchList[n] if match: correct += 1 AP += float(correct)/(n + 1) if numGtCare>0: AP /= numGtCare return AP def transcription_match(transGt,transDet,specialCharacters='!?.:,*"()·[]/\'',onlyRemoveFirstLastCharacterGT=True): if onlyRemoveFirstLastCharacterGT: #special characters in GT are allowed only at initial or final position if (transGt==transDet): return True if specialCharacters.find(transGt[0])>-1: if transGt[1:]==transDet: return True if specialCharacters.find(transGt[-1])>-1: if transGt[0:len(transGt)-1]==transDet: return True if specialCharacters.find(transGt[0])>-1 and specialCharacters.find(transGt[-1])>-1: if transGt[1:len(transGt)-1]==transDet: return True return False else: #Special characters are removed from the begining and the end of both Detection and GroundTruth while len(transGt)>0 and specialCharacters.find(transGt[0])>-1: transGt = transGt[1:] while len(transDet)>0 and specialCharacters.find(transDet[0])>-1: transDet = transDet[1:] while len(transGt)>0 and 
specialCharacters.find(transGt[-1])>-1 : transGt = transGt[0:len(transGt)-1] while len(transDet)>0 and specialCharacters.find(transDet[-1])>-1: transDet = transDet[0:len(transDet)-1] return transGt == transDet def include_in_dictionary(transcription): """ Function used in Word Spotting that finds if the Ground Truth transcription meets the rules to enter into the dictionary. If not, the transcription will be cared as don't care """ #special case 's at final if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S": transcription = transcription[0:len(transcription)-2] #hypens at init or final of the word transcription = transcription.strip('-'); specialCharacters = "'!?.:,*\"()·[]/"; for character in specialCharacters: transcription = transcription.replace(character,' ') transcription = transcription.strip() if len(transcription) != len(transcription.replace(" ","")) : return False; if len(transcription) < evaluationParams['MIN_LENGTH_CARE_WORD']: return False; notAllowed = "×÷·"; range1 = [ ord(u'a'), ord(u'z') ] range2 = [ ord(u'A'), ord(u'Z') ] range3 = [ ord(u'À'), ord(u'ƿ') ] range4 = [ ord(u'DŽ'), ord(u'ɿ') ] range5 = [ ord(u'Ά'), ord(u'Ͽ') ] range6 = [ ord(u'-'), ord(u'-') ] for char in transcription : charCode = ord(char) if(notAllowed.find(char) != -1): return False valid = ( charCode>=range1[0] and charCode<=range1[1] ) or ( charCode>=range2[0] and charCode<=range2[1] ) or ( charCode>=range3[0] and charCode<=range3[1] ) or ( charCode>=range4[0] and charCode<=range4[1] ) or ( charCode>=range5[0] and charCode<=range5[1] ) or ( charCode>=range6[0] and charCode<=range6[1] ) if valid == False: return False return True def include_in_dictionary_transcription(transcription): """ Function applied to the Ground Truth transcriptions used in Word Spotting. 
It removes special characters or terminations """ #special case 's at final if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S": transcription = transcription[0:len(transcription)-2] #hypens at init or final of the word transcription = transcription.strip('-'); specialCharacters = "'!?.:,*\"()·[]/"; for character in specialCharacters: transcription = transcription.replace(character,' ') transcription = transcription.strip() return transcription perSampleMetrics = {} matchedSum = 0 Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax') gt = rrc_evaluation_funcs.load_zip_file(gtFilePath,evaluationParams['GT_SAMPLE_NAME_2_ID']) subm = rrc_evaluation_funcs.load_zip_file(submFilePath,evaluationParams['DET_SAMPLE_NAME_2_ID'],True) numGlobalCareGt = 0; numGlobalCareDet = 0; arrGlobalConfidences = []; arrGlobalMatches = []; for resFile in gt: gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile]) if (gtFile is None) : raise Exception("The file %s is not UTF-8" %resFile) recall = 0 precision = 0 hmean = 0 detCorrect = 0 iouMat = np.empty([1,1]) gtPols = [] detPols = [] gtTrans = [] detTrans = [] gtPolPoints = [] detPolPoints = [] gtDontCarePolsNum = [] #Array of Ground Truth Polygons' keys marked as don't Care detDontCarePolsNum = [] #Array of Detected Polygons' matched with a don't Care GT detMatchedNums = [] pairs = [] arrSampleConfidences = []; arrSampleMatch = []; sampleAP = 0; evaluationLog = "" pointsList,_,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,False) for n in range(len(pointsList)): points = pointsList[n] transcription = transcriptionsList[n] dontCare = transcription == "###" if evaluationParams['LTRB']: gtRect = Rectangle(*points) gtPol = rectangle_to_polygon(gtRect) else: gtPol = polygon_from_points(points) gtPols.append(gtPol) gtPolPoints.append(points) #On word spotting we will filter some transcriptions with special 
characters if evaluationParams['WORD_SPOTTING'] : if dontCare == False : if include_in_dictionary(transcription) == False : dontCare = True else: transcription = include_in_dictionary_transcription(transcription) gtTrans.append(transcription) if dontCare: gtDontCarePolsNum.append( len(gtPols)-1 ) evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum)>0 else "\n") if resFile in subm: detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile]) pointsList,confidencesList,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,evaluationParams['CONFIDENCES']) for n in range(len(pointsList)): points = pointsList[n] transcription = transcriptionsList[n] if evaluationParams['LTRB']: detRect = Rectangle(*points) detPol = rectangle_to_polygon(detRect) else: detPol = polygon_from_points(points) detPols.append(detPol) detPolPoints.append(points) detTrans.append(transcription) if len(gtDontCarePolsNum)>0 : for dontCarePol in gtDontCarePolsNum: dontCarePol = gtPols[dontCarePol] intersected_area = get_intersection(dontCarePol,detPol) pdDimensions = detPol.area() precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions if (precision > evaluationParams['AREA_PRECISION_CONSTRAINT'] ): detDontCarePolsNum.append( len(detPols)-1 ) break evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum)>0 else "\n") if len(gtPols)>0 and len(detPols)>0: #Calculate IoU and precision matrixs outputShape=[len(gtPols),len(detPols)] iouMat = np.empty(outputShape) gtRectMat = np.zeros(len(gtPols),np.int8) detRectMat = np.zeros(len(detPols),np.int8) for gtNum in range(len(gtPols)): for detNum in range(len(detPols)): pG = gtPols[gtNum] pD = detPols[detNum] iouMat[gtNum,detNum] = get_intersection_over_union(pD,pG) for gtNum in range(len(gtPols)): 
for detNum in range(len(detPols)): if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum : if iouMat[gtNum,detNum]>evaluationParams['IOU_CONSTRAINT']: gtRectMat[gtNum] = 1 detRectMat[detNum] = 1 #detection matched only if transcription is equal if evaluationParams['WORD_SPOTTING']: correct = gtTrans[gtNum].upper() == detTrans[detNum].upper() else: correct = transcription_match(gtTrans[gtNum].upper(),detTrans[detNum].upper(),evaluationParams['SPECIAL_CHARACTERS'],evaluationParams['ONLY_REMOVE_FIRST_LAST_CHARACTER'])==True detCorrect += (1 if correct else 0) if correct: detMatchedNums.append(detNum) pairs.append({'gt':gtNum,'det':detNum,'correct':correct}) evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + " trans. correct: " + str(correct) + "\n" if evaluationParams['CONFIDENCES']: for detNum in range(len(detPols)): if detNum not in detDontCarePolsNum : #we exclude the don't care detections match = detNum in detMatchedNums arrSampleConfidences.append(confidencesList[detNum]) arrSampleMatch.append(match) arrGlobalConfidences.append(confidencesList[detNum]); arrGlobalMatches.append(match); numGtCare = (len(gtPols) - len(gtDontCarePolsNum)) numDetCare = (len(detPols) - len(detDontCarePolsNum)) if numGtCare == 0: recall = float(1) precision = float(0) if numDetCare >0 else float(1) sampleAP = precision else: recall = float(detCorrect) / numGtCare precision = 0 if numDetCare==0 else float(detCorrect) / numDetCare if evaluationParams['CONFIDENCES']: sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare ) hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall) matchedSum += detCorrect numGlobalCareGt += numGtCare numGlobalCareDet += numDetCare perSampleMetrics[resFile] = { 'precision':precision, 'recall':recall, 'hmean':hmean, 'pairs':pairs, 'AP':sampleAP, 'iouMat':[] if len(detPols)>100 else iouMat.tolist(), 
'gtPolPoints':gtPolPoints, 'detPolPoints':detPolPoints, 'gtTrans':gtTrans, 'detTrans':detTrans, 'gtDontCare':gtDontCarePolsNum, 'detDontCare':detDontCarePolsNum, 'evaluationParams': evaluationParams, 'evaluationLog': evaluationLog } # Compute AP AP = 0 if evaluationParams['CONFIDENCES']: AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt) methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum)/numGlobalCareGt methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum)/numGlobalCareDet methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision) methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean, 'AP': AP } resDict = {'calculated':True,'Message':'','method': methodMetrics,'per_sample': perSampleMetrics} return resDict; if __name__=='__main__': ''' results_dir: result directory score_det: score of detection bounding box score_rec: score of the mask recognition branch score_rec_seq: score of the sequence recognition branch lexicon_type: 1 for generic; 2 for weak; 3 for strong ''' results_dir = '../../../output/mixtrain/inference/icdar_2015_test/model_0250000_1440_results/' lexicon_type = 3 score_det = 0.01 score_rec = 0.4 # score_rec_seq set to 0.7 for lexicon_type 3 or 2; 0.8 for lexicon_type 1 score_rec_seq = 0.7 evaluate_result_path = prepare_results_for_evaluation(results_dir, lexicon_type=lexicon_type, cache_dir='./cache_files', score_det=score_det, score_rec=score_rec, score_rec_seq=score_rec_seq) p = { 'g': "../gt.zip", 's': evaluate_result_path } rrc_evaluation_funcs.main_evaluation(p,default_evaluation_params,validate_data,evaluate_method)
20,101
42.605206
322
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/icdar2015/e2e/prepare_results.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys import os sys.path.append('./') import shapely from shapely.geometry import Polygon,MultiPoint import numpy as np import editdistance sys.path.append('../../') from weighted_editdistance import weighted_edit_distance from tqdm import tqdm try: import pickle except ImportError: import cPickle as pickle def list_from_str(st): line = st.split(',') # box[0:4], polygon[4:12], word, seq_word, detection_score, rec_socre, seq_score, char_score_path new_line = [float(a) for a in line[4:12]]+[float(line[-4])]+[line[-5]]+[line[-6]]+[float(line[-3])]+[float(line[-2])] + [line[-1]] return new_line def polygon_from_list(line): """ Create a shapely polygon object from gt or dt line. """ polygon_points = np.array(line).reshape(4, 2) polygon = Polygon(polygon_points).convex_hull return polygon def polygon_iou(list1, list2): """ Intersection over union between two shapely polygons. """ polygon_points1 = np.array(list1).reshape(4, 2) poly1 = Polygon(polygon_points1).convex_hull polygon_points2 = np.array(list2).reshape(4, 2) poly2 = Polygon(polygon_points2).convex_hull union_poly = np.concatenate((polygon_points1,polygon_points2)) if not poly1.intersects(poly2): # this test is fast and can accelerate calculation iou = 0 else: try: inter_area = poly1.intersection(poly2).area #union_area = poly1.area + poly2.area - inter_area union_area = MultiPoint(union_poly).convex_hull.area iou = float(inter_area) / (union_area+1e-6) except shapely.geos.TopologicalError: print('shapely.geos.TopologicalError occured, iou set to 0') iou = 0 return iou def nms(boxes,overlap): rec_scores = [b[-2] for b in boxes] indices = sorted(range(len(rec_scores)), key=lambda k: -rec_scores[k]) box_num = len(boxes) nms_flag = [True]*box_num for i in range(box_num): ii = indices[i] if not nms_flag[ii]: continue for j in range(box_num): jj = indices[j] if j == i: continue if not nms_flag[jj]: continue box1 = boxes[ii] box2 = boxes[jj] box1_score = 
rec_scores[ii] box2_score = rec_scores[jj] str1 = box1[9] str2 = box2[9] box_i = [box1[0],box1[1],box1[4],box1[5]] box_j = [box2[0],box2[1],box2[4],box2[5]] poly1 = polygon_from_list(box1[0:8]) poly2 = polygon_from_list(box2[0:8]) iou = polygon_iou(box1[0:8],box2[0:8]) thresh = overlap if iou > thresh: if box1_score > box2_score: nms_flag[jj] = False if box1_score == box2_score and poly1.area > poly2.area: nms_flag[jj] = False if box1_score == box2_score and poly1.area<=poly2.area: nms_flag[ii] = False break return nms_flag def packing(save_dir, cache_dir, pack_name): files = os.listdir(save_dir) if not os.path.exists(cache_dir): os.mkdir(cache_dir) os.system('zip -r -q -j '+os.path.join(cache_dir, pack_name+'.zip')+' '+save_dir+'/*') def test_single(results_dir,lexicon_type=3,cache_dir='./cache_dir',score_det=0.5,score_rec=0.5,score_rec_seq=0.5,overlap=0.2, use_lexicon=True, weighted_ed=True, use_seq=False, use_char=False, mix=False): ''' results_dir: result directory score_det: score of detection bounding box score_rec: score of the mask recognition branch socre_rec_seq: score of the sequence recognition branch overlap: overlap threshold used for nms lexicon_type: 1 for generic; 2 for weak; 3 for strong use_seq: use the recognition result of sequence branch use_mix: use both the recognition result of the mask and sequence branches, selected by score ''' print('score_det:', 'score_det:', score_det, 'score_rec:', score_rec, 'score_rec_seq:', score_rec_seq, 'lexicon_type:', lexicon_type, 'weighted_ed:', weighted_ed, 'use_seq:', use_seq, 'use_char:', use_char, 'mix:', mix) if not os.path.exists(cache_dir): os.mkdir(cache_dir) nms_dir = os.path.join(cache_dir,str(score_det)+'_'+str(score_rec)+'_'+str(score_rec_seq)) if not os.path.exists(nms_dir): os.mkdir(nms_dir) if lexicon_type==1: # generic lexicon lexicon_path = '../../lexicons/ic15/GenericVocabulary_new.txt' lexicon_fid=open(lexicon_path, 'r') pair_list = 
open('../../lexicons/ic15/GenericVocabulary_pair_list.txt', 'r') pairs = dict() for line in pair_list.readlines(): line=line.strip() word = line.split(' ')[0].upper() word_gt = line[len(word)+1:] pairs[word] = word_gt lexicon_fid=open(lexicon_path, 'r') lexicon=[] for line in lexicon_fid.readlines(): line=line.strip() lexicon.append(line) if lexicon_type==2: # weak lexicon lexicon_path = '../../lexicons/ic15/ch4_test_vocabulary_new.txt' lexicon_fid=open(lexicon_path, 'r') pair_list = open('../../lexicons/ic15/ch4_test_vocabulary_pair_list.txt', 'r') pairs = dict() for line in pair_list.readlines(): line=line.strip() word = line.split(' ')[0].upper() word_gt = line[len(word)+1:] pairs[word] = word_gt lexicon_fid=open(lexicon_path, 'r') lexicon=[] for line in lexicon_fid.readlines(): line=line.strip() lexicon.append(line) for i in tqdm(range(1,501)): img = 'img_'+str(i)+'.jpg' gt_img = 'gt_img_'+str(i)+'.txt' if lexicon_type==3: # weak lexicon_path = '../../lexicons/ic15/new_strong_lexicon/new_voc_img_' + str(i) + '.txt' lexicon_fid=open(lexicon_path, 'r') pair_list = open('../../lexicons/ic15/new_strong_lexicon/pair_voc_img_' + str(i) + '.txt', 'r') pairs = dict() for line in pair_list.readlines(): line=line.strip() word = line.split(' ')[0].upper() word_gt = line[len(word)+1:] pairs[word] = word_gt lexicon_fid=open(lexicon_path, 'r') lexicon=[] for line in lexicon_fid.readlines(): line=line.strip() lexicon.append(line) result_path = os.path.join(results_dir,'res_img_'+str(i)+'.txt') if os.path.isfile(result_path): with open(result_path,'r') as f: dt_lines = [a.strip() for a in f.readlines()] dt_lines = [list_from_str(dt) for dt in dt_lines] else: dt_lines = [] dt_lines = [dt for dt in dt_lines if dt[-2]>score_rec_seq and dt[-3]>score_rec and dt[-6]>score_det] nms_flag = nms(dt_lines,overlap) boxes = [] for k in range(len(dt_lines)): dt = dt_lines[k] if nms_flag[k]: if dt not in boxes: boxes.append(dt) with open(os.path.join(nms_dir,'res_img_'+str(i)+'.txt'),'w') as 
f: for g in boxes: gt_coors = [int(b) for b in g[0:8]] with open('../../../' + g[-1], "rb") as input_file: # with open(g[-1], "rb") as input_file: dict_scores = pickle.load(input_file) if use_char and use_seq: if g[-2]>g[-3]: word = g[-5] scores = dict_scores['seq_char_scores'][:,1:-1].swapaxes(0,1) else: word = g[-4] scores = dict_scores['seg_char_scores'] elif use_seq: word = g[-5] scores = dict_scores['seq_char_scores'][:,1:-1].swapaxes(0,1) else: word = g[-4] scores = dict_scores['seg_char_scores'] match_word, match_dist = find_match_word(word, lexicon, pairs, scores, use_lexicon, weighted_ed) if match_dist<1.5 or lexicon_type==1: gt_coor_strs = [str(a) for a in gt_coors]+ [match_word] f.write(','.join(gt_coor_strs)+'\r\n') pack_name = str(score_det)+'_'+str(score_rec)+'_over'+str(overlap) packing(nms_dir,cache_dir,pack_name) submit_file_path = os.path.join(cache_dir, pack_name+'.zip') return submit_file_path def find_match_word(rec_str, lexicon, pairs, scores_numpy, use_ed = True, weighted_ed = False): if not use_ed: return rec_str rec_str = rec_str.upper() dist_min = 100 dist_min_pre = 100 match_word = '' match_dist = 100 if not weighted_ed: for word in lexicon: word = word.upper() ed = editdistance.eval(rec_str, word) length_dist = abs(len(word) - len(rec_str)) # dist = ed + length_dist dist = ed if dist<dist_min: dist_min = dist match_word = pairs[word] match_dist = dist return match_word, match_dist else: small_lexicon_dict = dict() for word in lexicon: word = word.upper() ed = editdistance.eval(rec_str, word) small_lexicon_dict[word] = ed dist = ed if dist<dist_min_pre: dist_min_pre = dist small_lexicon = [] for word in small_lexicon_dict: if small_lexicon_dict[word]<=dist_min_pre+2: small_lexicon.append(word) for word in small_lexicon: word = word.upper() ed = weighted_edit_distance(rec_str, word, scores_numpy) dist = ed if dist<dist_min: dist_min = dist match_word = pairs[word] match_dist = dist return match_word, match_dist def 
prepare_results_for_evaluation(results_dir, lexicon_type, cache_dir, score_det, score_rec, score_rec_seq): if not os.path.isdir(cache_dir): os.mkdir(cache_dir) result_path = test_single(results_dir,score_det=score_det,score_rec=score_rec,score_rec_seq=score_rec_seq,overlap=0.2,cache_dir=cache_dir,lexicon_type=lexicon_type, use_lexicon=True, weighted_ed=True, use_seq=True, use_char=True, mix=True) return result_path
10,659
39.532319
243
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/totaltext/e2e/rrc_evaluation_funcs.py
#!/usr/bin/env python2 #encoding: UTF-8 import json import sys;sys.path.append('./') import zipfile import re import sys import os import codecs import importlib try: from StringIO import StringIO except ImportError: from io import StringIO def print_help(): sys.stdout.write('Usage: python %s.py -g=<gtFile> -s=<submFile> [-o=<outputFolder> -p=<jsonParams>]' %sys.argv[0]) sys.exit(2) def load_zip_file_keys(file,fileNameRegExp=''): """ Returns an array with the entries of the ZIP file that match with the regular expression. The key's are the names or the file or the capturing group definied in the fileNameRegExp """ try: archive=zipfile.ZipFile(file, mode='r', allowZip64=True) except : raise Exception('Error loading the ZIP archive.') pairs = [] for name in archive.namelist(): addFile = True keyName = name if fileNameRegExp!="": m = re.match(fileNameRegExp,name) if m == None: addFile = False else: if len(m.groups())>0: keyName = m.group(1) if addFile: pairs.append( keyName ) return pairs def load_zip_file(file,fileNameRegExp='',allEntries=False): """ Returns an array with the contents (filtered by fileNameRegExp) of a ZIP file. 
The key's are the names or the file or the capturing group definied in the fileNameRegExp allEntries validates that all entries in the ZIP file pass the fileNameRegExp """ try: archive=zipfile.ZipFile(file, mode='r', allowZip64=True) except : raise Exception('Error loading the ZIP archive') pairs = [] for name in archive.namelist(): addFile = True keyName = name if fileNameRegExp!="": m = re.match(fileNameRegExp,name) if m == None: addFile = False else: if len(m.groups())>0: keyName = m.group(1) if addFile: pairs.append( [ keyName , archive.read(name)] ) else: if allEntries: raise Exception('ZIP entry not valid: %s' %name) return dict(pairs) def decode_utf8(raw): """ Returns a Unicode object on success, or None on failure """ try: raw = codecs.decode(raw,'utf-8', 'replace') #extracts BOM if exists raw = raw.encode('utf8') if raw.startswith(codecs.BOM_UTF8): raw = raw.replace(codecs.BOM_UTF8, '', 1) return raw.decode('utf-8') except: return None def validate_lines_in_file(fileName,file_contents,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0): """ This function validates that all lines of the file calling the Line validation function for each line """ utf8File = decode_utf8(file_contents) if (utf8File is None) : raise Exception("The file %s is not UTF-8" %fileName) lines = utf8File.split( "\r\n" if CRLF else "\n" ) for line in lines: line = line.replace("\r","").replace("\n","") if(line != ""): try: validate_tl_line(line,LTRB,withTranscription,withConfidence,imWidth,imHeight) except Exception as e: raise Exception(("Line in sample not valid. Sample: %s Line: %s Error: %s" %(fileName,line,str(e))).encode('utf-8', 'replace')) def validate_tl_line(line,LTRB=True,withTranscription=True,withConfidence=True,imWidth=0,imHeight=0): """ Validate the format of the line. If the line is not valid an exception will be raised. If maxWidth and maxHeight are specified, all points must be inside the imgage bounds. 
Posible values are: LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription] LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription] """ get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight) def get_tl_line_values(line,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0): """ Validate the format of the line. If the line is not valid an exception will be raised. If maxWidth and maxHeight are specified, all points must be inside the imgage bounds. Posible values are: LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription] LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription] Returns values from a textline. Points , [Confidences], [Transcriptions] """ confidence = 0.0 transcription = ""; points = [] numPoints = 4; if LTRB: numPoints = 4; if withTranscription and withConfidence: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line) if m == None : m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line) raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription") elif withConfidence: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line) if m == None : raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence") elif withTranscription: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$',line) if m == None : raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription") else: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$',line) if m == None : raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax") xmin = int(m.group(1)) ymin = int(m.group(2)) xmax = int(m.group(3)) ymax = int(m.group(4)) if(xmax<xmin): raise Exception("Xmax value (%s) not valid (Xmax < Xmin)." 
%(xmax)) if(ymax<ymin): raise Exception("Ymax value (%s) not valid (Ymax < Ymin)." %(ymax)) points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ] if (imWidth>0 and imHeight>0): validate_point_inside_bounds(xmin,ymin,imWidth,imHeight); validate_point_inside_bounds(xmax,ymax,imWidth,imHeight); else: numPoints = 8; if withTranscription and withConfidence: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line) if m == None : raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription") elif withConfidence: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line) if m == None : raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence") elif withTranscription: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$',line) if m == None : raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription") else: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$',line) if m == None : raise Exception("Format incorrect. 
Should be: x1,y1,x2,y2,x3,y3,x4,y4") points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ] validate_clockwise_points(points) if (imWidth>0 and imHeight>0): validate_point_inside_bounds(points[0],points[1],imWidth,imHeight); validate_point_inside_bounds(points[2],points[3],imWidth,imHeight); validate_point_inside_bounds(points[4],points[5],imWidth,imHeight); validate_point_inside_bounds(points[6],points[7],imWidth,imHeight); if withConfidence: try: confidence = float(m.group(numPoints+1)) except ValueError: raise Exception("Confidence value must be a float") if withTranscription: posTranscription = numPoints + (2 if withConfidence else 1) transcription = m.group(posTranscription) m2 = re.match(r'^\s*\"(.*)\"\s*$',transcription) if m2 != None : #Transcription with double quotes, we extract the value and replace escaped characters transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"") return points,confidence,transcription def validate_point_inside_bounds(x,y,imWidth,imHeight): if(x<0 or x>imWidth): raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" %(xmin,imWidth,imHeight)) if(y<0 or y>imHeight): raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s) Sample: %s Line:%s" %(ymin,imWidth,imHeight)) def validate_clockwise_points(points): """ Validates that the points that the 4 points that dlimite a polygon are in clockwise order. """ if len(points) != 8: raise Exception("Points list not valid." + str(len(points))) point = [ [int(points[0]) , int(points[1])], [int(points[2]) , int(points[3])], [int(points[4]) , int(points[5])], [int(points[6]) , int(points[7])] ] edge = [ ( point[1][0] - point[0][0])*( point[1][1] + point[0][1]), ( point[2][0] - point[1][0])*( point[2][1] + point[1][1]), ( point[3][0] - point[2][0])*( point[3][1] + point[2][1]), ( point[0][0] - point[3][0])*( point[0][1] + point[3][1]) ] summatory = edge[0] + edge[1] + edge[2] + edge[3]; if summatory>0: raise Exception("Points are not clockwise. 
The coordinates of bounding quadrilaterals have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.") def get_tl_line_values_from_file_contents(content,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0,sort_by_confidences=True): """ Returns all points, confindences and transcriptions of a file in lists. Valid line formats: xmin,ymin,xmax,ymax,[confidence],[transcription] x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription] """ pointsList = [] transcriptionsList = [] confidencesList = [] lines = content.split( "\r\n" if CRLF else "\n" ) for line in lines: line = line.replace("\r","").replace("\n","") if(line != "") : points, confidence, transcription = get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight); pointsList.append(points) transcriptionsList.append(transcription) confidencesList.append(confidence) if withConfidence and len(confidencesList)>0 and sort_by_confidences: import numpy as np sorted_ind = np.argsort(-np.array(confidencesList)) confidencesList = [confidencesList[i] for i in sorted_ind] pointsList = [pointsList[i] for i in sorted_ind] transcriptionsList = [transcriptionsList[i] for i in sorted_ind] return pointsList,confidencesList,transcriptionsList def main_evaluation(p,default_evaluation_params_fn,validate_data_fn,evaluate_method_fn,show_result=True,per_sample=True): """ This process validates a method, evaluates it and if it succed generates a ZIP file with a JSON entry for each sample. Params: p: Dictionary of parmeters with the GT/submission locations. If None is passed, the parameters send by the system are used. 
default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation validate_data_fn: points to a method that validates the corrct format of the submission evaluate_method_fn: points to a function that evaluated the submission and return a Dictionary with the results """ if (p == None): p = dict([s[1:].split('=') for s in sys.argv[1:]]) if(len(sys.argv)<3): print_help() evalParams = default_evaluation_params_fn() if 'p' in p.keys(): evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) ) resDict={'calculated':True,'Message':'','method':'{}','per_sample':'{}'} try: validate_data_fn(p['g'], p['s'], evalParams) evalData = evaluate_method_fn(p['g'], p['s'], evalParams) resDict.update(evalData) except Exception as e: resDict['Message']= str(e) resDict['calculated']=False if 'o' in p: if not os.path.exists(p['o']): os.makedirs(p['o']) resultsOutputname = p['o'] + '/results.zip' outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True) del resDict['per_sample'] if 'output_items' in resDict.keys(): del resDict['output_items'] outZip.writestr('method.json',json.dumps(resDict)) if not resDict['calculated']: if show_result: sys.stderr.write('Error!\n'+ resDict['Message']+'\n\n') if 'o' in p: outZip.close() return resDict if 'o' in p: if per_sample == True: for k,v in evalData['per_sample'].items(): outZip.writestr( k + '.json',json.dumps(v)) if 'output_items' in evalData.keys(): for k, v in evalData['output_items'].items(): outZip.writestr( k,v) outZip.close() if show_result: sys.stdout.write("Calculated!") sys.stdout.write(json.dumps(resDict['method'])) return resDict def main_validation(default_evaluation_params_fn,validate_data_fn): """ This process validates a method Params: default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation validate_data_fn: points to a method that validates the corrct format 
of the submission """ try: p = dict([s[1:].split('=') for s in sys.argv[1:]]) evalParams = default_evaluation_params_fn() if 'p' in p.keys(): evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) ) validate_data_fn(p['g'], p['s'], evalParams) print('SUCCESS') sys.exit(0) except Exception as e: print(str(e)) sys.exit(101)
15,410
40.764228
359
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/totaltext/e2e/script.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # encoding=utf8 from collections import namedtuple import rrc_evaluation_funcs_total_text as rrc_evaluation_funcs import importlib from prepare_results import prepare_results_for_evaluation def evaluation_imports(): """ evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation. """ return { 'Polygon':'plg', 'numpy':'np' } def default_evaluation_params(): """ default_evaluation_params: Default parameters to use for the validation and evaluation. """ return { 'IOU_CONSTRAINT' :0.5, 'AREA_PRECISION_CONSTRAINT' :0.5, 'WORD_SPOTTING' :False, 'MIN_LENGTH_CARE_WORD' :3, 'GT_SAMPLE_NAME_2_ID':'gt_img_([0-9]+).txt', 'DET_SAMPLE_NAME_2_ID':'res_img_([0-9]+).txt', 'LTRB':False, #LTRB:2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4) 'CRLF':False, # Lines are delimited by Windows CRLF format 'CONFIDENCES':False, #Detections must include confidence value. MAP and MAR will be calculated, 'SPECIAL_CHARACTERS':'!?.:,*"()·[]/\'', 'ONLY_REMOVE_FIRST_LAST_CHARACTER' : True } def validate_data(gtFilePath, submFilePath, evaluationParams): """ Method validate_data: validates that all files in the results folder are correct (have the correct name contents). Validates also that there are no missing files in the folder. 
If some error detected, the method raises the error """ gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID']) subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True) #Validate format of GroundTruth for k in gt: rrc_evaluation_funcs.validate_lines_in_file(k,gt[k],evaluationParams['CRLF'],evaluationParams['LTRB'],True) #Validate format of results for k in subm: if (k in gt) == False : raise Exception("The sample %s not present in GT" %k) rrc_evaluation_funcs.validate_lines_in_file(k,subm[k],evaluationParams['CRLF'],evaluationParams['LTRB'],True,evaluationParams['CONFIDENCES']) def evaluate_method(gtFilePath, submFilePath, evaluationParams): """ Method evaluate_method: evaluate method and returns the results Results. Dictionary with the following values: - method (required) Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 } - samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 } """ for module,alias in evaluation_imports().items(): globals()[alias] = importlib.import_module(module) def polygon_from_points(points,correctOffset=False): """ Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4 """ resBoxes=np.empty([1,len(points)],dtype='int32') for i in range(int(len(points) / 2)): resBoxes[0, i] = int(points[2*i]) resBoxes[0, int(len(points) / 2) + i] = int(points[2*i+1]) pointMat = resBoxes[0].reshape([2,-1]).T return plg.Polygon( pointMat) def rectangle_to_polygon(rect): resBoxes=np.empty([1,8],dtype='int32') resBoxes[0,0]=int(rect.xmin) resBoxes[0,4]=int(rect.ymax) resBoxes[0,1]=int(rect.xmin) resBoxes[0,5]=int(rect.ymin) resBoxes[0,2]=int(rect.xmax) resBoxes[0,6]=int(rect.ymin) resBoxes[0,3]=int(rect.xmax) resBoxes[0,7]=int(rect.ymax) pointMat = resBoxes[0].reshape([2,4]).T return plg.Polygon( pointMat) def rectangle_to_points(rect): points 
= [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)] return points def get_union(pD,pG): areaA = pD.area(); areaB = pG.area(); return areaA + areaB - get_intersection(pD, pG); def get_intersection_over_union(pD,pG): try: return get_intersection(pD, pG) / get_union(pD, pG); except: return 0 def get_intersection(pD,pG): pInt = pD & pG if len(pInt) == 0: return 0 return pInt.area() def compute_ap(confList, matchList,numGtCare): correct = 0 AP = 0 if len(confList)>0: confList = np.array(confList) matchList = np.array(matchList) sorted_ind = np.argsort(-confList) confList = confList[sorted_ind] matchList = matchList[sorted_ind] for n in range(len(confList)): match = matchList[n] if match: correct += 1 AP += float(correct)/(n + 1) if numGtCare>0: AP /= numGtCare return AP def transcription_match(transGt,transDet,specialCharacters='!?.:,*"()·[]/\'',onlyRemoveFirstLastCharacterGT=True): if onlyRemoveFirstLastCharacterGT: #special characters in GT are allowed only at initial or final position if (transGt==transDet): return True if specialCharacters.find(transGt[0])>-1: if transGt[1:]==transDet: return True if specialCharacters.find(transGt[-1])>-1: if transGt[0:len(transGt)-1]==transDet: return True if specialCharacters.find(transGt[0])>-1 and specialCharacters.find(transGt[-1])>-1: if transGt[1:len(transGt)-1]==transDet: return True return False else: #Special characters are removed from the begining and the end of both Detection and GroundTruth while len(transGt)>0 and specialCharacters.find(transGt[0])>-1: transGt = transGt[1:] while len(transDet)>0 and specialCharacters.find(transDet[0])>-1: transDet = transDet[1:] while len(transGt)>0 and specialCharacters.find(transGt[-1])>-1 : transGt = transGt[0:len(transGt)-1] while len(transDet)>0 and specialCharacters.find(transDet[-1])>-1: transDet = transDet[0:len(transDet)-1] return transGt == transDet def include_in_dictionary(transcription): """ 
Function used in Word Spotting that finds if the Ground Truth transcription meets the rules to enter into the dictionary. If not, the transcription will be cared as don't care """ #special case 's at final if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S": transcription = transcription[0:len(transcription)-2] #hypens at init or final of the word transcription = transcription.strip('-'); specialCharacters = "'!?.:,*\"()·[]/"; for character in specialCharacters: transcription = transcription.replace(character,' ') transcription = transcription.strip() if len(transcription) != len(transcription.replace(" ","")) : return False; if len(transcription) < evaluationParams['MIN_LENGTH_CARE_WORD']: return False; notAllowed = "×÷·"; range1 = [ ord(u'a'), ord(u'z') ] range2 = [ ord(u'A'), ord(u'Z') ] range3 = [ ord(u'À'), ord(u'ƿ') ] range4 = [ ord(u'DŽ'), ord(u'ɿ') ] range5 = [ ord(u'Ά'), ord(u'Ͽ') ] range6 = [ ord(u'-'), ord(u'-') ] for char in transcription : charCode = ord(char) if(notAllowed.find(char) != -1): return False valid = ( charCode>=range1[0] and charCode<=range1[1] ) or ( charCode>=range2[0] and charCode<=range2[1] ) or ( charCode>=range3[0] and charCode<=range3[1] ) or ( charCode>=range4[0] and charCode<=range4[1] ) or ( charCode>=range5[0] and charCode<=range5[1] ) or ( charCode>=range6[0] and charCode<=range6[1] ) if valid == False: return False return True def include_in_dictionary_transcription(transcription): """ Function applied to the Ground Truth transcriptions used in Word Spotting. 
It removes special characters or terminations """ #special case 's at final if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S": transcription = transcription[0:len(transcription)-2] #hypens at init or final of the word transcription = transcription.strip('-'); specialCharacters = "'!?.:,*\"()·[]/"; for character in specialCharacters: transcription = transcription.replace(character,' ') transcription = transcription.strip() return transcription perSampleMetrics = {} matchedSum = 0 Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax') gt = rrc_evaluation_funcs.load_zip_file(gtFilePath,evaluationParams['GT_SAMPLE_NAME_2_ID']) subm = rrc_evaluation_funcs.load_zip_file(submFilePath,evaluationParams['DET_SAMPLE_NAME_2_ID'],True) numGlobalCareGt = 0; numGlobalCareDet = 0; arrGlobalConfidences = []; arrGlobalMatches = []; for resFile in gt: gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile]) if (gtFile is None) : raise Exception("The file %s is not UTF-8" %resFile) recall = 0 precision = 0 hmean = 0 detCorrect = 0 iouMat = np.empty([1,1]) gtPols = [] detPols = [] gtTrans = [] detTrans = [] gtPolPoints = [] detPolPoints = [] gtDontCarePolsNum = [] #Array of Ground Truth Polygons' keys marked as don't Care detDontCarePolsNum = [] #Array of Detected Polygons' matched with a don't Care GT detMatchedNums = [] pairs = [] arrSampleConfidences = []; arrSampleMatch = []; sampleAP = 0; evaluationLog = "" pointsList,_,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,False) for n in range(len(pointsList)): points = pointsList[n] transcription = transcriptionsList[n] dontCare = transcription == "###" if evaluationParams['LTRB']: gtRect = Rectangle(*points) gtPol = rectangle_to_polygon(gtRect) else: gtPol = polygon_from_points(points) gtPols.append(gtPol) gtPolPoints.append(points) #On word spotting we will filter some transcriptions with special 
characters if evaluationParams['WORD_SPOTTING'] : if dontCare == False : if include_in_dictionary(transcription) == False : dontCare = True else: transcription = include_in_dictionary_transcription(transcription) gtTrans.append(transcription) if dontCare: gtDontCarePolsNum.append( len(gtPols)-1 ) evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum)>0 else "\n") if resFile in subm: detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile]) pointsList,confidencesList,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,evaluationParams['CONFIDENCES']) for n in range(len(pointsList)): points = pointsList[n] transcription = transcriptionsList[n] if evaluationParams['LTRB']: detRect = Rectangle(*points) detPol = rectangle_to_polygon(detRect) else: detPol = polygon_from_points(points) detPols.append(detPol) detPolPoints.append(points) detTrans.append(transcription) if len(gtDontCarePolsNum)>0 : for dontCarePol in gtDontCarePolsNum: dontCarePol = gtPols[dontCarePol] intersected_area = get_intersection(dontCarePol,detPol) pdDimensions = detPol.area() precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions if (precision > evaluationParams['AREA_PRECISION_CONSTRAINT'] ): detDontCarePolsNum.append( len(detPols)-1 ) break evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum)>0 else "\n") if len(gtPols)>0 and len(detPols)>0: #Calculate IoU and precision matrixs outputShape=[len(gtPols),len(detPols)] iouMat = np.empty(outputShape) gtRectMat = np.zeros(len(gtPols),np.int8) detRectMat = np.zeros(len(detPols),np.int8) for gtNum in range(len(gtPols)): for detNum in range(len(detPols)): pG = gtPols[gtNum] pD = detPols[detNum] iouMat[gtNum,detNum] = get_intersection_over_union(pD,pG) for gtNum in range(len(gtPols)): 
for detNum in range(len(detPols)): if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum : if iouMat[gtNum,detNum]>evaluationParams['IOU_CONSTRAINT']: gtRectMat[gtNum] = 1 detRectMat[detNum] = 1 #detection matched only if transcription is equal if evaluationParams['WORD_SPOTTING']: correct = gtTrans[gtNum].upper() == detTrans[detNum].upper() else: correct = transcription_match(gtTrans[gtNum].upper(),detTrans[detNum].upper(),evaluationParams['SPECIAL_CHARACTERS'],evaluationParams['ONLY_REMOVE_FIRST_LAST_CHARACTER'])==True detCorrect += (1 if correct else 0) if correct: detMatchedNums.append(detNum) pairs.append({'gt':gtNum,'det':detNum,'correct':correct}) evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + " trans. correct: " + str(correct) + "\n" if evaluationParams['CONFIDENCES']: for detNum in range(len(detPols)): if detNum not in detDontCarePolsNum : #we exclude the don't care detections match = detNum in detMatchedNums arrSampleConfidences.append(confidencesList[detNum]) arrSampleMatch.append(match) arrGlobalConfidences.append(confidencesList[detNum]); arrGlobalMatches.append(match); numGtCare = (len(gtPols) - len(gtDontCarePolsNum)) numDetCare = (len(detPols) - len(detDontCarePolsNum)) if numGtCare == 0: recall = float(1) precision = float(0) if numDetCare >0 else float(1) sampleAP = precision else: recall = float(detCorrect) / numGtCare precision = 0 if numDetCare==0 else float(detCorrect) / numDetCare if evaluationParams['CONFIDENCES']: sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare ) hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall) matchedSum += detCorrect numGlobalCareGt += numGtCare numGlobalCareDet += numDetCare perSampleMetrics[resFile] = { 'precision':precision, 'recall':recall, 'hmean':hmean, 'pairs':pairs, 'AP':sampleAP, 'iouMat':[] if len(detPols)>100 else iouMat.tolist(), 
'gtPolPoints':gtPolPoints, 'detPolPoints':detPolPoints, 'gtTrans':gtTrans, 'detTrans':detTrans, 'gtDontCare':gtDontCarePolsNum, 'detDontCare':detDontCarePolsNum, 'evaluationParams': evaluationParams, 'evaluationLog': evaluationLog } # Compute AP AP = 0 if evaluationParams['CONFIDENCES']: AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt) methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum)/numGlobalCareGt methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum)/numGlobalCareDet methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision) methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean, 'AP': AP } resDict = {'calculated':True,'Message':'','method': methodMetrics,'per_sample': perSampleMetrics} return resDict; if __name__=='__main__': ''' results_dir: result directory score_det: score of detection bounding box score_rec: score of the mask recognition branch score_rec_seq: score of the sequence recognition branch lexicon_type: 1 for generic; 2 for weak; 3 for strong ''' results_dir = '../../../output/mixtrain/inference/total_text_test/model_0250000_1000_results/' score_det = 0.05 score_rec = 0.5 use_lexicon = False score_rec_seq = 0.9 # use_lexicon = True # score_rec_seq = 0.8 evaluate_result_path = prepare_results_for_evaluation(results_dir, use_lexicon=use_lexicon, cache_dir='./cache_files', score_det=score_det, score_rec=score_rec, score_rec_seq=score_rec_seq) p = { 'g': "../gt.zip", 'o': "./cache_files", 's': evaluate_result_path } rrc_evaluation_funcs.main_evaluation(p,default_evaluation_params,validate_data,evaluate_method)
19,780
42.763274
322
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/totaltext/e2e/prepare_results.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys import os import glob sys.path.append('./') import shapely from shapely.geometry import Polygon,MultiPoint import numpy as np import editdistance sys.path.append('../../') from weighted_editdistance import weighted_edit_distance from tqdm import tqdm try: import pickle except ImportError: import cPickle as pickle def list_from_str(st): line = st.split(';') segms = line[1].split(',') scores = line[2].split(',') new_line = [float(a) for a in segms]+[float(scores[-4])]+[scores[-5]]+[scores[-6]]+[float(scores[-3])]+[float(scores[-2])] + [scores[-1]] return new_line def polygon_from_list(line): """ Create a shapely polygon object from gt or dt line. """ polygon_points = np.array(line).reshape(-1, 2) polygon = Polygon(polygon_points).convex_hull return polygon def polygon_iou(list1, list2): """ Intersection over union between two shapely polygons. """ polygon_points1 = np.array(list1).reshape(-1, 2) poly1 = Polygon(polygon_points1).convex_hull polygon_points2 = np.array(list2).reshape(-1, 2) poly2 = Polygon(polygon_points2).convex_hull union_poly = np.concatenate((polygon_points1,polygon_points2)) if not poly1.intersects(poly2): # this test is fast and can accelerate calculation iou = 0 else: try: inter_area = poly1.intersection(poly2).area #union_area = poly1.area + poly2.area - inter_area union_area = MultiPoint(union_poly).convex_hull.area iou = float(inter_area) / (union_area+1e-6) except shapely.geos.TopologicalError: print('shapely.geos.TopologicalError occured, iou set to 0') iou = 0 return iou def nms(boxes,overlap): rec_scores = [b[-6] for b in boxes] indices = sorted(range(len(rec_scores)), key=lambda k: -rec_scores[k]) box_num = len(boxes) nms_flag = [True]*box_num for i in range(box_num): ii = indices[i] if not nms_flag[ii]: continue for j in range(box_num): jj = indices[j] if j == i: continue if not nms_flag[jj]: continue box1 = boxes[ii] box2 = boxes[jj] box1_score = rec_scores[ii] box2_score = 
rec_scores[jj] str1 = box1[9] str2 = box2[9] box_i = [box1[0],box1[1],box1[4],box1[5]] box_j = [box2[0],box2[1],box2[4],box2[5]] poly1 = polygon_from_list(box1[0:-6]) poly2 = polygon_from_list(box2[0:-6]) iou = polygon_iou(box1[0:-6],box2[0:-6]) thresh = overlap if iou > thresh: if box1_score > box2_score: nms_flag[jj] = False if box1_score == box2_score and poly1.area > poly2.area: nms_flag[jj] = False if box1_score == box2_score and poly1.area<=poly2.area: nms_flag[ii] = False break return nms_flag def packing(save_dir, cache_dir, pack_name): files = os.listdir(save_dir) if not os.path.exists(cache_dir): os.mkdir(cache_dir) os.system('zip -r -q -j '+os.path.join(cache_dir, pack_name+'.zip')+' '+save_dir+'/*') def test_single(results_dir,lexicon_type=3,cache_dir='./cache_dir',score_det=0.5,score_rec=0.5,score_rec_seq=0.5,overlap=0.2, use_lexicon=True, weighted_ed=True, use_seq=False, use_char=False, mix=False): ''' results_dir: result directory score_det: score of detection bounding box score_rec: score of the mask recognition branch socre_rec_seq: score of the sequence recognition branch overlap: overlap threshold used for nms lexicon_type: 1 for generic; 2 for weak; 3 for strong use_seq: use the recognition result of sequence branch use_mix: use both the recognition result of the mask and sequence branches, selected by score ''' print('score_det:', 'score_det:', score_det, 'score_rec:', score_rec, 'score_rec_seq:', score_rec_seq, 'overlap:', overlap,'lexicon_type:', lexicon_type, 'weighted_ed:', weighted_ed, 'use_seq:', use_seq, 'use_char:', use_char, 'mix:', mix) if not os.path.exists(cache_dir): os.mkdir(cache_dir) nms_dir = os.path.join(cache_dir,str(score_det)+'_'+str(score_rec)+'_'+str(score_rec_seq)) if not os.path.exists(nms_dir): os.mkdir(nms_dir) if use_lexicon and lexicon_type==2: # weak lexicon lexicon_path = '../../lexicons/totaltext/weak_voc_new.txt' lexicon_fid=open(lexicon_path, 'r') pair_list = 
open('../../lexicons/totaltext/weak_voc_pair_list.txt', 'r') pairs = dict() for line in pair_list.readlines(): line=line.strip() word = line.split(' ')[0].upper() word_gt = line[len(word)+1:] pairs[word] = word_gt lexicon_fid=open(lexicon_path, 'r') lexicon=[] for line in lexicon_fid.readlines(): line=line.strip() lexicon.append(line) for res_file in glob.glob("*.txt"): result_path = os.path.join(results_dir,res_file) if os.path.isfile(result_path): with open(result_path,'r') as f: dt_lines = [a.strip() for a in f.readlines()] dt_lines = [list_from_str(dt) for dt in dt_lines] else: dt_lines = [] dt_lines = [dt for dt in dt_lines if dt[-2]>score_rec_seq and dt[-3]>score_rec and dt[-6]>score_det] nms_flag = nms(dt_lines,overlap) boxes = [] for k in range(len(dt_lines)): dt = dt_lines[k] if nms_flag[k]: if dt not in boxes: boxes.append(dt) with open(os.path.join(nms_dir,'gt_'+res_file.split('.')[0].split('_')[1]+'.txt'),'w') as f: for g in boxes: gt_coors = [int(b) for b in g[0:-6]] with open('../../../' + g[-1], "rb") as input_file: dict_scores = pickle.load(input_file) if use_char and use_seq: if g[-2]>g[-3]: word = g[-5] scores = dict_scores['seq_char_scores'][:,1:-1].swapaxes(0,1) else: word = g[-4] scores = dict_scores['seg_char_scores'] elif use_seq: word = g[-5] scores = dict_scores['seq_char_scores'][:,1:-1].swapaxes(0,1) else: word = g[-4] scores = dict_scores['seg_char_scores'] if not use_lexicon: match_word = word match_dist = 0. 
else: match_word, match_dist = find_match_word(word, pairs, scores, use_lexicon, weighted_ed, lexicon) if match_dist<1.5 or lexicon_type==1: gt_coor_strs = [str(a) for a in gt_coors]+ [match_word] f.write(','.join(gt_coor_strs)+'\r\n') pack_name = str(score_det)+'_'+str(score_rec)+'_over'+str(overlap) packing(nms_dir,cache_dir,pack_name) submit_file_path = os.path.join(cache_dir, pack_name+'.zip') return submit_file_path def find_match_word(rec_str, pairs, scores_numpy, use_ed=True, weighted_ed=False, lexicon=None): if not use_ed: return rec_str rec_str = rec_str.upper() dist_min = 100 dist_min_pre = 100 match_word = '' match_dist = 100 if not weighted_ed: for word in lexicon: word = word.upper() ed = editdistance.eval(rec_str, word) length_dist = abs(len(word) - len(rec_str)) # dist = ed + length_dist dist = ed if dist<dist_min: dist_min = dist match_word = pairs[word] match_dist = dist return match_word, match_dist else: small_lexicon_dict = dict() for word in lexicon: word = word.upper() ed = editdistance.eval(rec_str, word) small_lexicon_dict[word] = ed dist = ed if dist<dist_min_pre: dist_min_pre = dist small_lexicon = [] for word in small_lexicon_dict: if small_lexicon_dict[word]<=dist_min_pre+2: small_lexicon.append(word) for word in small_lexicon: word = word.upper() ed = weighted_edit_distance(rec_str, word, scores_numpy) dist = ed if dist<dist_min: dist_min = dist match_word = pairs[word] match_dist = dist return match_word, match_dist def prepare_results_for_evaluation(results_dir, use_lexicon, cache_dir, score_det, score_rec, score_rec_seq): if not os.path.isdir(cache_dir): os.mkdir(cache_dir) result_path = test_single(results_dir,score_det=score_det,score_rec=score_rec,score_rec_seq=score_rec_seq,overlap=0.2,cache_dir=cache_dir,lexicon_type=2, use_lexicon=use_lexicon, weighted_ed=True, use_seq=True, use_char=True, mix=True) return result_path
9,314
38.807692
243
py
MaskTextSpotterV3
MaskTextSpotterV3-master/evaluation/totaltext/e2e/rrc_evaluation_funcs_total_text.py
#!/usr/bin/env python2 #encoding: UTF-8 import json import sys;sys.path.append('./') import zipfile import re import sys import os import codecs import importlib from io import StringIO def print_help(): sys.stdout.write('Usage: python %s.py -g=<gtFile> -s=<submFile> -o=<outputFolder> [-i=<gtImagesFile> -p=<jsonParams>]' %sys.argv[0]) sys.exit(2) def load_zip_file_keys(file,fileNameRegExp=''): """ Returns an array with the entries of the ZIP file that match with the regular expression. The key's are the names or the file or the capturing group definied in the fileNameRegExp """ try: archive=zipfile.ZipFile(file, mode='r', allowZip64=True) except : raise Exception('Error loading the ZIP archive.') pairs = [] for name in archive.namelist(): addFile = True keyName = name # if fileNameRegExp!="": # m = re.match(fileNameRegExp,name) # if m == None: # addFile = False # else: # if len(m.groups())>0: # keyName = m.group(1) if addFile: pairs.append( keyName ) return pairs def load_zip_file(file,fileNameRegExp='',allEntries=False): """ Returns an array with the contents (filtered by fileNameRegExp) of a ZIP file. 
The key's are the names or the file or the capturing group definied in the fileNameRegExp allEntries validates that all entries in the ZIP file pass the fileNameRegExp """ try: archive=zipfile.ZipFile(file, mode='r', allowZip64=True) except : raise Exception('Error loading the ZIP archive') pairs = [] for name in archive.namelist(): addFile = True keyName = name # if fileNameRegExp!="": # m = re.match(fileNameRegExp,name) # if m == None: # addFile = False # else: # if len(m.groups())>0: # keyName = m.group(1) if addFile: pairs.append( [ keyName , archive.read(name)] ) else: if allEntries: raise Exception('ZIP entry not valid: %s' %name) return dict(pairs) def decode_utf8(raw): """ Returns a Unicode object on success, or None on failure """ try: raw = codecs.decode(raw,'utf-8', 'replace') #extracts BOM if exists raw = raw.encode('utf8') if raw.startswith(codecs.BOM_UTF8): raw = raw.replace(codecs.BOM_UTF8, '', 1) return raw.decode('utf-8') except: return None def validate_lines_in_file(fileName,file_contents,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0): """ This function validates that all lines of the file calling the Line validation function for each line """ utf8File = decode_utf8(file_contents) if (utf8File is None) : raise Exception("The file %s is not UTF-8" %fileName) lines = utf8File.split( "\r\n" if CRLF else "\n" ) for line in lines: line = line.replace("\r","").replace("\n","") if(line != ""): try: validate_tl_line(line,LTRB,withTranscription,withConfidence,imWidth,imHeight) except Exception as e: raise Exception(("Line in sample not valid. Sample: %s Line: %s Error: %s" %(fileName,line,str(e))).encode('utf-8', 'replace')) def validate_tl_line(line,LTRB=True,withTranscription=True,withConfidence=True,imWidth=0,imHeight=0): """ Validate the format of the line. If the line is not valid an exception will be raised. If maxWidth and maxHeight are specified, all points must be inside the imgage bounds. 
Posible values are: LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription] LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription] """ get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight) def get_tl_line_values(line,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0): """ Validate the format of the line. If the line is not valid an exception will be raised. If maxWidth and maxHeight are specified, all points must be inside the imgage bounds. Posible values are: LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription] LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription] Returns values from a textline. Points , [Confidences], [Transcriptions] """ confidence = 0.0 transcription = ""; points = [] numPoints = 4; if LTRB: numPoints = 4; if withTranscription and withConfidence: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line) if m == None : m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line) raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription") elif withConfidence: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line) if m == None : raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence") elif withTranscription: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$',line) if m == None : raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription") else: m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$',line) if m == None : raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax") xmin = int(m.group(1)) ymin = int(m.group(2)) xmax = int(m.group(3)) ymax = int(m.group(4)) if(xmax<xmin): raise Exception("Xmax value (%s) not valid (Xmax < Xmin)." 
%(xmax)) if(ymax<ymin): raise Exception("Ymax value (%s) not valid (Ymax < Ymin)." %(ymax)) points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ] if (imWidth>0 and imHeight>0): validate_point_inside_bounds(xmin,ymin,imWidth,imHeight); validate_point_inside_bounds(xmax,ymax,imWidth,imHeight); else: line_split = line.split(',') # print(line_split) numPoints = int((len(line_split) - 1) / 2) points = [ float(line_split[i]) for i in range(2 * numPoints) ] # print(points) transcription = line_split[-1] # numPoints = 8; # if withTranscription and withConfidence: # m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line) # if m == None : # raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription") # elif withConfidence: # m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line) # if m == None : # raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence") # elif withTranscription: # m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$',line) # if m == None : # raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription") # else: # m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$',line) # if m == None : # raise Exception("Format incorrect. 
Should be: x1,y1,x2,y2,x3,y3,x4,y4") # points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ] # validate_clockwise_points(points) # if (imWidth>0 and imHeight>0): # validate_point_inside_bounds(points[0],points[1],imWidth,imHeight); # validate_point_inside_bounds(points[2],points[3],imWidth,imHeight); # validate_point_inside_bounds(points[4],points[5],imWidth,imHeight); # validate_point_inside_bounds(points[6],points[7],imWidth,imHeight); # if withConfidence: # try: # confidence = float(m.group(numPoints+1)) # except ValueError: # raise Exception("Confidence value must be a float") # if withTranscription: # posTranscription = numPoints + (2 if withConfidence else 1) # transcription = m.group(posTranscription) # m2 = re.match(r'^\s*\"(.*)\"\s*$',transcription) # if m2 != None : #Transcription with double quotes, we extract the value and replace escaped characters # transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"") return points,confidence,transcription def validate_point_inside_bounds(x,y,imWidth,imHeight): if(x<0 or x>imWidth): raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" %(xmin,imWidth,imHeight)) if(y<0 or y>imHeight): raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s) Sample: %s Line:%s" %(ymin,imWidth,imHeight)) def validate_clockwise_points(points): """ Validates that the points that the 4 points that dlimite a polygon are in clockwise order. """ if len(points) != 8: raise Exception("Points list not valid." 
+ str(len(points))) point = [ [int(points[0]) , int(points[1])], [int(points[2]) , int(points[3])], [int(points[4]) , int(points[5])], [int(points[6]) , int(points[7])] ] edge = [ ( point[1][0] - point[0][0])*( point[1][1] + point[0][1]), ( point[2][0] - point[1][0])*( point[2][1] + point[1][1]), ( point[3][0] - point[2][0])*( point[3][1] + point[2][1]), ( point[0][0] - point[3][0])*( point[0][1] + point[3][1]) ] summatory = edge[0] + edge[1] + edge[2] + edge[3]; if summatory>0: raise Exception("Points are not clockwise. The coordinates of bounding quadrilaterals have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.") def get_tl_line_values_from_file_contents(content,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0,sort_by_confidences=True): """ Returns all points, confindences and transcriptions of a file in lists. 
Valid line formats: xmin,ymin,xmax,ymax,[confidence],[transcription] x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription] """ pointsList = [] transcriptionsList = [] confidencesList = [] lines = content.split( "\r\n" if CRLF else "\n" ) for line in lines: line = line.replace("\r","").replace("\n","") if(line != "") : points, confidence, transcription = get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight); pointsList.append(points) transcriptionsList.append(transcription) confidencesList.append(confidence) if withConfidence and len(confidencesList)>0 and sort_by_confidences: confidencesList, pointsList,transcriptionsList = (list(t) for t in zip(*sorted(zip(confidencesList, pointsList, transcriptionsList), reverse=True))) return pointsList,confidencesList,transcriptionsList def main_evaluation(p,default_evaluation_params_fn,validate_data_fn,evaluate_method_fn,show_result=True,per_sample=True): """ This process validates a method, evaluates it and if it succed generates a ZIP file with a JSON entry for each sample. Params: p: Dictionary of parmeters with the GT/submission locations. If None is passed, the parameters send by the system are used. 
default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation validate_data_fn: points to a method that validates the corrct format of the submission evaluate_method_fn: points to a function that evaluated the submission and return a Dictionary with the results """ if (p == None): p = dict([s[1:].split('=') for s in sys.argv[1:]]) if(len(sys.argv)<2): print_help() evalParams = default_evaluation_params_fn() if 'p' in list(p.keys()): evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) ) resDict={'calculated':True,'Message':'','method':'{}','per_sample':'{}'} try: validate_data_fn(p['g'], p['s'], evalParams) evalData = evaluate_method_fn(p['g'], p['s'], evalParams) resDict.update(evalData) except Exception as e: resDict['Message']= str(e) resDict['calculated']=False if not os.path.exists(p['o']): os.makedirs(p['o']) resultsOutputname = p['o'] + '/results.zip' outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True) del resDict['per_sample'] if 'output_items' in list(resDict.keys()): del resDict['output_items'] outZip.writestr('method.json',json.dumps(resDict)) if not resDict['calculated']: if show_result: sys.stderr.write('Error!\n'+ resDict['Message']+'\n\n') outZip.close() return resDict if per_sample == True: for k,v in evalData['per_sample'].items(): outZip.writestr( k + '.json',json.dumps(v)) if 'output_items' in list(evalData.keys()): for k, v in evalData['output_items'].items(): outZip.writestr( k,v) outZip.close() if show_result: sys.stdout.write("Calculated!") sys.stdout.write(json.dumps(resDict['method'])) return resDict def main_validation(default_evaluation_params_fn,validate_data_fn): """ This process validates a method Params: default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation validate_data_fn: points to a method that validates the corrct format of the submission 
""" try: p = dict([s[1:].split('=') for s in sys.argv[1:]]) evalParams = default_evaluation_params_fn() if 'p' in list(p.keys()): evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) ) validate_data_fn(p['g'], p['s'], evalParams) print('SUCCESS') sys.exit(0) except Exception as e: print(str(e)) sys.exit(101)
15,482
41.652893
359
py
HASOC-2021---Hate-Speech-Detection
HASOC-2021---Hate-Speech-Detection-main/main.py
import getopt import sys import tensorflow as tf import os import json import numpy as np import file_utils from datetime import datetime import matplotlib.pyplot as plt import h5py from bert.tokenization.bert_tokenization import FullTokenizer from bert import BertModelLayer from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights import bert from sklearn.metrics import recall_score, precision_score, f1_score, average_precision_score, accuracy_score import data_loader import models def prepare_predictions(ids, predictions, labels): prediction_output = [] binary_predictions = list() total_expected = {0: 0, 1: 0} true_positives = {0: 0, 1: 0} for i in range(0, len(labels)): predicted_probs = predictions[i] predicted_class = 1 if predicted_probs[1] >= 0.51 else 0 expected = int(labels[i]) binary_predictions.append(predicted_class) if expected == predicted_class: true_positives[expected] +=1 total_expected[expected] += 1 l = {"id": str(ids[i]), "prediction": str(predicted_class), "label": str(labels[i]), "probs": predicted_probs.tolist()} prediction_output.append(json.dumps(l)) recall_hate = (true_positives[1] / total_expected[1]) if total_expected[1] > 0 else 0 recall_not_hate = (true_positives[0] / total_expected[0]) if total_expected[0] > 0 else 0 binary_predictions = np.array(binary_predictions) average_precision = average_precision_score(binary_predictions, labels) f1 = f1_score(binary_predictions, labels, average='binary') f1_weighted = f1_score(binary_predictions, labels, average='weighted') macro_f1 = f1_score(binary_predictions, labels, average='macro') recall = recall_score(binary_predictions, labels, average='binary') precision = precision_score(binary_predictions, labels, average='binary') accuracy = accuracy_score(binary_predictions, labels) score_output = {"accuracy": accuracy, "average_precision":average_precision, "f1":f1, "weighted_f1":f1_weighted, "macro_f1":macro_f1, "recall":recall, "precision":precision, 
"HatefulOffensive": {"recall": recall_hate, "support": total_expected[1]}, "NOT": {"recall": recall_not_hate, "support": total_expected[0]} } return prediction_output, score_output def train(config): physical_devices = tf.config.experimental.list_physical_devices('GPU') os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' for i in range(len(physical_devices)): tf.config.experimental.set_memory_growth(physical_devices[i], True) # hate word list hate_words = file_utils.read_file_to_list(config['base_dir'] +'resources/hate_words.txt') # BERT related configurations print('Using BERT: {}'.format(config['bert_model_dir'])) bert_ckpt_dir = config['base_dir'] + config['bert_model_dir'] + "/" bert_check_point_file = bert_ckpt_dir + "bert_model.ckpt" bert_config_file = bert_ckpt_dir + "bert_config.json" bert_tokenizer = bert.tokenization.bert_tokenization.FullTokenizer(vocab_file=os.path.join(bert_ckpt_dir, "vocab.txt")) X_train, y_train, y_train_ids, X_valid, y_valid, y_valid_ids, X_test, y_test, y_test_ids = data_loader.load_dataset(config, bert_tokenizer, hate_words) print("Training input file shapes") for k in X_train: print('\t' + k + " shape: " + str(X_train[k].shape)) print("Validation input file shapes") for k in X_valid: print('\t' + k + " shape: " + str(X_valid[k].shape)) print("Test data size", len(y_test_ids)) # folders to save the trained models and results results_dir_path = config['base_dir'] +'results' now = datetime.now() model_dir_path = config['base_dir'] +'results/'+now.strftime("%d-%m-%Y %H:%M:%S").replace(" ", "_") file_utils.create_folder(results_dir_path) file_utils.create_folder(model_dir_path) model_check_point_callback = tf.keras.callbacks.ModelCheckpoint( model_dir_path + '/best_model-epoch-{epoch:03d}-acc-{acc:03f}-val_acc-{val_acc:03f}.h5', save_best_only=True, monitor=config['monitor']) early_stopping_callback = tf.keras.callbacks.EarlyStopping(patience=config["epoch_patience"], restore_best_weights=True, monitor=config['monitor']) tensorboard_callback = 
tf.keras.callbacks.TensorBoard(log_dir=model_dir_path + "/logs") callbacks = [early_stopping_callback] print('Using GPUs: ' + str(tf.test.is_gpu_available())) # create the model model = models.get_model(config, bert_config_file, bert_check_point_file, adapter_size=None) history = model.fit(X_train, y_train, validation_data=(X_valid, y_valid), batch_size=config['batch_size'], shuffle=True, epochs=config['epochs'], callbacks=callbacks) predictions = model.predict(X_test, batch_size=config['batch_size']) test_predictions, test_score_output = prepare_predictions(y_test_ids, predictions, y_test['output_label']) print('Test macro-f1: ', test_score_output['macro_f1']) # save the model # model.save(model_dir_path + "/model.h5") # save prediction score, predictions file_utils.save_string_to_file(json.dumps(test_score_output), model_dir_path + '/test_prediction_score.json') file_utils.save_list_to_file(test_predictions, model_dir_path + '/test_predictions.jsonl') # save the training config file_utils.save_string_to_file(json.dumps(config), model_dir_path + '/training_config.json') N = len(history.epoch) plt.style.use("ggplot") plt.figure() plt.plot(np.arange(1, N + 1), history.history['loss'], label='loss') plt.plot(np.arange(1, N + 1), history.history['val_loss'], label='val_loss') plt.plot(np.arange(1, N + 1), history.history['acc'], label='acc') plt.plot(np.arange(1, N + 1), history.history['val_acc'], label='val_acc') plt.title("Validation, Test Loss and Accuracy on HASOC "+config["dataset_year"]+" Dataset, " + config['optimizer']) plt.xlabel("Epoch #") plt.ylabel("Loss/Accuracy") plt.legend(loc="lower left") plt.savefig(model_dir_path + "/history.png") plt.close() if __name__ == "__main__": argv = (sys.argv[1:]) config_path = 'config.json' try: opts, args = getopt.getopt(argv, "hc:o:") except getopt.GetoptError: print('main.py -c <config_path>') sys.exit(2) for opt, arg in opts: if opt == '-h': print('main.py -c <config_path>') sys.exit() elif opt == "-c": config_path = 
arg if config_path != '': with open(config_path) as json_file: config = json.load(json_file) train(config) else: print('main.py -c <config_path>')
7,085
37.934066
179
py
HASOC-2021---Hate-Speech-Detection
HASOC-2021---Hate-Speech-Detection-main/data_loader.py
import pandas as pd import numpy as np from bert.tokenization.bert_tokenization import FullTokenizer from ekphrasis.classes.preprocessor import TextPreProcessor from ekphrasis.classes.tokenizer import SocialTokenizer from ekphrasis.dicts.emoticons import emoticons from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences import emoji import re import random def replace_digits_emojis(s): s = s.lower().strip() s = emoji.demojize(s) s = re.sub(r'\d+', '', s) s = re.sub(r'[^\w\s]', '', s) s = s.strip() return s def remove_urls_mentions(text): text = re.sub(r"(?:\@|https?\://)\S+", "", text) text = text.replace("RT", "").strip() return text def replace_space(text): text = text.replace("\n", " ").strip() text = re.sub(r"\s+", ' ', text) text = text.strip() return text def merge_outputs(processed_text): text = "" for l in processed_text: if "</" in l: l = l.replace("</", "<") if l in ['<percent>', '<url>', '<', '<number>', '</allcaps>', '<money>', '<phone>', '<allcaps>', '<repeated>', '<hashtag>', '<date>', '<time>', '<censored>', '</hashtag>', '<email>']: continue elif l in ['<emphasis>', '<user>', '<surprise>', '<laugh>', '<sad>', '<annoyed>', '<happy>']: if l == '<user>': continue else: text += " " + l else: text += " " + replace_digits_emojis(l) normalized = replace_space(text) return normalized def normalize_text(input_text:str, text_preprocessor): processed_text = text_preprocessor.pre_process_doc(input_text) normalized_text = merge_outputs(processed_text) return normalized_text def sample_validation_set(X, y, ids): validation_sample_size = int((float(len(ids)) * 0.1)/2) X_train = {} y_train = {} y_train_ids = [] X_valid = {} y_valid = {} y_valid_ids = [] sampled_indexes = {0:[], 1:[]} index_counter = 0 for label in y['output_label']: if len(sampled_indexes[label]) < validation_sample_size: sampled_indexes[label].append(index_counter) index_counter+=1 for k in X: data = X[k] training_data = [] 
validation_data = [] index_counter = 0 for d in data: label = y['output_label'][index_counter] # add to validation split if index_counter in sampled_indexes[label]: validation_data.append(d) else: training_data.append(d) index_counter +=1 X_train[k] = np.array(training_data) X_valid[k] = np.array(validation_data) for k in y: data = y[k] training_data = [] validation_data = [] index_counter = 0 for d in data: label = y['output_label'][index_counter] # add to validation split if index_counter in sampled_indexes[label]: validation_data.append(d) else: training_data.append(d) index_counter +=1 y_train[k] = np.array(training_data) y_valid[k] = np.array(validation_data) index_counter = 0 for id in ids: label = y['output_label'][index_counter] # add to validation split if index_counter in sampled_indexes[label]: y_valid_ids.append(id) else: y_train_ids.append(id) index_counter += 1 return X_train, y_train, y_train_ids, X_valid, y_valid, y_valid_ids def apply_oversampling(ids, labels, text_docs): count = {'HOF':0, 'NOT':0} label_to_ids = {'HOF':[], 'NOT':[]} c = 0 for l in labels: count[l] +=1 id = ids[c] label_to_ids[l].append(id) c+=1 oversampled_ids, oversampled_labels, oversampled_text_docs = [], [], [] if count['HOF'] > count['NOT']: max_label = 'HOF' min_label = 'NOT' else: max_label = 'NOT' min_label = 'HOF' label_diff = count[max_label] - count[min_label] random_ids = random.sample(label_to_ids[min_label], label_diff) for r in random_ids: id_index = ids.index(r) oversampled_ids.append(ids[id_index]) oversampled_labels.append(labels[id_index]) oversampled_text_docs.append(text_docs[id_index]) # add the existing data oversampled_ids.extend(ids) oversampled_text_docs.extend(text_docs) oversampled_labels.extend(labels) return oversampled_ids, oversampled_labels, oversampled_text_docs def tokenize(text): tags = ['<emphasis>', '<user>', '<surprise>', '<percent>', '<url>', '<', '<number>', '</allcaps>', '<money>', '<phone>', '<allcaps>', '<repeated>', '<laugh>', 
'<hashtag>', '<elongated>', '<sad>', '<annoyed>', '<date>', '<time>', '<censored>', '<happy>', '</hashtag>', '<email>'] tokens = text.split(' ') filtered_tokens = [] for t in tokens: if t not in tags: filtered_tokens.append(t) return filtered_tokens def pad_text(max_seq_len, token_ids): token_ids = token_ids[:min(len(token_ids), max_seq_len - 2)] token_ids = token_ids + [0] * (max_seq_len - len(token_ids)) return np.array(token_ids) def embed_text_with_hate_words(config, data: list, hate_words: list): x = list() for text in data: # tokenize tokens = text.split(' ') multihot_encoding_array = np.zeros(len(hate_words), dtype=int) for t in tokens: if t in hate_words: index = hate_words.index(t) multihot_encoding_array[index] = 1 x.append(multihot_encoding_array) return np.array(x) def embed_text_with_bert(config: dict, data: list, bert_tokenizer: FullTokenizer): x = list() for text in data: # tokenize tokens = bert_tokenizer.tokenize(text) tokens = ["[CLS]"] + tokens + ["[SEP]"] # convert tokens into IDs by embedding the text with BERT token_ids = bert_tokenizer.convert_tokens_to_ids(tokens) # pad zeros to the token ids, if necessary max_seq_len = config['tweet_text_seq_len'] token_ids = pad_text(max_seq_len, token_ids) x.append(token_ids) return np.array(x) def embed_text_with_characters(config: dict, data: list): char_tokenizer = Tokenizer(lower=True, char_level=True, oov_token="UNKNOWN") alphabet = " abcdefghijklmnopqrstuvwxyz" char_dict = {"PADDING": 0, "UNKNOWN": 1} for i, char in enumerate(alphabet): char_dict[char] = len(char_dict) char_tokenizer.word_index = char_dict x = char_tokenizer.texts_to_sequences(data) x_padded = pad_sequences(x, padding='post', maxlen=config['tweet_text_char_len']) return x_padded def normalize_text_docs(text_docs:list, text_preprocessor): normalized_text_docs = [] for text in text_docs: normalized_text = normalize_text(text, text_preprocessor) normalized_text_docs.append(normalized_text) return normalized_text_docs def 
encode_labels(data: list): y = list() label_to_index = {"HOF": 1, "NOT": 0} for label in data: y.append(label_to_index[label]) return np.array(y) def load_split(config, df, bert_tokenizer, hate_words, text_preprocessor, oversample:bool): X, y = {}, {} ids = df["id"].tolist() labels = df["label"].tolist() text_docs = df["text"].tolist() if oversample: ids, labels, text_docs = apply_oversampling(ids, labels, text_docs) if config['normalize_text']: text_docs = normalize_text_docs(text_docs, text_preprocessor) if "bert" in config["text_models"]: X["text_bert"] = embed_text_with_bert(config, text_docs, bert_tokenizer) if "hate_words" in config["text_models"]: X["text_hate_words"] = embed_text_with_hate_words(config, text_docs, hate_words) if "char_emb" in config["text_models"]: X["text_char_emb"] = embed_text_with_characters(config, text_docs) y['output_label'] = encode_labels(labels) return X, y, ids def load_dataset(config, bert_tokenizer, hate_words): train_df = pd.read_csv(config['base_dir'] + 'resources/hasoc_data/'+config['dataset_year']+'/train.tsv', sep='\t', header=0) test_df = pd.read_csv(config['base_dir'] + 'resources/hasoc_data/'+config['dataset_year']+'/test.tsv', sep='\t', header=0) # load the Ekphrasis preprocessor text_preprocessor = TextPreProcessor( # terms that will be normalized normalize=['url', 'email', 'percent', 'money', 'phone', 'user', 'time', 'url', 'date', 'number'], # terms that will be annotated annotate={"hashtag", "allcaps", "elongated", "repeated", 'emphasis', 'censored'}, fix_html=True, # fix HTML tokens # corpus from which the word statistics are going to be used # for word segmentation segmenter="twitter", # corpus from which the word statistics are going to be used # for spell correction corrector="twitter", unpack_hashtags=True, # perform word segmentation on hashtags unpack_contractions=True, # Unpack contractions (can't -> can not) spell_correct_elong=False, # spell correction for elongated words # select a tokenizer. 
You can use SocialTokenizer, or pass your own # the tokenizer, should take as input a string and return a list of tokens tokenizer=SocialTokenizer(lowercase=True).tokenize, # list of dictionaries, for replacing tokens extracted from the text, # with other expressions. You can pass more than one dictionaries. dicts=[emoticons] ) X_train, y_train, y_train_ids = load_split(config, train_df, bert_tokenizer, hate_words, text_preprocessor, oversample=config['oversample']) X_test, y_test, y_test_ids = load_split(config, test_df, bert_tokenizer, hate_words, text_preprocessor, oversample=False) X_train, y_train, y_train_ids, X_valid, y_valid, y_valid_ids = sample_validation_set(X_train, y_train, y_train_ids) return X_train, y_train, y_train_ids, X_valid, y_valid, y_valid_ids, X_test, y_test, y_test_ids
10,369
31.507837
144
py
HASOC-2021---Hate-Speech-Detection
HASOC-2021---Hate-Speech-Detection-main/file_utils.py
import os import json def save_list_to_file(input_list: list, file_path): file = open(file_path, 'w') file.write("\n".join(str(item) for item in input_list)) file.close() def save_string_to_file(text, file_path): file = open(file_path, 'w') file.write(text) file.close() def read_file_to_set(file_path): content = set() if os.path.isfile(file_path): with open(file_path, "r") as file: for l in file.readlines(): content.add(l.strip()) return content def read_file_to_list(file_path): content = list() if os.path.isfile(file_path): with open(file_path, "r") as file: for l in file.readlines(): content.append(l.strip().replace("\n", "")) return content def read_json_file(file_path): with open(file_path) as json_file: data = json.load(json_file) return data def path_exists(dir_path): return os.path.exists(dir_path) def create_folder(dir_path): if not path_exists(dir_path): os.mkdir(dir_path) pass
1,070
23.906977
59
py
HASOC-2021---Hate-Speech-Detection
HASOC-2021---Hate-Speech-Detection-main/models.py
import tensorflow as tf import numpy as np from bert import BertModelLayer from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights from tensorflow import keras from tensorflow.keras import layers class MultiHeadSelfAttention(layers.Layer): def __init__(self, embed_dim, num_heads=8): super(MultiHeadSelfAttention, self).__init__() self.embed_dim = embed_dim self.num_heads = num_heads if embed_dim % num_heads != 0: raise ValueError( f"embedding dimension = {embed_dim} should be divisible by number of heads = {num_heads}" ) self.projection_dim = embed_dim // num_heads self.query_dense = layers.Dense(embed_dim) self.key_dense = layers.Dense(embed_dim) self.value_dense = layers.Dense(embed_dim) self.combine_heads = layers.Dense(embed_dim) def attention(self, query, key, value): score = tf.matmul(query, key, transpose_b=True) dim_key = tf.cast(tf.shape(key)[-1], tf.float32) scaled_score = score / tf.math.sqrt(dim_key) weights = tf.nn.softmax(scaled_score, axis=-1) output = tf.matmul(weights, value) return output, weights def separate_heads(self, x, batch_size): x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, inputs): # x.shape = [batch_size, seq_len, embedding_dim] batch_size = tf.shape(inputs)[0] query = self.query_dense(inputs) # (batch_size, seq_len, embed_dim) key = self.key_dense(inputs) # (batch_size, seq_len, embed_dim) value = self.value_dense(inputs) # (batch_size, seq_len, embed_dim) query = self.separate_heads( query, batch_size ) # (batch_size, num_heads, seq_len, projection_dim) key = self.separate_heads( key, batch_size ) # (batch_size, num_heads, seq_len, projection_dim) value = self.separate_heads( value, batch_size ) # (batch_size, num_heads, seq_len, projection_dim) attention, weights = self.attention(query, key, value) attention = tf.transpose( attention, perm=[0, 2, 1, 3] ) # (batch_size, seq_len, num_heads, projection_dim) concat_attention 
= tf.reshape( attention, (batch_size, -1, self.embed_dim) ) # (batch_size, seq_len, embed_dim) output = self.combine_heads( concat_attention ) # (batch_size, seq_len, embed_dim) return output class TransformerBlock(layers.Layer): def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1): super(TransformerBlock, self).__init__() self.att = MultiHeadSelfAttention(embed_dim, num_heads) self.ffn = keras.Sequential( [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),] ) self.layernorm1 = layers.LayerNormalization(epsilon=1e-6) self.layernorm2 = layers.LayerNormalization(epsilon=1e-6) self.dropout1 = layers.Dropout(rate) self.dropout2 = layers.Dropout(rate) def call(self, inputs, training): attn_output = self.att(inputs) attn_output = self.dropout1(attn_output, training=training) out1 = self.layernorm1(inputs + attn_output) ffn_output = self.ffn(out1) ffn_output = self.dropout2(ffn_output, training=training) return self.layernorm2(out1 + ffn_output) class BahdanauAttention(tf.keras.layers.Layer): def __init__(self, units): super(BahdanauAttention, self).__init__() self.W1 = tf.keras.layers.Dense(units) self.W2 = tf.keras.layers.Dense(units) self.V = tf.keras.layers.Dense(1) def get_config(self): config = super().get_config().copy() config.update({ 'W1': self.W1, 'W2': self.W2, 'V': self.V }) return config def call(self, values, query): # query hidden state shape == (batch_size, hidden size) # query_with_time_axis shape == (batch_size, 1, hidden size) # values shape == (batch_size, max_len, hidden size) # we are doing this to broadcast addition along the time axis to calculate the score query_with_time_axis = tf.expand_dims(query, 1) # score shape == (batch_size, max_length, 1) # we get 1 at the last axis because we are applying score to self.V # the shape of the tensor before applying self.V is (batch_size, max_length, units) score = self.V(tf.nn.tanh( self.W1(query_with_time_axis) + self.W2(values))) # attention_weights shape == (batch_size, max_length, 
1) attention_weights = tf.nn.softmax(score, axis=1) # context_vector shape after sum == (batch_size, hidden_size) context_vector = attention_weights * values context_vector = tf.reduce_sum(context_vector, axis=1) return context_vector, attention_weights def flatten_layers(root_layer): if isinstance(root_layer, tf.keras.layers.Layer): yield root_layer for layer in root_layer._layers: for sub_layer in flatten_layers(layer): yield sub_layer def freeze_all_bert_layers(l_bert): """ Freezes all but LayerNorm and adapter layers - see arXiv:1902.00751. """ l_bert.trainable = False l_bert.encoders_layer.trainable = False for layer in l_bert.submodules: if layer.name in ["LayerNorm", "adapter-down", "adapter-up"]: layer.trainable = True else: layer.trainable = False def freeze_bert_layers(l_bert): """ Freezes all but LayerNorm and adapter layers - see arXiv:1902.00751. """ for layer in flatten_layers(l_bert): if layer.name in ["LayerNorm", "adapter-down", "adapter-up"]: layer.trainable = True # elif len(layer._layers) == 0: # layer.trainable = False else: layer.trainable = False l_bert.embeddings_layer.trainable = False def encode_gru_with_attention(config, input): if config["text_use_attention"]: gru_forward = tf.keras.layers.GRU(config["rnn_layer_size"], return_sequences=True, return_state=True, activation='relu') attention_layer = BahdanauAttention(config["text_attention_size"]) # apply forward GRU, attention forward_seq, forward_hidden_state = gru_forward(input) forward_attention_result, forward_attention_weights = attention_layer(forward_seq, forward_hidden_state) # concatenate attention results text_encoding = forward_attention_result else: gru_forward = tf.keras.layers.GRU(config["rnn_layer_size"], activation='relu') # apply forward GRU, attention text_encoding = gru_forward(input) return text_encoding def encode_lstm_with_attention(config, input): if config["text_use_attention"]: lstm_forward = tf.keras.layers.LSTM(config["rnn_layer_size"], return_sequences=True, 
return_state=True, activation='tanh') attention_layer = BahdanauAttention(config["text_attention_size"]) # apply forward GRU, attention forward_seq, forward_hidden_state, forward_cell_state = lstm_forward(input) forward_attention_result, forward_attention_weights = attention_layer(forward_seq, forward_hidden_state) # concatenate attention results text_encoding = forward_attention_result else: lstm_forward = tf.keras.layers.LSTM(config["rnn_layer_size"]) # apply forward GRU, attention text_encoding = lstm_forward(input) return text_encoding def encode_bigru_with_attention(config, input): if config["text_use_attention"]: gru_forward = tf.keras.layers.GRU(config["rnn_layer_size"], return_sequences=True, return_state=True, activation='tanh') gru_backward = tf.keras.layers.GRU(config["rnn_layer_size"], go_backwards=True, return_sequences=True, return_state=True, activation='tanh') attention_layer = BahdanauAttention(config["text_attention_size"]) # apply forward GRU, attention forward_seq, forward_hidden_state = gru_forward(input) forward_attention_result, forward_attention_weights = attention_layer(forward_seq, forward_hidden_state) # apply backward GRU, attention backward_seq, backward_hidden_state = gru_backward(input) backward_attention_result, backward_attention_weights = attention_layer(backward_seq, backward_hidden_state) # concatenate attention results text_encoding = tf.keras.layers.concatenate([backward_attention_result, forward_attention_result]) else: gru_forward = tf.keras.layers.GRU(config["rnn_layer_size"], activation='tanh') gru_backward = tf.keras.layers.GRU(config["rnn_layer_size"], go_backwards=True, activation='tanh') # apply forward GRU, attention forward_hidden_state = gru_forward(input) backward_hidden_state = gru_backward(input) # concatenate attention results text_encoding = tf.keras.layers.concatenate([backward_hidden_state, forward_hidden_state]) return text_encoding def encode_text_with_bert(config, input_layer, bert): bert_output = 
bert(input_layer) if config["rnn_type"] == 'gru': text_encoding = encode_gru_with_attention(config, bert_output) elif config["rnn_type"] == 'lstm': text_encoding = encode_lstm_with_attention(config, bert_output) elif config["rnn_type"] == 'bi-gru': text_encoding = encode_bigru_with_attention(config, bert_output) else: text_encoding = tf.keras.layers.Convolution1D(filters=100, kernel_size=5, padding='same', activation='tanh')( bert_output) text_encoding = tf.keras.layers.Convolution1D(filters=80, kernel_size=5, padding='same', activation='tanh')( text_encoding) text_encoding = tf.keras.layers.Convolution1D(filters=50, kernel_size=5, padding='same', activation='tanh')( text_encoding) text_encoding = tf.keras.layers.AvgPool1D()(text_encoding) text_encoding = tf.keras.layers.Flatten()(text_encoding) return text_encoding def encode_text_with_hateword_list(config, input_layer): hate_words_encoding = tf.keras.layers.Dense(1493, name="hatewords_norm_layer_1")(input_layer) hate_words_encoding = tf.keras.layers.BatchNormalization()(hate_words_encoding) hate_words_encoding = tf.keras.layers.Activation("relu")(hate_words_encoding) hate_words_encoding2 = tf.keras.layers.Dense(512, name="hatewords_norm_layer_2")(hate_words_encoding) hate_words_encoding2 = tf.keras.layers.BatchNormalization()(hate_words_encoding2) hate_words_encoding2 = tf.keras.layers.Activation("relu")(hate_words_encoding2) return hate_words_encoding2 def encode_text_with_char_embeddings(config, input_layer): char_embedding_layer = tf.keras.layers.Embedding(input_dim=config["char_size"], trainable=True, output_dim=50, embeddings_initializer='uniform', name="char_embs") char_emb_out = char_embedding_layer(input_layer) if config["text_encoder"] == 'gru': text_encoding = encode_gru_with_attention(config, char_emb_out) elif config["text_encoder"] == 'lstm': text_encoding = encode_lstm_with_attention(config, char_emb_out) elif config["text_encoder"] == 'bi-gru': text_encoding = encode_bigru_with_attention(config, 
char_emb_out) else: text_encoding = tf.keras.layers.Convolution1D(filters=40, kernel_size=5, padding='same', activation='relu')( char_emb_out) text_encoding = tf.keras.layers.Convolution1D(filters=20, kernel_size=5, padding='same', activation='relu')( text_encoding) text_encoding = tf.keras.layers.Convolution1D(filters=10, kernel_size=5, padding='same', activation='relu')( text_encoding) text_encoding = tf.keras.layers.AvgPool1D()(text_encoding) text_encoding = tf.keras.layers.Flatten()(text_encoding) return text_encoding def get_fusion_layer_sizes(individual_layer_size, num_modalitiies): layer_sizes = [] first_layer_size = individual_layer_size * num_modalitiies layer_sizes.append(first_layer_size) return layer_sizes def encode_inputs(config, bert_config_file, bert_check_point_file, adapter_size=None): """Creates a classification model.""" inputs = [] modality_outputs = [] image_models = [] has_bert_modality = False if "bert" in config["text_models"]: has_bert_modality = True if has_bert_modality: with tf.io.gfile.GFile(bert_config_file, "r") as reader: bc = StockBertConfig.from_json_string(reader.read()) bert_params = map_stock_config_to_params(bc) bert_params.adapter_size = adapter_size bert = BertModelLayer.from_params(bert_params, name="bert") else: bert = None if "bert" in config["text_models"]: tweet_text_bert_input = tf.keras.layers.Input(shape=(config['tweet_text_seq_len'],), dtype='int32', name="text_bert") inputs.append(tweet_text_bert_input) # encode with BERT tweet_text_encoding = encode_text_with_bert(config, tweet_text_bert_input, bert) modality_outputs.append(tweet_text_encoding) if "hate_words" in config["text_models"]: tweet_text_hate_words_input = tf.keras.layers.Input(shape=(1493,), dtype='int32', name="text_hate_words") inputs.append(tweet_text_hate_words_input) # encode hatewords tweet_text_hate_words_encoding = encode_text_with_hateword_list(config, tweet_text_hate_words_input) modality_outputs.append(tweet_text_hate_words_encoding) if 
"char_emb" in config["text_models"]: tweet_text_char_input = tf.keras.layers.Input(shape=(config['tweet_text_char_len'],), dtype='int32', name="text_char_emb") inputs.append(tweet_text_char_input) # encode with char embeddings tweet_text_char_encoding = encode_text_with_char_embeddings(config, tweet_text_char_input) modality_outputs.append(tweet_text_char_encoding) return inputs, modality_outputs, has_bert_modality, bert def get_model(config, bert_config_file, bert_check_point_file, adapter_size=None): """Creates a classification model.""" inputs, modality_outputs, has_bert_modality, bert = encode_inputs(config, bert_config_file, bert_check_point_file, adapter_size) outputs = [] if len(modality_outputs) > 1: concat_embedding = tf.keras.layers.concatenate(modality_outputs) else: concat_embedding = modality_outputs[0] fusion_layer_output = concat_embedding # fusion_layer_size = len(modality_outputs) * config['feature_normalization_layer_size'] fusion_layer_size = fusion_layer_output.shape[1] counter = 1 while fusion_layer_size > config['min_feature_normalization_layer_size']: fusion_layer_output = tf.keras.layers.Dense(fusion_layer_size, name="fusion_layer_"+str(counter))(fusion_layer_output) batch_norm_layer_output = tf.keras.layers.BatchNormalization(name="batch_norm_layer_"+str(counter))(fusion_layer_output) activation_layer_output = tf.keras.layers.Activation("relu", name="relu_layer_"+str(counter))(batch_norm_layer_output) if counter == 1: adapted_layer_size = np.power(2, int(np.log2(fusion_layer_size))) if adapted_layer_size == fusion_layer_size: fusion_layer_size /= 2 else: fusion_layer_size = adapted_layer_size else: # decrease by half fusion_layer_size /= 2 counter+=1 fusion_layer_output = activation_layer_output last_layer_output = tf.keras.layers.Dense(units=2, activation="softmax", name='output_label')(fusion_layer_output) outputs.append(last_layer_output) model = tf.keras.Model(inputs=inputs, outputs=outputs) # load the pre-trained model weights, if BERT 
is used as a modality if has_bert_modality: load_stock_weights(bert, bert_check_point_file) # freeze weights if adapter-BERT is used if adapter_size is not None: freeze_bert_layers(bert) else: freeze_all_bert_layers(bert) if config["optimizer"] == "sgd": optimizer = tf.keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9) elif config["optimizer"] == "rmsprop": optimizer = tf.keras.optimizers.RMSProp() elif config["optimizer"] == "adagrad": optimizer = tf.keras.optimizers.Adagrad() else: optimizer = tf.keras.optimizers.Adam(0.0001) # Enable Mixed Precision for faster computation, less memory # optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer) model.compile(optimizer=optimizer, loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")]) model.summary() return model
17,504
41.799511
166
py
steer
steer-master/ffjord/train_vae_flow.py
# !/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import argparse import time import torch import torch.utils.data import torch.optim as optim import numpy as np import math import random import os import datetime import lib.utils as utils import lib.layers.odefunc as odefunc import vae_lib.models.VAE as VAE import vae_lib.models.CNFVAE as CNFVAE from vae_lib.optimization.training import train, evaluate from vae_lib.utils.load_data import load_dataset from vae_lib.utils.plotting import plot_training_curve SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams'] parser = argparse.ArgumentParser(description='PyTorch Sylvester Normalizing flows') parser.add_argument( '-d', '--dataset', type=str, default='mnist', choices=['mnist', 'freyfaces', 'omniglot', 'caltech'], metavar='DATASET', help='Dataset choice.' ) parser.add_argument( '-freys', '--freyseed', type=int, default=123, metavar='FREYSEED', help="""Seed for shuffling frey face dataset for test split. Ignored for other datasets. Results in paper are produced with seeds 123, 321, 231""" ) parser.add_argument('-nc', '--no_cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--manual_seed', type=int, help='manual seed, if not given resorts to random seed.') parser.add_argument( '-li', '--log_interval', type=int, default=10, metavar='LOG_INTERVAL', help='how many batches to wait before logging training status' ) parser.add_argument( '-od', '--out_dir', type=str, default='snapshots', metavar='OUT_DIR', help='output directory for model snapshots etc.' 
) # optimization settings parser.add_argument( '-e', '--epochs', type=int, default=2000, metavar='EPOCHS', help='number of epochs to train (default: 2000)' ) parser.add_argument( '-es', '--early_stopping_epochs', type=int, default=35, metavar='EARLY_STOPPING', help='number of early stopping epochs' ) parser.add_argument( '-bs', '--batch_size', type=int, default=100, metavar='BATCH_SIZE', help='input batch size for training' ) parser.add_argument('-lr', '--learning_rate', type=float, default=0.0005, metavar='LEARNING_RATE', help='learning rate') parser.add_argument( '-w', '--warmup', type=int, default=100, metavar='N', help='number of epochs for warm-up. Set to 0 to turn warmup off.' ) parser.add_argument('--max_beta', type=float, default=1., metavar='MB', help='max beta for warm-up') parser.add_argument('--min_beta', type=float, default=0.0, metavar='MB', help='min beta for warm-up') parser.add_argument( '-f', '--flow', type=str, default='no_flow', choices=[ 'planar', 'iaf', 'householder', 'orthogonal', 'triangular', 'cnf', 'cnf_bias', 'cnf_hyper', 'cnf_rank', 'cnf_lyper', 'no_flow' ], help="""Type of flows to use, no flows can also be selected""" ) parser.add_argument('-r', '--rank', type=int, default=1) parser.add_argument( '-nf', '--num_flows', type=int, default=4, metavar='NUM_FLOWS', help='Number of flow layers, ignored in absence of flows' ) parser.add_argument( '-nv', '--num_ortho_vecs', type=int, default=8, metavar='NUM_ORTHO_VECS', help=""" For orthogonal flow: How orthogonal vectors per flow do you need. Ignored for other flow types.""" ) parser.add_argument( '-nh', '--num_householder', type=int, default=8, metavar='NUM_HOUSEHOLDERS', help=""" For Householder Sylvester flow: Number of Householder matrices per flow. Ignored for other flow types.""" ) parser.add_argument( '-mhs', '--made_h_size', type=int, default=320, metavar='MADEHSIZE', help='Width of mades for iaf. Ignored for all other flows.' 
) parser.add_argument('--z_size', type=int, default=64, metavar='ZSIZE', help='how many stochastic hidden units') # gpu/cpu parser.add_argument('--gpu_num', type=int, default=0, metavar='GPU', help='choose GPU to run on.') # CNF settings parser.add_argument( "--layer_type", type=str, default="concat", choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"] ) parser.add_argument('--dims', type=str, default='512-512') parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.') parser.add_argument('--time_length', type=float, default=0.5) parser.add_argument('--train_T', type=eval, default=False) parser.add_argument("--divergence_fn", type=str, default="approximate", choices=["brute_force", "approximate"]) parser.add_argument("--nonlinearity", type=str, default="softplus", choices=odefunc.NONLINEARITIES) parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS) parser.add_argument('--atol', type=float, default=1e-5) parser.add_argument('--rtol', type=float, default=1e-5) parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.") parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None]) parser.add_argument('--test_atol', type=float, default=None) parser.add_argument('--test_rtol', type=float, default=None) parser.add_argument('--residual', type=eval, default=False, choices=[True, False]) parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False]) parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False]) parser.add_argument('--bn_lag', type=float, default=0) # evaluation parser.add_argument('--evaluate', type=eval, default=False, choices=[True, False]) parser.add_argument('--model_path', type=str, default='') parser.add_argument('--retrain_encoder', type=eval, default=False, choices=[True, False]) args = parser.parse_args() args.cuda = not args.no_cuda and 
torch.cuda.is_available() if args.manual_seed is None: args.manual_seed = random.randint(1, 100000) random.seed(args.manual_seed) torch.manual_seed(args.manual_seed) np.random.seed(args.manual_seed) if args.cuda: # gpu device number torch.cuda.set_device(args.gpu_num) kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {} def run(args, kwargs): # ================================================================================================================== # SNAPSHOTS # ================================================================================================================== args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_') args.model_signature = args.model_signature.replace(':', '_') snapshots_path = os.path.join(args.out_dir, 'vae_' + args.dataset + '_') snap_dir = snapshots_path + args.flow if args.flow != 'no_flow': snap_dir += '_' + 'num_flows_' + str(args.num_flows) if args.flow == 'orthogonal': snap_dir = snap_dir + '_num_vectors_' + str(args.num_ortho_vecs) elif args.flow == 'orthogonalH': snap_dir = snap_dir + '_num_householder_' + str(args.num_householder) elif args.flow == 'iaf': snap_dir = snap_dir + '_madehsize_' + str(args.made_h_size) elif args.flow == 'permutation': snap_dir = snap_dir + '_' + 'kernelsize_' + str(args.kernel_size) elif args.flow == 'mixed': snap_dir = snap_dir + '_' + 'num_householder_' + str(args.num_householder) elif args.flow == 'cnf_rank': snap_dir = snap_dir + '_rank_' + str(args.rank) + '_' + args.dims + '_num_blocks_' + str(args.num_blocks) elif 'cnf' in args.flow: snap_dir = snap_dir + '_' + args.dims + '_num_blocks_' + str(args.num_blocks) if args.retrain_encoder: snap_dir = snap_dir + '_retrain-encoder_' elif args.evaluate: snap_dir = snap_dir + '_evaluate_' snap_dir = snap_dir + '__' + args.model_signature + '/' args.snap_dir = snap_dir if not os.path.exists(snap_dir): os.makedirs(snap_dir) # logger utils.makedirs(args.snap_dir) logger = 
utils.get_logger(logpath=os.path.join(args.snap_dir, 'logs'), filepath=os.path.abspath(__file__)) logger.info(args) # SAVING torch.save(args, snap_dir + args.flow + '.config') # ================================================================================================================== # LOAD DATA # ================================================================================================================== train_loader, val_loader, test_loader, args = load_dataset(args, **kwargs) if not args.evaluate: # ============================================================================================================== # SELECT MODEL # ============================================================================================================== # flow parameters and architecture choice are passed on to model through args if args.flow == 'no_flow': model = VAE.VAE(args) elif args.flow == 'planar': model = VAE.PlanarVAE(args) elif args.flow == 'iaf': model = VAE.IAFVAE(args) elif args.flow == 'orthogonal': model = VAE.OrthogonalSylvesterVAE(args) elif args.flow == 'householder': model = VAE.HouseholderSylvesterVAE(args) elif args.flow == 'triangular': model = VAE.TriangularSylvesterVAE(args) elif args.flow == 'cnf': model = CNFVAE.CNFVAE(args) elif args.flow == 'cnf_bias': model = CNFVAE.AmortizedBiasCNFVAE(args) elif args.flow == 'cnf_hyper': model = CNFVAE.HypernetCNFVAE(args) elif args.flow == 'cnf_lyper': model = CNFVAE.LypernetCNFVAE(args) elif args.flow == 'cnf_rank': model = CNFVAE.AmortizedLowRankCNFVAE(args) else: raise ValueError('Invalid flow choice') if args.retrain_encoder: logger.info(f"Initializing decoder from {args.model_path}") dec_model = torch.load(args.model_path) dec_sd = {} for k, v in dec_model.state_dict().items(): if 'p_x' in k: dec_sd[k] = v model.load_state_dict(dec_sd, strict=False) if args.cuda: logger.info("Model on GPU") model.cuda() logger.info(model) if args.retrain_encoder: parameters = [] logger.info('Optimizing over:') for 
name, param in model.named_parameters(): if 'p_x' not in name: logger.info(name) parameters.append(param) else: parameters = model.parameters() optimizer = optim.Adamax(parameters, lr=args.learning_rate, eps=1.e-7) # ================================================================================================================== # TRAINING # ================================================================================================================== train_loss = [] val_loss = [] # for early stopping best_loss = np.inf best_bpd = np.inf e = 0 epoch = 0 train_times = [] for epoch in range(1, args.epochs + 1): t_start = time.time() tr_loss = train(epoch, train_loader, model, optimizer, args, logger) train_loss.append(tr_loss) train_times.append(time.time() - t_start) logger.info('One training epoch took %.2f seconds' % (time.time() - t_start)) v_loss, v_bpd = evaluate(val_loader, model, args, logger, epoch=epoch) val_loss.append(v_loss) # early-stopping if v_loss < best_loss: e = 0 best_loss = v_loss if args.input_type != 'binary': best_bpd = v_bpd logger.info('->model saved<-') torch.save(model, snap_dir + args.flow + '.model') # torch.save(model, snap_dir + args.flow + '_' + args.architecture + '.model') elif (args.early_stopping_epochs > 0) and (epoch >= args.warmup): e += 1 if e > args.early_stopping_epochs: break if args.input_type == 'binary': logger.info( '--> Early stopping: {}/{} (BEST: loss {:.4f})\n'.format(e, args.early_stopping_epochs, best_loss) ) else: logger.info( '--> Early stopping: {}/{} (BEST: loss {:.4f}, bpd {:.4f})\n'. 
format(e, args.early_stopping_epochs, best_loss, best_bpd) ) if math.isnan(v_loss): raise ValueError('NaN encountered!') train_loss = np.hstack(train_loss) val_loss = np.array(val_loss) plot_training_curve(train_loss, val_loss, fname=snap_dir + '/training_curve_%s.pdf' % args.flow) # training time per epoch train_times = np.array(train_times) mean_train_time = np.mean(train_times) std_train_time = np.std(train_times, ddof=1) logger.info('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time)) # ================================================================================================================== # EVALUATION # ================================================================================================================== logger.info(args) logger.info('Stopped after %d epochs' % epoch) logger.info('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time)) final_model = torch.load(snap_dir + args.flow + '.model') validation_loss, validation_bpd = evaluate(val_loader, final_model, args, logger) else: validation_loss = "N/A" validation_bpd = "N/A" logger.info(f"Loading model from {args.model_path}") final_model = torch.load(args.model_path) test_loss, test_bpd = evaluate(test_loader, final_model, args, logger, testing=True) logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL): {:.4f}'.format(validation_loss)) logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST): {:.4f}'.format(test_loss)) if args.input_type != 'binary': logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL) BPD : {:.4f}'.format(validation_bpd)) logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST) BPD: {:.4f}'.format(test_bpd)) if __name__ == "__main__": run(args, kwargs)
14,613
39.707521
124
py
steer
steer-master/ffjord/train_toy.py
import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import argparse import os import time import torch import torch.optim as optim import lib.toy_data as toy_data import lib.utils as utils from lib.visualize_flow import visualize_transform import lib.layers.odefunc as odefunc from train_misc import standard_normal_logprob from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time from train_misc import add_spectral_norm, spectral_norm_power_iteration from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log from train_misc import build_model_tabular from diagnostics.viz_toy import save_trajectory, trajectory_to_video SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams'] parser = argparse.ArgumentParser('Continuous Normalizing Flow') parser.add_argument( '--data', choices=['swissroll', '8gaussians', 'pinwheel', 'circles', 'moons', '2spirals', 'checkerboard', 'rings'], type=str, default='pinwheel' ) parser.add_argument( "--layer_type", type=str, default="concatsquash", choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"] ) parser.add_argument('--dims', type=str, default='64-64-64') parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.') parser.add_argument('--time_length', type=float, default=0.5) parser.add_argument('--train_T', type=eval, default=True) parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"]) parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES) parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS) parser.add_argument('--atol', type=float, default=1e-5) parser.add_argument('--rtol', type=float, default=1e-5) parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.") 
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None]) parser.add_argument('--test_atol', type=float, default=None) parser.add_argument('--test_rtol', type=float, default=None) parser.add_argument('--residual', type=eval, default=False, choices=[True, False]) parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False]) parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False]) parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False]) parser.add_argument('--bn_lag', type=float, default=0) parser.add_argument('--niters', type=int, default=10000) parser.add_argument('--batch_size', type=int, default=100) parser.add_argument('--test_batch_size', type=int, default=1000) parser.add_argument('--lr', type=float, default=1e-3) parser.add_argument('--weight_decay', type=float, default=1e-5) # Track quantities parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1") parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2") parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2") parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F") parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F") parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F") parser.add_argument('--save', type=str, default='experiments/cnf') parser.add_argument('--viz_freq', type=int, default=100) parser.add_argument('--val_freq', type=int, default=100) parser.add_argument('--log_freq', type=int, default=10) parser.add_argument('--gpu', type=int, default=0) args = parser.parse_args() # logger utils.makedirs(args.save) logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__)) if args.layer_type == "blend": logger.info("!! 
Setting time_length from None to 1.0 due to use of Blend layers.") args.time_length = 1.0 logger.info(args) device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu') def get_transforms(model): def sample_fn(z, logpz=None): if logpz is not None: return model(z, logpz, reverse=True) else: return model(z, reverse=True) def density_fn(x, logpx=None): if logpx is not None: return model(x, logpx, reverse=False) else: return model(x, reverse=False) return sample_fn, density_fn def compute_loss(args, model, batch_size=None): if batch_size is None: batch_size = args.batch_size # load data x = toy_data.inf_train_gen(args.data, batch_size=batch_size) x = torch.from_numpy(x).type(torch.float32).to(device) zero = torch.zeros(x.shape[0], 1).to(x) # transform to z z, delta_logp = model(x, zero) # compute log q(z) logpz = standard_normal_logprob(z).sum(1, keepdim=True) logpx = logpz - delta_logp loss = -torch.mean(logpx) return loss if __name__ == '__main__': regularization_fns, regularization_coeffs = create_regularization_fns(args) model = build_model_tabular(args, 2, regularization_fns).to(device) if args.spectral_norm: add_spectral_norm(model) set_cnf_options(args, model) logger.info(model) logger.info("Number of trainable parameters: {}".format(count_parameters(model))) optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) time_meter = utils.RunningAverageMeter(0.93) loss_meter = utils.RunningAverageMeter(0.93) nfef_meter = utils.RunningAverageMeter(0.93) nfeb_meter = utils.RunningAverageMeter(0.93) tt_meter = utils.RunningAverageMeter(0.93) end = time.time() best_loss = float('inf') model.train() for itr in range(1, args.niters + 1): optimizer.zero_grad() if args.spectral_norm: spectral_norm_power_iteration(model, 1) loss = compute_loss(args, model) loss_meter.update(loss.item()) if len(regularization_coeffs) > 0: reg_states = get_regularization(model, regularization_coeffs) reg_loss = sum( reg_state * coeff for 
reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0 ) loss = loss + reg_loss total_time = count_total_time(model) nfe_forward = count_nfe(model) loss.backward() optimizer.step() nfe_total = count_nfe(model) nfe_backward = nfe_total - nfe_forward nfef_meter.update(nfe_forward) nfeb_meter.update(nfe_backward) time_meter.update(time.time() - end) tt_meter.update(total_time) log_message = ( 'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})' ' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format( itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg, nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg ) ) if len(regularization_coeffs) > 0: log_message = append_regularization_to_log(log_message, regularization_fns, reg_states) logger.info(log_message) if itr % args.val_freq == 0 or itr == args.niters: with torch.no_grad(): model.eval() test_loss = compute_loss(args, model, batch_size=args.test_batch_size) test_nfe = count_nfe(model) log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe) logger.info(log_message) if test_loss.item() < best_loss: best_loss = test_loss.item() utils.makedirs(args.save) torch.save({ 'args': args, 'state_dict': model.state_dict(), }, os.path.join(args.save, 'checkpt.pth')) model.train() if itr % args.viz_freq == 0: with torch.no_grad(): model.eval() p_samples = toy_data.inf_train_gen(args.data, batch_size=2000) sample_fn, density_fn = get_transforms(model) plt.figure(figsize=(9, 3)) visualize_transform( p_samples, torch.randn, standard_normal_logprob, transform=sample_fn, inverse_transform=density_fn, samples=True, npts=800, device=device ) fig_filename = os.path.join(args.save, 'figs', '{:04d}.jpg'.format(itr)) utils.makedirs(os.path.dirname(fig_filename)) plt.savefig(fig_filename) plt.close() model.train() end = time.time() logger.info('Training has finished.') save_traj_dir = 
os.path.join(args.save, 'trajectory') logger.info('Plotting trajectory to {}'.format(save_traj_dir)) data_samples = toy_data.inf_train_gen(args.data, batch_size=2000) save_trajectory(model, data_samples, save_traj_dir, device=device) trajectory_to_video(save_traj_dir)
9,288
39.038793
119
py
steer
steer-master/ffjord/train_img2d.py
import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import argparse import os import time import torch import torch.optim as optim import lib.utils as utils from lib.visualize_flow import visualize_transform import lib.layers.odefunc as odefunc from train_misc import standard_normal_logprob from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time from train_misc import add_spectral_norm, spectral_norm_power_iteration from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log from train_misc import build_model_tabular from diagnostics.viz_toy import save_trajectory, trajectory_to_video SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams'] parser = argparse.ArgumentParser('Continuous Normalizing Flow') parser.add_argument('--img', type=str, required=True) parser.add_argument('--data', type=str, default='dummy') parser.add_argument( "--layer_type", type=str, default="concatsquash", choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"] ) parser.add_argument('--dims', type=str, default='64-64-64') parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.') parser.add_argument('--time_length', type=float, default=0.5) parser.add_argument('--train_T', type=eval, default=True) parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"]) parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES) parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS) parser.add_argument('--atol', type=float, default=1e-5) parser.add_argument('--rtol', type=float, default=1e-5) parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.") parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None]) 
parser.add_argument('--test_atol', type=float, default=None) parser.add_argument('--test_rtol', type=float, default=None) parser.add_argument('--residual', type=eval, default=False, choices=[True, False]) parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False]) parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False]) parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False]) parser.add_argument('--bn_lag', type=float, default=0) parser.add_argument('--niters', type=int, default=10000) parser.add_argument('--batch_size', type=int, default=1000) parser.add_argument('--test_batch_size', type=int, default=1000) parser.add_argument('--lr', type=float, default=1e-3) parser.add_argument('--weight_decay', type=float, default=1e-5) # Track quantities parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1") parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2") parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2") parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F") parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F") parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F") parser.add_argument('--save', type=str, default='experiments/cnf') parser.add_argument('--viz_freq', type=int, default=100) parser.add_argument('--val_freq', type=int, default=100) parser.add_argument('--log_freq', type=int, default=10) parser.add_argument('--gpu', type=int, default=0) args = parser.parse_args() # logger utils.makedirs(args.save) logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__)) if args.layer_type == "blend": logger.info("!! 
Setting time_length from None to 1.0 due to use of Blend layers.") args.time_length = 1.0 logger.info(args) device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu') from PIL import Image import numpy as np img = np.array(Image.open(args.img).convert('L')) h, w = img.shape xx = np.linspace(-4, 4, w) yy = np.linspace(-4, 4, h) xx, yy = np.meshgrid(xx, yy) xx = xx.reshape(-1, 1) yy = yy.reshape(-1, 1) means = np.concatenate([xx, yy], 1) img = img.max() - img probs = img.reshape(-1) / img.sum() std = np.array([8 / w / 2, 8 / h / 2]) def sample_data(data=None, rng=None, batch_size=200): """data and rng are ignored.""" inds = np.random.choice(int(probs.shape[0]), int(batch_size), p=probs) m = means[inds] samples = np.random.randn(*m.shape) * std + m return samples def get_transforms(model): def sample_fn(z, logpz=None): if logpz is not None: return model(z, logpz, reverse=True) else: return model(z, reverse=True) def density_fn(x, logpx=None): if logpx is not None: return model(x, logpx, reverse=False) else: return model(x, reverse=False) return sample_fn, density_fn def compute_loss(args, model, batch_size=None): if batch_size is None: batch_size = args.batch_size # load data x = sample_data(args.data, batch_size=batch_size) x = torch.from_numpy(x).type(torch.float32).to(device) zero = torch.zeros(x.shape[0], 1).to(x) # transform to z z, delta_logp = model(x, zero) # compute log q(z) logpz = standard_normal_logprob(z).sum(1, keepdim=True) logpx = logpz - delta_logp loss = -torch.mean(logpx) return loss if __name__ == '__main__': regularization_fns, regularization_coeffs = create_regularization_fns(args) model = build_model_tabular(args, 2, regularization_fns).to(device) if args.spectral_norm: add_spectral_norm(model) set_cnf_options(args, model) logger.info(model) logger.info("Number of trainable parameters: {}".format(count_parameters(model))) optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) time_meter 
= utils.RunningAverageMeter(0.93) loss_meter = utils.RunningAverageMeter(0.93) nfef_meter = utils.RunningAverageMeter(0.93) nfeb_meter = utils.RunningAverageMeter(0.93) tt_meter = utils.RunningAverageMeter(0.93) end = time.time() best_loss = float('inf') model.train() for itr in range(1, args.niters + 1): optimizer.zero_grad() if args.spectral_norm: spectral_norm_power_iteration(model, 1) loss = compute_loss(args, model) loss_meter.update(loss.item()) if len(regularization_coeffs) > 0: reg_states = get_regularization(model, regularization_coeffs) reg_loss = sum( reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0 ) loss = loss + reg_loss total_time = count_total_time(model) nfe_forward = count_nfe(model) loss.backward() optimizer.step() nfe_total = count_nfe(model) nfe_backward = nfe_total - nfe_forward nfef_meter.update(nfe_forward) nfeb_meter.update(nfe_backward) time_meter.update(time.time() - end) tt_meter.update(total_time) log_message = ( 'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})' ' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format( itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg, nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg ) ) if len(regularization_coeffs) > 0: log_message = append_regularization_to_log(log_message, regularization_fns, reg_states) logger.info(log_message) if itr % args.val_freq == 0 or itr == args.niters: with torch.no_grad(): model.eval() test_loss = compute_loss(args, model, batch_size=args.test_batch_size) test_nfe = count_nfe(model) log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe) logger.info(log_message) if test_loss.item() < best_loss: best_loss = test_loss.item() utils.makedirs(args.save) torch.save({ 'args': args, 'state_dict': model.state_dict(), }, os.path.join(args.save, 'checkpt.pth')) model.train() if itr % 
args.viz_freq == 0: with torch.no_grad(): model.eval() p_samples = sample_data(args.data, batch_size=2000) sample_fn, density_fn = get_transforms(model) plt.figure(figsize=(9, 3)) visualize_transform( p_samples, torch.randn, standard_normal_logprob, transform=sample_fn, inverse_transform=density_fn, samples=True, npts=800, device=device ) fig_filename = os.path.join(args.save, 'figs', '{:04d}.jpg'.format(itr)) utils.makedirs(os.path.dirname(fig_filename)) plt.savefig(fig_filename) plt.close() model.train() end = time.time() logger.info('Training has finished.') save_traj_dir = os.path.join(args.save, 'trajectory') logger.info('Plotting trajectory to {}'.format(save_traj_dir)) data_samples = sample_data(args.data, batch_size=2000) save_trajectory(model, data_samples, save_traj_dir, device=device) trajectory_to_video(save_traj_dir)
9,789
37.543307
119
py
steer
steer-master/ffjord/train_cnf.py
import argparse import os import time import numpy as np import torch import torch.optim as optim import torchvision.datasets as dset import torchvision.transforms as tforms from torchvision.utils import save_image import lib.layers as layers import lib.utils as utils import lib.odenvp as odenvp import lib.multiscale_parallel as multiscale_parallel from train_misc import standard_normal_logprob from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time from train_misc import add_spectral_norm, spectral_norm_power_iteration from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log # go fast boi!! torch.backends.cudnn.benchmark = True SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams'] parser = argparse.ArgumentParser("Continuous Normalizing Flow") parser.add_argument("--data", choices=["mnist", "svhn", "cifar10", 'lsun_church'], type=str, default="mnist") parser.add_argument("--dims", type=str, default="8,32,32,8") parser.add_argument("--strides", type=str, default="2,2,1,-2,-2") parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.') parser.add_argument("--conv", type=eval, default=True, choices=[True, False]) parser.add_argument( "--layer_type", type=str, default="ignore", choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"] ) parser.add_argument("--divergence_fn", type=str, default="approximate", choices=["brute_force", "approximate"]) parser.add_argument( "--nonlinearity", type=str, default="softplus", choices=["tanh", "relu", "softplus", "elu", "swish"] ) parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS) parser.add_argument('--atol', type=float, default=1e-5) parser.add_argument('--rtol', type=float, default=1e-5) parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.") parser.add_argument('--test_solver', type=str, default=None, 
choices=SOLVERS + [None]) parser.add_argument('--test_atol', type=float, default=None) parser.add_argument('--test_rtol', type=float, default=None) parser.add_argument("--imagesize", type=int, default=None) parser.add_argument("--alpha", type=float, default=1e-6) parser.add_argument('--time_length', type=float, default=1.0) parser.add_argument('--train_T', type=eval, default=True) parser.add_argument("--num_epochs", type=int, default=1000) parser.add_argument("--batch_size", type=int, default=200) parser.add_argument( "--batch_size_schedule", type=str, default="", help="Increases the batchsize at every given epoch, dash separated." ) parser.add_argument("--test_batch_size", type=int, default=200) parser.add_argument("--lr", type=float, default=1e-3) parser.add_argument("--warmup_iters", type=float, default=1000) parser.add_argument("--weight_decay", type=float, default=0.0) parser.add_argument("--spectral_norm_niter", type=int, default=10) parser.add_argument("--add_noise", type=eval, default=True, choices=[True, False]) parser.add_argument("--batch_norm", type=eval, default=False, choices=[True, False]) parser.add_argument('--residual', type=eval, default=False, choices=[True, False]) parser.add_argument('--autoencode', type=eval, default=False, choices=[True, False]) parser.add_argument('--rademacher', type=eval, default=True, choices=[True, False]) parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False]) parser.add_argument('--multiscale', type=eval, default=False, choices=[True, False]) parser.add_argument('--parallel', type=eval, default=False, choices=[True, False]) # Regularizations parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1") parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2") parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2") parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F") 
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F") parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F") parser.add_argument("--time_penalty", type=float, default=0, help="Regularization on the end_time.") parser.add_argument( "--max_grad_norm", type=float, default=1e10, help="Max norm of graidents (default is just stupidly high to avoid any clipping)" ) parser.add_argument("--begin_epoch", type=int, default=1) parser.add_argument("--resume", type=str, default=None) parser.add_argument("--save", type=str, default="experiments/cnf") parser.add_argument("--val_freq", type=int, default=1) parser.add_argument("--log_freq", type=int, default=10) args = parser.parse_args() # logger utils.makedirs(args.save) logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__)) if args.layer_type == "blend": logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.") args.time_length = 1.0 logger.info(args) def add_noise(x): """ [0, 1] -> [0, 255] -> add noise -> [0, 1] """ if args.add_noise: noise = x.new().resize_as_(x).uniform_() x = x * 255 + noise x = x / 256 return x def update_lr(optimizer, itr): iter_frac = min(float(itr + 1) / max(args.warmup_iters, 1), 1.0) lr = args.lr * iter_frac for param_group in optimizer.param_groups: param_group["lr"] = lr def get_train_loader(train_set, epoch): if args.batch_size_schedule != "": epochs = [0] + list(map(int, args.batch_size_schedule.split("-"))) n_passed = sum(np.array(epochs) <= epoch) current_batch_size = int(args.batch_size * n_passed) else: current_batch_size = args.batch_size train_loader = torch.utils.data.DataLoader( dataset=train_set, batch_size=current_batch_size, shuffle=True, drop_last=True, pin_memory=True ) logger.info("===> Using batch size {}. 
Total {} iterations/epoch.".format(current_batch_size, len(train_loader))) return train_loader def get_dataset(args): trans = lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise]) if args.data == "mnist": im_dim = 1 im_size = 28 if args.imagesize is None else args.imagesize train_set = dset.MNIST(root="./data", train=True, transform=trans(im_size), download=True) test_set = dset.MNIST(root="./data", train=False, transform=trans(im_size), download=True) elif args.data == "svhn": im_dim = 3 im_size = 32 if args.imagesize is None else args.imagesize train_set = dset.SVHN(root="./data", split="train", transform=trans(im_size), download=True) test_set = dset.SVHN(root="./data", split="test", transform=trans(im_size), download=True) elif args.data == "cifar10": im_dim = 3 im_size = 32 if args.imagesize is None else args.imagesize train_set = dset.CIFAR10( root="./data", train=True, transform=tforms.Compose([ tforms.Resize(im_size), tforms.RandomHorizontalFlip(), tforms.ToTensor(), add_noise, ]), download=True ) test_set = dset.CIFAR10(root="./data", train=False, transform=trans(im_size), download=True) elif args.data == 'celeba': im_dim = 3 im_size = 64 if args.imagesize is None else args.imagesize train_set = dset.CelebA( train=True, transform=tforms.Compose([ tforms.ToPILImage(), tforms.Resize(im_size), tforms.RandomHorizontalFlip(), tforms.ToTensor(), add_noise, ]) ) test_set = dset.CelebA( train=False, transform=tforms.Compose([ tforms.ToPILImage(), tforms.Resize(im_size), tforms.ToTensor(), add_noise, ]) ) elif args.data == 'lsun_church': im_dim = 3 im_size = 64 if args.imagesize is None else args.imagesize train_set = dset.LSUN( 'data', ['church_outdoor_train'], transform=tforms.Compose([ tforms.Resize(96), tforms.RandomCrop(64), tforms.Resize(im_size), tforms.ToTensor(), add_noise, ]) ) test_set = dset.LSUN( 'data', ['church_outdoor_val'], transform=tforms.Compose([ tforms.Resize(96), tforms.RandomCrop(64), tforms.Resize(im_size), 
tforms.ToTensor(), add_noise, ]) ) data_shape = (im_dim, im_size, im_size) if not args.conv: data_shape = (im_dim * im_size * im_size,) test_loader = torch.utils.data.DataLoader( dataset=test_set, batch_size=args.test_batch_size, shuffle=False, drop_last=True ) return train_set, test_loader, data_shape def compute_bits_per_dim(x, model): zero = torch.zeros(x.shape[0], 1).to(x) # Don't use data parallelize if batch size is small. # if x.shape[0] < 200: # model = model.module z, delta_logp = model(x, zero) # run model forward logpz = standard_normal_logprob(z).view(z.shape[0], -1).sum(1, keepdim=True) # logp(z) logpx = logpz - delta_logp logpx_per_dim = torch.sum(logpx) / x.nelement() # averaged over batches bits_per_dim = -(logpx_per_dim - np.log(256)) / np.log(2) return bits_per_dim def create_model(args, data_shape, regularization_fns): hidden_dims = tuple(map(int, args.dims.split(","))) strides = tuple(map(int, args.strides.split(","))) if args.multiscale: model = odenvp.ODENVP( (args.batch_size, *data_shape), n_blocks=args.num_blocks, intermediate_dims=hidden_dims, nonlinearity=args.nonlinearity, alpha=args.alpha, cnf_kwargs={"T": args.time_length, "train_T": args.train_T, "regularization_fns": regularization_fns}, ) elif args.parallel: model = multiscale_parallel.MultiscaleParallelCNF( (args.batch_size, *data_shape), n_blocks=args.num_blocks, intermediate_dims=hidden_dims, alpha=args.alpha, time_length=args.time_length, ) else: if args.autoencode: def build_cnf(): autoencoder_diffeq = layers.AutoencoderDiffEqNet( hidden_dims=hidden_dims, input_shape=data_shape, strides=strides, conv=args.conv, layer_type=args.layer_type, nonlinearity=args.nonlinearity, ) odefunc = layers.AutoencoderODEfunc( autoencoder_diffeq=autoencoder_diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher, ) cnf = layers.CNF( odefunc=odefunc, T=args.time_length, regularization_fns=regularization_fns, solver=args.solver, ) return cnf else: def 
build_cnf(): diffeq = layers.ODEnet( hidden_dims=hidden_dims, input_shape=data_shape, strides=strides, conv=args.conv, layer_type=args.layer_type, nonlinearity=args.nonlinearity, ) odefunc = layers.ODEfunc( diffeq=diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher, ) cnf = layers.CNF( odefunc=odefunc, T=args.time_length, train_T=args.train_T, regularization_fns=regularization_fns, solver=args.solver, ) return cnf chain = [layers.LogitTransform(alpha=args.alpha)] if args.alpha > 0 else [layers.ZeroMeanTransform()] chain = chain + [build_cnf() for _ in range(args.num_blocks)] if args.batch_norm: chain.append(layers.MovingBatchNorm2d(data_shape[0])) model = layers.SequentialFlow(chain) return model if __name__ == "__main__": # get deivce device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True) # load dataset train_set, test_loader, data_shape = get_dataset(args) # build model regularization_fns, regularization_coeffs = create_regularization_fns(args) model = create_model(args, data_shape, regularization_fns) if args.spectral_norm: add_spectral_norm(model, logger) set_cnf_options(args, model) logger.info(model) logger.info("Number of trainable parameters: {}".format(count_parameters(model))) # optimizer optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) # restore parameters if args.resume is not None: checkpt = torch.load(args.resume, map_location=lambda storage, loc: storage) model.load_state_dict(checkpt["state_dict"]) if "optim_state_dict" in checkpt.keys(): optimizer.load_state_dict(checkpt["optim_state_dict"]) # Manually move optimizer state to device. for state in optimizer.state.values(): for k, v in state.items(): if torch.is_tensor(v): state[k] = cvt(v) if torch.cuda.is_available(): model = torch.nn.DataParallel(model).cuda() # For visualization. 
fixed_z = cvt(torch.randn(100, *data_shape)) time_meter = utils.RunningAverageMeter(0.97) loss_meter = utils.RunningAverageMeter(0.97) steps_meter = utils.RunningAverageMeter(0.97) grad_meter = utils.RunningAverageMeter(0.97) tt_meter = utils.RunningAverageMeter(0.97) if args.spectral_norm and not args.resume: spectral_norm_power_iteration(model, 500) best_loss = float("inf") itr = 0 for epoch in range(args.begin_epoch, args.num_epochs + 1): model.train() train_loader = get_train_loader(train_set, epoch) for _, (x, y) in enumerate(train_loader): start = time.time() update_lr(optimizer, itr) optimizer.zero_grad() if not args.conv: x = x.view(x.shape[0], -1) # cast data and move to device x = cvt(x) # compute loss loss = compute_bits_per_dim(x, model) if regularization_coeffs: reg_states = get_regularization(model, regularization_coeffs) reg_loss = sum( reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0 ) loss = loss + reg_loss total_time = count_total_time(model) loss = loss + total_time * args.time_penalty loss.backward() grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() if args.spectral_norm: spectral_norm_power_iteration(model, args.spectral_norm_niter) time_meter.update(time.time() - start) loss_meter.update(loss.item()) steps_meter.update(count_nfe(model)) grad_meter.update(grad_norm) tt_meter.update(total_time) if itr % args.log_freq == 0: log_message = ( "Iter {:04d} | Time {:.4f}({:.4f}) | Bit/dim {:.4f}({:.4f}) | " "Steps {:.0f}({:.2f}) | Grad Norm {:.4f}({:.4f}) | Total Time {:.2f}({:.2f})".format( itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, steps_meter.val, steps_meter.avg, grad_meter.val, grad_meter.avg, tt_meter.val, tt_meter.avg ) ) if regularization_coeffs: log_message = append_regularization_to_log(log_message, regularization_fns, reg_states) logger.info(log_message) itr += 1 # compute test loss model.eval() if epoch % args.val_freq 
== 0: with torch.no_grad(): start = time.time() logger.info("validating...") losses = [] for (x, y) in test_loader: if not args.conv: x = x.view(x.shape[0], -1) x = cvt(x) loss = compute_bits_per_dim(x, model) losses.append(loss) loss = 0 for i in range(len(losses)): loss = loss + losses[i] loss = loss/(len(losses)) logger.info("Epoch {:04d} | Time {:.4f}, Bit/dim {:.4f}".format(epoch, time.time() - start, loss)) if loss < best_loss: best_loss = loss utils.makedirs(args.save) torch.save({ "args": args, "state_dict": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(), "optim_state_dict": optimizer.state_dict(), }, os.path.join(args.save, "checkpt.pth")) # visualize samples and density with torch.no_grad(): fig_filename = os.path.join(args.save, "figs", "{:04d}.jpg".format(epoch)) utils.makedirs(os.path.dirname(fig_filename)) generated_samples = model(fixed_z, reverse=True).view(-1, *data_shape) save_image(generated_samples, fig_filename, nrow=10)
18,212
39.654018
119
py
steer
steer-master/ffjord/train_discrete_tabular.py
import argparse import os import time import torch import lib.utils as utils from lib.custom_optimizers import Adam import lib.layers as layers import datasets from train_misc import standard_normal_logprob, count_parameters parser = argparse.ArgumentParser() parser.add_argument( '--data', choices=['power', 'gas', 'hepmass', 'miniboone', 'bsds300'], type=str, default='miniboone' ) parser.add_argument('--depth', type=int, default=10) parser.add_argument('--dims', type=str, default="100-100") parser.add_argument('--nonlinearity', type=str, default="tanh") parser.add_argument('--glow', type=eval, default=False, choices=[True, False]) parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False]) parser.add_argument('--bn_lag', type=float, default=0) parser.add_argument('--early_stopping', type=int, default=30) parser.add_argument('--batch_size', type=int, default=1000) parser.add_argument('--test_batch_size', type=int, default=None) parser.add_argument('--lr', type=float, default=1e-4) parser.add_argument('--weight_decay', type=float, default=1e-6) parser.add_argument('--resume', type=str, default=None) parser.add_argument('--save', type=str, default='experiments/cnf') parser.add_argument('--evaluate', action='store_true') parser.add_argument('--val_freq', type=int, default=200) parser.add_argument('--log_freq', type=int, default=10) args = parser.parse_args() # logger utils.makedirs(args.save) logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__)) logger.info(args) test_batch_size = args.test_batch_size if args.test_batch_size else args.batch_size def batch_iter(X, batch_size=args.batch_size, shuffle=False): """ X: feature tensor (shape: num_instances x num_features) """ if shuffle: idxs = torch.randperm(X.shape[0]) else: idxs = torch.arange(X.shape[0]) if X.is_cuda: idxs = idxs.cuda() for batch_idxs in idxs.split(batch_size): yield X[batch_idxs] ndecs = 0 def update_lr(optimizer, 
n_vals_without_improvement): global ndecs if ndecs == 0 and n_vals_without_improvement > args.early_stopping // 3: for param_group in optimizer.param_groups: param_group["lr"] = args.lr / 10 ndecs = 1 elif ndecs == 1 and n_vals_without_improvement > args.early_stopping // 3 * 2: for param_group in optimizer.param_groups: param_group["lr"] = args.lr / 100 ndecs = 2 else: for param_group in optimizer.param_groups: param_group["lr"] = args.lr / 10**ndecs def load_data(name): if name == 'bsds300': return datasets.BSDS300() elif name == 'power': return datasets.POWER() elif name == 'gas': return datasets.GAS() elif name == 'hepmass': return datasets.HEPMASS() elif name == 'miniboone': return datasets.MINIBOONE() else: raise ValueError('Unknown dataset') def build_model(input_dim): hidden_dims = tuple(map(int, args.dims.split("-"))) chain = [] for i in range(args.depth): if args.glow: chain.append(layers.BruteForceLayer(input_dim)) chain.append(layers.MaskedCouplingLayer(input_dim, hidden_dims, 'alternate', swap=i % 2 == 0)) if args.batch_norm: chain.append(layers.MovingBatchNorm1d(input_dim, bn_lag=args.bn_lag)) return layers.SequentialFlow(chain) def compute_loss(x, model): zero = torch.zeros(x.shape[0], 1).to(x) z, delta_logp = model(x, zero) # run model forward logpz = standard_normal_logprob(z).view(z.shape[0], -1).sum(1, keepdim=True) # logp(z) logpx = logpz - delta_logp loss = -torch.mean(logpx) return loss def restore_model(model, filename): checkpt = torch.load(filename, map_location=lambda storage, loc: storage) model.load_state_dict(checkpt["state_dict"]) return model if __name__ == '__main__': device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True) logger.info('Using {} GPUs.'.format(torch.cuda.device_count())) data = load_data(args.data) data.trn.x = torch.from_numpy(data.trn.x) data.val.x = torch.from_numpy(data.val.x) data.tst.x = torch.from_numpy(data.tst.x) model = 
build_model(data.n_dims).to(device) if args.resume is not None: checkpt = torch.load(args.resume) model.load_state_dict(checkpt['state_dict']) logger.info(model) logger.info("Number of trainable parameters: {}".format(count_parameters(model))) if not args.evaluate: optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) time_meter = utils.RunningAverageMeter(0.98) loss_meter = utils.RunningAverageMeter(0.98) best_loss = float('inf') itr = 0 n_vals_without_improvement = 0 end = time.time() model.train() while True: if args.early_stopping > 0 and n_vals_without_improvement > args.early_stopping: break for x in batch_iter(data.trn.x, shuffle=True): if args.early_stopping > 0 and n_vals_without_improvement > args.early_stopping: break optimizer.zero_grad() x = cvt(x) loss = compute_loss(x, model) loss_meter.update(loss.item()) loss.backward() optimizer.step() time_meter.update(time.time() - end) if itr % args.log_freq == 0: log_message = ( 'Iter {:06d} | Epoch {:.2f} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | '.format( itr, float(itr) / (data.trn.x.shape[0] / float(args.batch_size)), time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg ) ) logger.info(log_message) itr += 1 end = time.time() # Validation loop. 
if itr % args.val_freq == 0: model.eval() start_time = time.time() with torch.no_grad(): val_loss = utils.AverageMeter() for x in batch_iter(data.val.x, batch_size=test_batch_size): x = cvt(x) val_loss.update(compute_loss(x, model).item(), x.shape[0]) if val_loss.avg < best_loss: best_loss = val_loss.avg utils.makedirs(args.save) torch.save({ 'args': args, 'state_dict': model.state_dict(), }, os.path.join(args.save, 'checkpt.pth')) n_vals_without_improvement = 0 else: n_vals_without_improvement += 1 update_lr(optimizer, n_vals_without_improvement) log_message = ( '[VAL] Iter {:06d} | Val Loss {:.6f} | ' 'NoImproveEpochs {:02d}/{:02d}'.format( itr, val_loss.avg, n_vals_without_improvement, args.early_stopping ) ) logger.info(log_message) model.train() logger.info('Training has finished.') model = restore_model(model, os.path.join(args.save, 'checkpt.pth')).to(device) logger.info('Evaluating model on test set.') model.eval() with torch.no_grad(): test_loss = utils.AverageMeter() for itr, x in enumerate(batch_iter(data.tst.x, batch_size=test_batch_size)): x = cvt(x) test_loss.update(compute_loss(x, model).item(), x.shape[0]) logger.info('Progress: {:.2f}%'.format(itr / (data.tst.x.shape[0] / test_batch_size))) log_message = '[TEST] Iter {:06d} | Test Loss {:.6f} '.format(itr, test_loss.avg) logger.info(log_message)
8,301
34.177966
120
py
steer
steer-master/ffjord/train_tabular.py
import argparse import os import time import torch import lib.utils as utils import lib.layers.odefunc as odefunc from lib.custom_optimizers import Adam import datasets from train_misc import standard_normal_logprob from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log from train_misc import build_model_tabular, override_divergence_fn SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams'] parser = argparse.ArgumentParser('Continuous Normalizing Flow') parser.add_argument( '--data', choices=['power', 'gas', 'hepmass', 'miniboone', 'bsds300'], type=str, default='miniboone' ) parser.add_argument( "--layer_type", type=str, default="concatsquash", choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"] ) parser.add_argument('--hdim_factor', type=int, default=10) parser.add_argument('--nhidden', type=int, default=1) parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.') parser.add_argument('--time_length', type=float, default=1.0) parser.add_argument('--train_T', type=eval, default=True) parser.add_argument("--divergence_fn", type=str, default="approximate", choices=["brute_force", "approximate"]) parser.add_argument("--nonlinearity", type=str, default="softplus", choices=odefunc.NONLINEARITIES) parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS) parser.add_argument('--atol', type=float, default=1e-8) parser.add_argument('--rtol', type=float, default=1e-6) parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.") parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None]) parser.add_argument('--test_atol', type=float, default=None) parser.add_argument('--test_rtol', type=float, default=None) parser.add_argument('--residual', type=eval, 
default=False, choices=[True, False]) parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False]) parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False]) parser.add_argument('--bn_lag', type=float, default=0) parser.add_argument('--early_stopping', type=int, default=30) parser.add_argument('--batch_size', type=int, default=1000) parser.add_argument('--test_batch_size', type=int, default=None) parser.add_argument('--lr', type=float, default=1e-3) parser.add_argument('--weight_decay', type=float, default=1e-6) # Track quantities parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1") parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2") parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2") parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F") parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F") parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F") parser.add_argument('--resume', type=str, default=None) parser.add_argument('--save', type=str, default='experiments/cnf') parser.add_argument('--evaluate', action='store_true') parser.add_argument('--val_freq', type=int, default=200) parser.add_argument('--log_freq', type=int, default=10) args = parser.parse_args() # logger utils.makedirs(args.save) logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__)) if args.layer_type == "blend": logger.info("!! 
Setting time_length from None to 1.0 due to use of Blend layers.") args.time_length = 1.0 args.train_T = False logger.info(args) test_batch_size = args.test_batch_size if args.test_batch_size else args.batch_size def batch_iter(X, batch_size=args.batch_size, shuffle=False): """ X: feature tensor (shape: num_instances x num_features) """ if shuffle: idxs = torch.randperm(X.shape[0]) else: idxs = torch.arange(X.shape[0]) if X.is_cuda: idxs = idxs.cuda() for batch_idxs in idxs.split(batch_size): yield X[batch_idxs] ndecs = 0 def update_lr(optimizer, n_vals_without_improvement): global ndecs if ndecs == 0 and n_vals_without_improvement > args.early_stopping // 3: for param_group in optimizer.param_groups: param_group["lr"] = args.lr / 10 ndecs = 1 elif ndecs == 1 and n_vals_without_improvement > args.early_stopping // 3 * 2: for param_group in optimizer.param_groups: param_group["lr"] = args.lr / 100 ndecs = 2 else: for param_group in optimizer.param_groups: param_group["lr"] = args.lr / 10**ndecs def load_data(name): if name == 'bsds300': return datasets.BSDS300() elif name == 'power': return datasets.POWER() elif name == 'gas': return datasets.GAS() elif name == 'hepmass': return datasets.HEPMASS() elif name == 'miniboone': return datasets.MINIBOONE() else: raise ValueError('Unknown dataset') def compute_loss(x, model): zero = torch.zeros(x.shape[0], 1).to(x) z, delta_logp = model(x, zero) # run model forward logpz = standard_normal_logprob(z).view(z.shape[0], -1).sum(1, keepdim=True) # logp(z) logpx = logpz - delta_logp loss = -torch.mean(logpx) return loss def restore_model(model, filename): checkpt = torch.load(filename, map_location=lambda storage, loc: storage) model.load_state_dict(checkpt["state_dict"]) return model if __name__ == '__main__': device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True) logger.info('Using {} GPUs.'.format(torch.cuda.device_count())) data = 
load_data(args.data) data.trn.x = torch.from_numpy(data.trn.x) data.val.x = torch.from_numpy(data.val.x) data.tst.x = torch.from_numpy(data.tst.x) args.dims = '-'.join([str(args.hdim_factor * data.n_dims)] * args.nhidden) regularization_fns, regularization_coeffs = create_regularization_fns(args) model = build_model_tabular(args, data.n_dims, regularization_fns).to(device) set_cnf_options(args, model) for k in model.state_dict().keys(): logger.info(k) if args.resume is not None: checkpt = torch.load(args.resume) # Backwards compatibility with an older version of the code. # TODO: remove upon release. filtered_state_dict = {} for k, v in checkpt['state_dict'].items(): if 'diffeq.diffeq' not in k: filtered_state_dict[k.replace('module.', '')] = v model.load_state_dict(filtered_state_dict) logger.info(model) logger.info("Number of trainable parameters: {}".format(count_parameters(model))) if not args.evaluate: optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) time_meter = utils.RunningAverageMeter(0.98) loss_meter = utils.RunningAverageMeter(0.98) nfef_meter = utils.RunningAverageMeter(0.98) nfeb_meter = utils.RunningAverageMeter(0.98) tt_meter = utils.RunningAverageMeter(0.98) best_loss = float('inf') itr = 0 n_vals_without_improvement = 0 end = time.time() model.train() while True: if args.early_stopping > 0 and n_vals_without_improvement > args.early_stopping: break for x in batch_iter(data.trn.x, shuffle=True): if args.early_stopping > 0 and n_vals_without_improvement > args.early_stopping: break optimizer.zero_grad() x = cvt(x) loss = compute_loss(x, model) loss_meter.update(loss.item()) if len(regularization_coeffs) > 0: reg_states = get_regularization(model, regularization_coeffs) reg_loss = sum( reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0 ) loss = loss + reg_loss total_time = count_total_time(model) nfe_forward = count_nfe(model) loss.backward() optimizer.step() nfe_total = 
count_nfe(model) nfe_backward = nfe_total - nfe_forward nfef_meter.update(nfe_forward) nfeb_meter.update(nfe_backward) time_meter.update(time.time() - end) tt_meter.update(total_time) if itr % args.log_freq == 0: log_message = ( 'Iter {:06d} | Epoch {:.2f} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | ' 'NFE Forward {:.0f}({:.1f}) | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format( itr, float(itr) / (data.trn.x.shape[0] / float(args.batch_size)), time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg, nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg ) ) if len(regularization_coeffs) > 0: log_message = append_regularization_to_log(log_message, regularization_fns, reg_states) logger.info(log_message) itr += 1 end = time.time() # Validation loop. if itr % args.val_freq == 0: model.eval() start_time = time.time() with torch.no_grad(): val_loss = utils.AverageMeter() val_nfe = utils.AverageMeter() for x in batch_iter(data.val.x, batch_size=test_batch_size): x = cvt(x) val_loss.update(compute_loss(x, model).item(), x.shape[0]) val_nfe.update(count_nfe(model)) if val_loss.avg < best_loss: best_loss = val_loss.avg utils.makedirs(args.save) torch.save({ 'args': args, 'state_dict': model.state_dict(), }, os.path.join(args.save, 'checkpt.pth')) n_vals_without_improvement = 0 else: n_vals_without_improvement += 1 update_lr(optimizer, n_vals_without_improvement) log_message = ( '[VAL] Iter {:06d} | Val Loss {:.6f} | NFE {:.0f} | ' 'NoImproveEpochs {:02d}/{:02d}'.format( itr, val_loss.avg, val_nfe.avg, n_vals_without_improvement, args.early_stopping ) ) logger.info(log_message) model.train() logger.info('Training has finished.') model = restore_model(model, os.path.join(args.save, 'checkpt.pth')).to(device) set_cnf_options(args, model) logger.info('Evaluating model on test set.') model.eval() override_divergence_fn(model, "brute_force") with torch.no_grad(): test_loss = utils.AverageMeter() test_nfe = 
utils.AverageMeter() for itr, x in enumerate(batch_iter(data.tst.x, batch_size=test_batch_size)): x = cvt(x) test_loss.update(compute_loss(x, model).item(), x.shape[0]) test_nfe.update(count_nfe(model)) logger.info('Progress: {:.2f}%'.format(100. * itr / (data.tst.x.shape[0] / test_batch_size))) log_message = '[TEST] Iter {:06d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss.avg, test_nfe.avg) logger.info(log_message)
12,249
38.90228
120
py
steer
steer-master/ffjord/train_discrete_toy.py
import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import argparse import os import time import torch import torch.optim as optim import lib.layers as layers import lib.toy_data as toy_data import lib.utils as utils from lib.visualize_flow import visualize_transform from train_misc import standard_normal_logprob from train_misc import count_parameters SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams'] parser = argparse.ArgumentParser('Continuous Normalizing Flow') parser.add_argument( '--data', choices=['swissroll', '8gaussians', 'pinwheel', 'circles', 'moons', '2spirals', 'checkerboard', 'rings'], type=str, default='pinwheel' ) parser.add_argument('--depth', help='number of coupling layers', type=int, default=10) parser.add_argument('--glow', type=eval, choices=[True, False], default=False) parser.add_argument('--nf', type=eval, choices=[True, False], default=False) parser.add_argument('--niters', type=int, default=100001) parser.add_argument('--batch_size', type=int, default=100) parser.add_argument('--test_batch_size', type=int, default=1000) parser.add_argument('--lr', type=float, default=1e-4) parser.add_argument('--weight_decay', type=float, default=0) # Track quantities parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1") parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2") parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2") parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F") parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F") parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F") parser.add_argument('--save', type=str, default='experiments/cnf') parser.add_argument('--viz_freq', type=int, default=1000) parser.add_argument('--val_freq', type=int, default=1000) 
parser.add_argument('--log_freq', type=int, default=100) parser.add_argument('--gpu', type=int, default=0) args = parser.parse_args() # logger utils.makedirs(args.save) logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__)) logger.info(args) device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu') def construct_model(): if args.nf: chain = [] for i in range(args.depth): chain.append(layers.PlanarFlow(2)) return layers.SequentialFlow(chain) else: chain = [] for i in range(args.depth): if args.glow: chain.append(layers.BruteForceLayer(2)) chain.append(layers.CouplingLayer(2, swap=i % 2 == 0)) return layers.SequentialFlow(chain) def get_transforms(model): if args.nf: sample_fn = None else: def sample_fn(z, logpz=None): if logpz is not None: return model(z, logpz, reverse=True) else: return model(z, reverse=True) def density_fn(x, logpx=None): if logpx is not None: return model(x, logpx, reverse=False) else: return model(x, reverse=False) return sample_fn, density_fn def compute_loss(args, model, batch_size=None): if batch_size is None: batch_size = args.batch_size # load data x = toy_data.inf_train_gen(args.data, batch_size=batch_size) x = torch.from_numpy(x).type(torch.float32).to(device) zero = torch.zeros(x.shape[0], 1).to(x) # transform to z z, delta_logp = model(x, zero) # compute log q(z) logpz = standard_normal_logprob(z).sum(1, keepdim=True) logpx = logpz - delta_logp loss = -torch.mean(logpx) return loss if __name__ == '__main__': model = construct_model().to(device) logger.info(model) logger.info("Number of trainable parameters: {}".format(count_parameters(model))) optimizer = optim.Adamax(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) time_meter = utils.RunningAverageMeter(0.98) loss_meter = utils.RunningAverageMeter(0.98) end = time.time() best_loss = float('inf') model.train() for itr in range(1, args.niters + 1): optimizer.zero_grad() loss = compute_loss(args, 
model) loss_meter.update(loss.item()) loss.backward() optimizer.step() time_meter.update(time.time() - end) if itr % args.log_freq == 0: log_message = ( 'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f})'.format( itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg ) ) logger.info(log_message) if itr % args.val_freq == 0 or itr == args.niters: with torch.no_grad(): model.eval() test_loss = compute_loss(args, model, batch_size=args.test_batch_size) log_message = '[TEST] Iter {:04d} | Test Loss {:.6f}'.format(itr, test_loss) logger.info(log_message) if test_loss.item() < best_loss: best_loss = test_loss.item() utils.makedirs(args.save) torch.save({ 'args': args, 'state_dict': model.state_dict(), }, os.path.join(args.save, 'checkpt.pth')) model.train() if itr % args.viz_freq == 0: with torch.no_grad(): model.eval() p_samples = toy_data.inf_train_gen(args.data, batch_size=2000) sample_fn, density_fn = get_transforms(model) plt.figure(figsize=(9, 3)) visualize_transform( p_samples, torch.randn, standard_normal_logprob, transform=sample_fn, inverse_transform=density_fn, samples=True, npts=800, device=device ) fig_filename = os.path.join(args.save, 'figs', '{:04d}.jpg'.format(itr)) utils.makedirs(os.path.dirname(fig_filename)) plt.savefig(fig_filename) plt.close() model.train() end = time.time() logger.info('Training has finished.')
6,334
32.877005
119
py
steer
steer-master/ffjord/train_misc.py
import six import math import lib.layers.wrappers.cnf_regularization as reg_lib import lib.spectral_norm as spectral_norm import lib.layers as layers from lib.layers.odefunc import divergence_bf, divergence_approx def standard_normal_logprob(z): logZ = -0.5 * math.log(2 * math.pi) return logZ - z.pow(2) / 2 def set_cnf_options(args, model): def _set(module): if isinstance(module, layers.CNF): # Set training settings module.solver = args.solver module.atol = args.atol module.rtol = args.rtol if args.step_size is not None: module.solver_options['step_size'] = args.step_size # If using fixed-grid adams, restrict order to not be too high. if args.solver in ['fixed_adams', 'explicit_adams']: module.solver_options['max_order'] = 4 # Set the test settings module.test_solver = args.test_solver if args.test_solver else args.solver module.test_atol = args.test_atol if args.test_atol else args.atol module.test_rtol = args.test_rtol if args.test_rtol else args.rtol if isinstance(module, layers.ODEfunc): module.rademacher = args.rademacher module.residual = args.residual model.apply(_set) def override_divergence_fn(model, divergence_fn): def _set(module): if isinstance(module, layers.ODEfunc): if divergence_fn == "brute_force": module.divergence_fn = divergence_bf elif divergence_fn == "approximate": module.divergence_fn = divergence_approx model.apply(_set) def count_nfe(model): class AccNumEvals(object): def __init__(self): self.num_evals = 0 def __call__(self, module): if isinstance(module, layers.ODEfunc): self.num_evals += module.num_evals() accumulator = AccNumEvals() model.apply(accumulator) return accumulator.num_evals def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def count_total_time(model): class Accumulator(object): def __init__(self): self.total_time = 0 def __call__(self, module): if isinstance(module, layers.CNF): self.total_time = self.total_time + module.sqrt_end_time * module.sqrt_end_time accumulator = 
Accumulator() model.apply(accumulator) return accumulator.total_time def add_spectral_norm(model, logger=None): """Applies spectral norm to all modules within the scope of a CNF.""" def apply_spectral_norm(module): if 'weight' in module._parameters: if logger: logger.info("Adding spectral norm to {}".format(module)) spectral_norm.inplace_spectral_norm(module, 'weight') def find_cnf(module): if isinstance(module, layers.CNF): module.apply(apply_spectral_norm) else: for child in module.children(): find_cnf(child) find_cnf(model) def spectral_norm_power_iteration(model, n_power_iterations=1): def recursive_power_iteration(module): if hasattr(module, spectral_norm.POWER_ITERATION_FN): getattr(module, spectral_norm.POWER_ITERATION_FN)(n_power_iterations) model.apply(recursive_power_iteration) REGULARIZATION_FNS = { "l1int": reg_lib.l1_regularzation_fn, "l2int": reg_lib.l2_regularzation_fn, "dl2int": reg_lib.directional_l2_regularization_fn, "JFrobint": reg_lib.jacobian_frobenius_regularization_fn, "JdiagFrobint": reg_lib.jacobian_diag_frobenius_regularization_fn, "JoffdiagFrobint": reg_lib.jacobian_offdiag_frobenius_regularization_fn, } INV_REGULARIZATION_FNS = {v: k for k, v in six.iteritems(REGULARIZATION_FNS)} def append_regularization_to_log(log_message, regularization_fns, reg_states): for i, reg_fn in enumerate(regularization_fns): log_message = log_message + " | " + INV_REGULARIZATION_FNS[reg_fn] + ": {:.8f}".format(reg_states[i].item()) return log_message def create_regularization_fns(args): regularization_fns = [] regularization_coeffs = [] for arg_key, reg_fn in six.iteritems(REGULARIZATION_FNS): if getattr(args, arg_key) is not None: regularization_fns.append(reg_fn) regularization_coeffs.append(eval("args." 
+ arg_key)) regularization_fns = tuple(regularization_fns) regularization_coeffs = tuple(regularization_coeffs) return regularization_fns, regularization_coeffs def get_regularization(model, regularization_coeffs): if len(regularization_coeffs) == 0: return None acc_reg_states = tuple([0.] * len(regularization_coeffs)) for module in model.modules(): if isinstance(module, layers.CNF): acc_reg_states = tuple(acc + reg for acc, reg in zip(acc_reg_states, module.get_regularization_states())) return acc_reg_states def build_model_tabular(args, dims, regularization_fns=None): hidden_dims = tuple(map(int, args.dims.split("-"))) def build_cnf(): diffeq = layers.ODEnet( hidden_dims=hidden_dims, input_shape=(dims,), strides=None, conv=False, layer_type=args.layer_type, nonlinearity=args.nonlinearity, ) odefunc = layers.ODEfunc( diffeq=diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher, ) cnf = layers.CNF( odefunc=odefunc, T=args.time_length, train_T=args.train_T, regularization_fns=regularization_fns, solver=args.solver, ) return cnf chain = [build_cnf() for _ in range(args.num_blocks)] if args.batch_norm: bn_layers = [layers.MovingBatchNorm1d(dims, bn_lag=args.bn_lag) for _ in range(args.num_blocks)] bn_chain = [layers.MovingBatchNorm1d(dims, bn_lag=args.bn_lag)] for a, b in zip(chain, bn_layers): bn_chain.append(a) bn_chain.append(b) chain = bn_chain model = layers.SequentialFlow(chain) set_cnf_options(args, model) return model
6,311
30.402985
117
py
steer
steer-master/ffjord/vae_lib/models/VAE.py
from __future__ import print_function import torch import torch.nn as nn import vae_lib.models.flows as flows from vae_lib.models.layers import GatedConv2d, GatedConvTranspose2d class VAE(nn.Module): """ The base VAE class containing gated convolutional encoder and decoder architecture. Can be used as a base class for VAE's with normalizing flows. """ def __init__(self, args): super(VAE, self).__init__() # extract model settings from args self.z_size = args.z_size self.input_size = args.input_size self.input_type = args.input_type if self.input_size == [1, 28, 28] or self.input_size == [3, 28, 28]: self.last_kernel_size = 7 elif self.input_size == [1, 28, 20]: self.last_kernel_size = (7, 5) else: raise ValueError('invalid input size!!') self.q_z_nn, self.q_z_mean, self.q_z_var = self.create_encoder() self.p_x_nn, self.p_x_mean = self.create_decoder() self.q_z_nn_output_dim = 256 # auxiliary if args.cuda: self.FloatTensor = torch.cuda.FloatTensor else: self.FloatTensor = torch.FloatTensor # log-det-jacobian = 0 without flows self.log_det_j = self.FloatTensor(1).zero_() def create_encoder(self): """ Helper function to create the elemental blocks for the encoder. Creates a gated convnet encoder. the encoder expects data as input of shape (batch_size, num_channels, width, height). 
""" if self.input_type == 'binary': q_z_nn = nn.Sequential( GatedConv2d(self.input_size[0], 32, 5, 1, 2), GatedConv2d(32, 32, 5, 2, 2), GatedConv2d(32, 64, 5, 1, 2), GatedConv2d(64, 64, 5, 2, 2), GatedConv2d(64, 64, 5, 1, 2), GatedConv2d(64, 256, self.last_kernel_size, 1, 0), ) q_z_mean = nn.Linear(256, self.z_size) q_z_var = nn.Sequential( nn.Linear(256, self.z_size), nn.Softplus(), ) return q_z_nn, q_z_mean, q_z_var elif self.input_type == 'multinomial': act = None q_z_nn = nn.Sequential( GatedConv2d(self.input_size[0], 32, 5, 1, 2, activation=act), GatedConv2d(32, 32, 5, 2, 2, activation=act), GatedConv2d(32, 64, 5, 1, 2, activation=act), GatedConv2d(64, 64, 5, 2, 2, activation=act), GatedConv2d(64, 64, 5, 1, 2, activation=act), GatedConv2d(64, 256, self.last_kernel_size, 1, 0, activation=act) ) q_z_mean = nn.Linear(256, self.z_size) q_z_var = nn.Sequential(nn.Linear(256, self.z_size), nn.Softplus(), nn.Hardtanh(min_val=0.01, max_val=7.)) return q_z_nn, q_z_mean, q_z_var def create_decoder(self): """ Helper function to create the elemental blocks for the decoder. Creates a gated convnet decoder. 
""" num_classes = 256 if self.input_type == 'binary': p_x_nn = nn.Sequential( GatedConvTranspose2d(self.z_size, 64, self.last_kernel_size, 1, 0), GatedConvTranspose2d(64, 64, 5, 1, 2), GatedConvTranspose2d(64, 32, 5, 2, 2, 1), GatedConvTranspose2d(32, 32, 5, 1, 2), GatedConvTranspose2d(32, 32, 5, 2, 2, 1), GatedConvTranspose2d(32, 32, 5, 1, 2) ) p_x_mean = nn.Sequential(nn.Conv2d(32, self.input_size[0], 1, 1, 0), nn.Sigmoid()) return p_x_nn, p_x_mean elif self.input_type == 'multinomial': act = None p_x_nn = nn.Sequential( GatedConvTranspose2d(self.z_size, 64, self.last_kernel_size, 1, 0, activation=act), GatedConvTranspose2d(64, 64, 5, 1, 2, activation=act), GatedConvTranspose2d(64, 32, 5, 2, 2, 1, activation=act), GatedConvTranspose2d(32, 32, 5, 1, 2, activation=act), GatedConvTranspose2d(32, 32, 5, 2, 2, 1, activation=act), GatedConvTranspose2d(32, 32, 5, 1, 2, activation=act) ) p_x_mean = nn.Sequential( nn.Conv2d(32, 256, 5, 1, 2), nn.Conv2d(256, self.input_size[0] * num_classes, 1, 1, 0), # output shape: batch_size, num_channels * num_classes, pixel_width, pixel_height ) return p_x_nn, p_x_mean else: raise ValueError('invalid input type!!') def reparameterize(self, mu, var): """ Samples z from a multivariate Gaussian with diagonal covariance matrix using the reparameterization trick. """ std = var.sqrt() eps = self.FloatTensor(std.size()).normal_() z = eps.mul(std).add_(mu) return z def encode(self, x): """ Encoder expects following data shapes as input: shape = (batch_size, num_channels, width, height) """ h = self.q_z_nn(x) h = h.view(h.size(0), -1) mean = self.q_z_mean(h) var = self.q_z_var(h) return mean, var def decode(self, z): """ Decoder outputs reconstructed image in the following shapes: x_mean.shape = (batch_size, num_channels, width, height) """ z = z.view(z.size(0), self.z_size, 1, 1) h = self.p_x_nn(z) x_mean = self.p_x_mean(h) return x_mean def forward(self, x): """ Evaluates the model as a whole, encodes and decodes. 
Note that the log det jacobian is zero for a plain VAE (without flows), and z_0 = z_k. """ # mean and variance of z z_mu, z_var = self.encode(x) # sample z z = self.reparameterize(z_mu, z_var) x_mean = self.decode(z) return x_mean, z_mu, z_var, self.log_det_j, z, z class PlanarVAE(VAE): """ Variational auto-encoder with planar flows in the encoder. """ def __init__(self, args): super(PlanarVAE, self).__init__(args) # Initialize log-det-jacobian to zero self.log_det_j = 0. # Flow parameters flow = flows.Planar self.num_flows = args.num_flows # Amortized flow parameters self.amor_u = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size) self.amor_w = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size) self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows) # Normalizing flow layers for k in range(self.num_flows): flow_k = flow() self.add_module('flow_' + str(k), flow_k) def encode(self, x): """ Encoder that ouputs parameters for base distribution of z and flow parameters. """ batch_size = x.size(0) h = self.q_z_nn(x) h = h.view(-1, self.q_z_nn_output_dim) mean_z = self.q_z_mean(h) var_z = self.q_z_var(h) # return amortized u an w for all flows u = self.amor_u(h).view(batch_size, self.num_flows, self.z_size, 1) w = self.amor_w(h).view(batch_size, self.num_flows, 1, self.z_size) b = self.amor_b(h).view(batch_size, self.num_flows, 1, 1) return mean_z, var_z, u, w, b def forward(self, x): """ Forward pass with planar flows for the transformation z_0 -> z_1 -> ... -> z_k. Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]. """ self.log_det_j = 0. 
z_mu, z_var, u, w, b = self.encode(x) # Sample z_0 z = [self.reparameterize(z_mu, z_var)] # Normalizing flows for k in range(self.num_flows): flow_k = getattr(self, 'flow_' + str(k)) z_k, log_det_jacobian = flow_k(z[k], u[:, k, :, :], w[:, k, :, :], b[:, k, :, :]) z.append(z_k) self.log_det_j += log_det_jacobian x_mean = self.decode(z[-1]) return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1] class OrthogonalSylvesterVAE(VAE): """ Variational auto-encoder with orthogonal flows in the encoder. """ def __init__(self, args): super(OrthogonalSylvesterVAE, self).__init__(args) # Initialize log-det-jacobian to zero self.log_det_j = 0. # Flow parameters flow = flows.Sylvester self.num_flows = args.num_flows self.num_ortho_vecs = args.num_ortho_vecs assert (self.num_ortho_vecs <= self.z_size) and (self.num_ortho_vecs > 0) # Orthogonalization parameters if self.num_ortho_vecs == self.z_size: self.cond = 1.e-5 else: self.cond = 1.e-6 self.steps = 100 identity = torch.eye(self.num_ortho_vecs, self.num_ortho_vecs) # Add batch dimension identity = identity.unsqueeze(0) # Put identity in buffer so that it will be moved to GPU if needed by any call of .cuda self.register_buffer('_eye', identity) self._eye.requires_grad = False # Masks needed for triangular R1 and R2. 
triu_mask = torch.triu(torch.ones(self.num_ortho_vecs, self.num_ortho_vecs), diagonal=1) triu_mask = triu_mask.unsqueeze(0).unsqueeze(3) diag_idx = torch.arange(0, self.num_ortho_vecs).long() self.register_buffer('triu_mask', triu_mask) self.triu_mask.requires_grad = False self.register_buffer('diag_idx', diag_idx) # Amortized flow parameters # Diagonal elements of R1 * R2 have to satisfy -1 < R1 * R2 for flow to be invertible self.diag_activation = nn.Tanh() self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs * self.num_ortho_vecs) self.amor_diag1 = nn.Sequential( nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs), self.diag_activation ) self.amor_diag2 = nn.Sequential( nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs), self.diag_activation ) self.amor_q = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.num_ortho_vecs) self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs) # Normalizing flow layers for k in range(self.num_flows): flow_k = flow(self.num_ortho_vecs) self.add_module('flow_' + str(k), flow_k) def batch_construct_orthogonal(self, q): """ Batch orthogonal matrix construction. :param q: q contains batches of matrices, shape : (batch_size * num_flows, z_size * num_ortho_vecs) :return: batches of orthogonalized matrices, shape: (batch_size * num_flows, z_size, num_ortho_vecs) """ # Reshape to shape (num_flows * batch_size, z_size * num_ortho_vecs) q = q.view(-1, self.z_size * self.num_ortho_vecs) norm = torch.norm(q, p=2, dim=1, keepdim=True) amat = torch.div(q, norm) dim0 = amat.size(0) amat = amat.resize(dim0, self.z_size, self.num_ortho_vecs) max_norm = 0. 
# Iterative orthogonalization for s in range(self.steps): tmp = torch.bmm(amat.transpose(2, 1), amat) tmp = self._eye - tmp tmp = self._eye + 0.5 * tmp amat = torch.bmm(amat, tmp) # Testing for convergence test = torch.bmm(amat.transpose(2, 1), amat) - self._eye norms2 = torch.sum(torch.norm(test, p=2, dim=2)**2, dim=1) norms = torch.sqrt(norms2) max_norm = torch.max(norms).item() if max_norm <= self.cond: break if max_norm > self.cond: print('\nWARNING WARNING WARNING: orthogonalization not complete') print('\t Final max norm =', max_norm) print() # Reshaping: first dimension is batch_size amat = amat.view(-1, self.num_flows, self.z_size, self.num_ortho_vecs) amat = amat.transpose(0, 1) return amat def encode(self, x): """ Encoder that ouputs parameters for base distribution of z and flow parameters. """ batch_size = x.size(0) h = self.q_z_nn(x) h = h.view(-1, self.q_z_nn_output_dim) mean_z = self.q_z_mean(h) var_z = self.q_z_var(h) # Amortized r1, r2, q, b for all flows full_d = self.amor_d(h) diag1 = self.amor_diag1(h) diag2 = self.amor_diag2(h) full_d = full_d.resize(batch_size, self.num_ortho_vecs, self.num_ortho_vecs, self.num_flows) diag1 = diag1.resize(batch_size, self.num_ortho_vecs, self.num_flows) diag2 = diag2.resize(batch_size, self.num_ortho_vecs, self.num_flows) r1 = full_d * self.triu_mask r2 = full_d.transpose(2, 1) * self.triu_mask r1[:, self.diag_idx, self.diag_idx, :] = diag1 r2[:, self.diag_idx, self.diag_idx, :] = diag2 q = self.amor_q(h) b = self.amor_b(h) # Resize flow parameters to divide over K flows b = b.resize(batch_size, 1, self.num_ortho_vecs, self.num_flows) return mean_z, var_z, r1, r2, q, b def forward(self, x): """ Forward pass with orthogonal sylvester flows for the transformation z_0 -> z_1 -> ... -> z_k. Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]. """ self.log_det_j = 0. 
z_mu, z_var, r1, r2, q, b = self.encode(x) # Orthogonalize all q matrices q_ortho = self.batch_construct_orthogonal(q) # Sample z_0 z = [self.reparameterize(z_mu, z_var)] # Normalizing flows for k in range(self.num_flows): flow_k = getattr(self, 'flow_' + str(k)) z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], q_ortho[k, :, :, :], b[:, :, :, k]) z.append(z_k) self.log_det_j += log_det_jacobian x_mean = self.decode(z[-1]) return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1] class HouseholderSylvesterVAE(VAE): """ Variational auto-encoder with householder sylvester flows in the encoder. """ def __init__(self, args): super(HouseholderSylvesterVAE, self).__init__(args) # Initialize log-det-jacobian to zero self.log_det_j = 0. # Flow parameters flow = flows.Sylvester self.num_flows = args.num_flows self.num_householder = args.num_householder assert self.num_householder > 0 identity = torch.eye(self.z_size, self.z_size) # Add batch dimension identity = identity.unsqueeze(0) # Put identity in buffer so that it will be moved to GPU if needed by any call of .cuda self.register_buffer('_eye', identity) self._eye.requires_grad = False # Masks needed for triangular r1 and r2. 
triu_mask = torch.triu(torch.ones(self.z_size, self.z_size), diagonal=1) triu_mask = triu_mask.unsqueeze(0).unsqueeze(3) diag_idx = torch.arange(0, self.z_size).long() self.register_buffer('triu_mask', triu_mask) self.triu_mask.requires_grad = False self.register_buffer('diag_idx', diag_idx) # Amortized flow parameters # Diagonal elements of r1 * r2 have to satisfy -1 < r1 * r2 for flow to be invertible self.diag_activation = nn.Tanh() self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.z_size) self.amor_diag1 = nn.Sequential( nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation ) self.amor_diag2 = nn.Sequential( nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation ) self.amor_q = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.num_householder) self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size) # Normalizing flow layers for k in range(self.num_flows): flow_k = flow(self.z_size) self.add_module('flow_' + str(k), flow_k) def batch_construct_orthogonal(self, q): """ Batch orthogonal matrix construction. :param q: q contains batches of matrices, shape : (batch_size, num_flows * z_size * num_householder) :return: batches of orthogonalized matrices, shape: (batch_size * num_flows, z_size, z_size) """ # Reshape to shape (num_flows * batch_size * num_householder, z_size) q = q.view(-1, self.z_size) norm = torch.norm(q, p=2, dim=1, keepdim=True) # ||v||_2 v = torch.div(q, norm) # v / ||v||_2 # Calculate Householder Matrices vvT = torch.bmm(v.unsqueeze(2), v.unsqueeze(1)) # v * v_T : batch_dot( B x L x 1 * B x 1 x L ) = B x L x L amat = self._eye - 2 * vvT # NOTICE: v is already normalized! 
so there is no need to calculate vvT/vTv # Reshaping: first dimension is batch_size * num_flows amat = amat.view(-1, self.num_householder, self.z_size, self.z_size) tmp = amat[:, 0] for k in range(1, self.num_householder): tmp = torch.bmm(amat[:, k], tmp) amat = tmp.view(-1, self.num_flows, self.z_size, self.z_size) amat = amat.transpose(0, 1) return amat def encode(self, x): """ Encoder that ouputs parameters for base distribution of z and flow parameters. """ batch_size = x.size(0) h = self.q_z_nn(x) h = h.view(-1, self.q_z_nn_output_dim) mean_z = self.q_z_mean(h) var_z = self.q_z_var(h) # Amortized r1, r2, q, b for all flows full_d = self.amor_d(h) diag1 = self.amor_diag1(h) diag2 = self.amor_diag2(h) full_d = full_d.resize(batch_size, self.z_size, self.z_size, self.num_flows) diag1 = diag1.resize(batch_size, self.z_size, self.num_flows) diag2 = diag2.resize(batch_size, self.z_size, self.num_flows) r1 = full_d * self.triu_mask r2 = full_d.transpose(2, 1) * self.triu_mask r1[:, self.diag_idx, self.diag_idx, :] = diag1 r2[:, self.diag_idx, self.diag_idx, :] = diag2 q = self.amor_q(h) b = self.amor_b(h) # Resize flow parameters to divide over K flows b = b.resize(batch_size, 1, self.z_size, self.num_flows) return mean_z, var_z, r1, r2, q, b def forward(self, x): """ Forward pass with orthogonal flows for the transformation z_0 -> z_1 -> ... -> z_k. Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]. """ self.log_det_j = 0. 
z_mu, z_var, r1, r2, q, b = self.encode(x) # Orthogonalize all q matrices q_ortho = self.batch_construct_orthogonal(q) # Sample z_0 z = [self.reparameterize(z_mu, z_var)] # Normalizing flows for k in range(self.num_flows): flow_k = getattr(self, 'flow_' + str(k)) q_k = q_ortho[k] z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], q_k, b[:, :, :, k], sum_ldj=True) z.append(z_k) self.log_det_j += log_det_jacobian x_mean = self.decode(z[-1]) return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1] class TriangularSylvesterVAE(VAE): """ Variational auto-encoder with triangular Sylvester flows in the encoder. Alternates between setting the orthogonal matrix equal to permutation and identity matrix for each flow. """ def __init__(self, args): super(TriangularSylvesterVAE, self).__init__(args) # Initialize log-det-jacobian to zero self.log_det_j = 0. # Flow parameters flow = flows.TriangularSylvester self.num_flows = args.num_flows # permuting indices corresponding to Q=P (permutation matrix) for every other flow flip_idx = torch.arange(self.z_size - 1, -1, -1).long() self.register_buffer('flip_idx', flip_idx) # Masks needed for triangular r1 and r2. 
triu_mask = torch.triu(torch.ones(self.z_size, self.z_size), diagonal=1) triu_mask = triu_mask.unsqueeze(0).unsqueeze(3) diag_idx = torch.arange(0, self.z_size).long() self.register_buffer('triu_mask', triu_mask) self.triu_mask.requires_grad = False self.register_buffer('diag_idx', diag_idx) # Amortized flow parameters # Diagonal elements of r1 * r2 have to satisfy -1 < r1 * r2 for flow to be invertible self.diag_activation = nn.Tanh() self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.z_size) self.amor_diag1 = nn.Sequential( nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation ) self.amor_diag2 = nn.Sequential( nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation ) self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size) # Normalizing flow layers for k in range(self.num_flows): flow_k = flow(self.z_size) self.add_module('flow_' + str(k), flow_k) def encode(self, x): """ Encoder that ouputs parameters for base distribution of z and flow parameters. """ batch_size = x.size(0) h = self.q_z_nn(x) h = h.view(-1, self.q_z_nn_output_dim) mean_z = self.q_z_mean(h) var_z = self.q_z_var(h) # Amortized r1, r2, b for all flows full_d = self.amor_d(h) diag1 = self.amor_diag1(h) diag2 = self.amor_diag2(h) full_d = full_d.resize(batch_size, self.z_size, self.z_size, self.num_flows) diag1 = diag1.resize(batch_size, self.z_size, self.num_flows) diag2 = diag2.resize(batch_size, self.z_size, self.num_flows) r1 = full_d * self.triu_mask r2 = full_d.transpose(2, 1) * self.triu_mask r1[:, self.diag_idx, self.diag_idx, :] = diag1 r2[:, self.diag_idx, self.diag_idx, :] = diag2 b = self.amor_b(h) # Resize flow parameters to divide over K flows b = b.resize(batch_size, 1, self.z_size, self.num_flows) return mean_z, var_z, r1, r2, b def forward(self, x): """ Forward pass with orthogonal flows for the transformation z_0 -> z_1 -> ... -> z_k. 
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]. """ self.log_det_j = 0. z_mu, z_var, r1, r2, b = self.encode(x) # Sample z_0 z = [self.reparameterize(z_mu, z_var)] # Normalizing flows for k in range(self.num_flows): flow_k = getattr(self, 'flow_' + str(k)) if k % 2 == 1: # Alternate with reorderering z for triangular flow permute_z = self.flip_idx else: permute_z = None z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], b[:, :, :, k], permute_z, sum_ldj=True) z.append(z_k) self.log_det_j += log_det_jacobian x_mean = self.decode(z[-1]) return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1] class IAFVAE(VAE): """ Variational auto-encoder with inverse autoregressive flows in the encoder. """ def __init__(self, args): super(IAFVAE, self).__init__(args) # Initialize log-det-jacobian to zero self.log_det_j = 0. self.h_size = args.made_h_size self.h_context = nn.Linear(self.q_z_nn_output_dim, self.h_size) # Flow parameters self.num_flows = args.num_flows self.flow = flows.IAF( z_size=self.z_size, num_flows=self.num_flows, num_hidden=1, h_size=self.h_size, conv2d=False ) def encode(self, x): """ Encoder that ouputs parameters for base distribution of z and context h for flows. """ h = self.q_z_nn(x) h = h.view(-1, self.q_z_nn_output_dim) mean_z = self.q_z_mean(h) var_z = self.q_z_var(h) h_context = self.h_context(h) return mean_z, var_z, h_context def forward(self, x): """ Forward pass with inverse autoregressive flows for the transformation z_0 -> z_1 -> ... -> z_k. Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]. """ # mean and variance of z z_mu, z_var, h_context = self.encode(x) # sample z z_0 = self.reparameterize(z_mu, z_var) # iaf flows z_k, self.log_det_j = self.flow(z_0, h_context) # decode x_mean = self.decode(z_k) return x_mean, z_mu, z_var, self.log_det_j, z_0, z_k
25,211
33.255435
120
py
steer
steer-master/ffjord/vae_lib/models/CNFVAE.py
import torch import torch.nn as nn from train_misc import build_model_tabular import lib.layers as layers from .VAE import VAE import lib.layers.diffeq_layers as diffeq_layers from lib.layers.odefunc import NONLINEARITIES from torchdiffeq import odeint_adjoint as odeint def get_hidden_dims(args): return tuple(map(int, args.dims.split("-"))) + (args.z_size,) def concat_layer_num_params(in_dim, out_dim): return (in_dim + 1) * out_dim + out_dim class CNFVAE(VAE): def __init__(self, args): super(CNFVAE, self).__init__(args) # CNF model self.cnf = build_model_tabular(args, args.z_size) if args.cuda: self.cuda() def encode(self, x): """ Encoder that ouputs parameters for base distribution of z and flow parameters. """ h = self.q_z_nn(x) h = h.view(-1, self.q_z_nn_output_dim) mean_z = self.q_z_mean(h) var_z = self.q_z_var(h) return mean_z, var_z def forward(self, x): """ Forward pass with planar flows for the transformation z_0 -> z_1 -> ... -> z_k. Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]. 
""" z_mu, z_var = self.encode(x) # Sample z_0 z0 = self.reparameterize(z_mu, z_var) zero = torch.zeros(x.shape[0], 1).to(x) zk, delta_logp = self.cnf(z0, zero) # run model forward x_mean = self.decode(zk) return x_mean, z_mu, z_var, -delta_logp.view(-1), z0, zk class AmortizedBiasODEnet(nn.Module): def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"): super(AmortizedBiasODEnet, self).__init__() base_layer = { "ignore": diffeq_layers.IgnoreLinear, "hyper": diffeq_layers.HyperLinear, "squash": diffeq_layers.SquashLinear, "concat": diffeq_layers.ConcatLinear, "concat_v2": diffeq_layers.ConcatLinear_v2, "concatsquash": diffeq_layers.ConcatSquashLinear, "blend": diffeq_layers.BlendLinear, "concatcoord": diffeq_layers.ConcatLinear, }[layer_type] self.input_dim = input_dim # build layers and add them layers = [] activation_fns = [] hidden_shape = input_dim for dim_out in hidden_dims: layer = base_layer(hidden_shape, dim_out) layers.append(layer) activation_fns.append(NONLINEARITIES[nonlinearity]) hidden_shape = dim_out self.layers = nn.ModuleList(layers) self.activation_fns = nn.ModuleList(activation_fns[:-1]) def _unpack_params(self, params): return [params] def forward(self, t, y, am_biases): dx = y for l, layer in enumerate(self.layers): dx = layer(t, dx) this_bias, am_biases = am_biases[:, :dx.size(1)], am_biases[:, dx.size(1):] dx = dx + this_bias # if not last layer, use nonlinearity if l < len(self.layers) - 1: dx = self.activation_fns[l](dx) return dx class AmortizedLowRankODEnet(nn.Module): def __init__(self, hidden_dims, input_dim, rank=1, layer_type="concat", nonlinearity="softplus"): super(AmortizedLowRankODEnet, self).__init__() base_layer = { "ignore": diffeq_layers.IgnoreLinear, "hyper": diffeq_layers.HyperLinear, "squash": diffeq_layers.SquashLinear, "concat": diffeq_layers.ConcatLinear, "concat_v2": diffeq_layers.ConcatLinear_v2, "concatsquash": diffeq_layers.ConcatSquashLinear, "blend": diffeq_layers.BlendLinear, 
"concatcoord": diffeq_layers.ConcatLinear, }[layer_type] self.input_dim = input_dim # build layers and add them layers = [] activation_fns = [] hidden_shape = input_dim self.output_dims = hidden_dims self.input_dims = (input_dim,) + hidden_dims[:-1] for dim_out in hidden_dims: layer = base_layer(hidden_shape, dim_out) layers.append(layer) activation_fns.append(NONLINEARITIES[nonlinearity]) hidden_shape = dim_out self.layers = nn.ModuleList(layers) self.activation_fns = nn.ModuleList(activation_fns[:-1]) self.rank = rank def _unpack_params(self, params): return [params] def _rank_k_bmm(self, x, u, v): xu = torch.bmm(x[:, None], u.view(x.shape[0], x.shape[-1], self.rank)) xuv = torch.bmm(xu, v.view(x.shape[0], self.rank, -1)) return xuv[:, 0] def forward(self, t, y, am_params): dx = y for l, (layer, in_dim, out_dim) in enumerate(zip(self.layers, self.input_dims, self.output_dims)): this_u, am_params = am_params[:, :in_dim * self.rank], am_params[:, in_dim * self.rank:] this_v, am_params = am_params[:, :out_dim * self.rank], am_params[:, out_dim * self.rank:] this_bias, am_params = am_params[:, :out_dim], am_params[:, out_dim:] xw = layer(t, dx) xw_am = self._rank_k_bmm(dx, this_u, this_v) dx = xw + xw_am + this_bias # if not last layer, use nonlinearity if l < len(self.layers) - 1: dx = self.activation_fns[l](dx) return dx class HyperODEnet(nn.Module): def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"): super(HyperODEnet, self).__init__() assert layer_type == "concat" self.input_dim = input_dim # build layers and add them activation_fns = [] for dim_out in hidden_dims + (input_dim,): activation_fns.append(NONLINEARITIES[nonlinearity]) self.activation_fns = nn.ModuleList(activation_fns[:-1]) self.output_dims = hidden_dims self.input_dims = (input_dim,) + hidden_dims[:-1] def _pack_inputs(self, t, x): tt = torch.ones_like(x[:, :1]) * t ttx = torch.cat([tt, x], 1) return ttx def _unpack_params(self, params): layer_params = [] for 
in_dim, out_dim in zip(self.input_dims, self.output_dims): this_num_params = concat_layer_num_params(in_dim, out_dim) # get params for this layer this_params, params = params[:, :this_num_params], params[:, this_num_params:] # split into weight and bias bias, weight_params = this_params[:, :out_dim], this_params[:, out_dim:] weight = weight_params.view(weight_params.size(0), in_dim + 1, out_dim) layer_params.append(weight) layer_params.append(bias) return layer_params def _layer(self, t, x, weight, bias): # weights is (batch, in_dim + 1, out_dim) ttx = self._pack_inputs(t, x) # (batch, in_dim + 1) ttx = ttx.view(ttx.size(0), 1, ttx.size(1)) # (batch, 1, in_dim + 1) xw = torch.bmm(ttx, weight)[:, 0, :] # (batch, out_dim) return xw + bias def forward(self, t, y, *layer_params): dx = y for l, (weight, bias) in enumerate(zip(layer_params[::2], layer_params[1::2])): dx = self._layer(t, dx, weight, bias) # if not last layer, use nonlinearity if l < len(layer_params) - 1: dx = self.activation_fns[l](dx) return dx class LyperODEnet(nn.Module): def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"): super(LyperODEnet, self).__init__() base_layer = { "ignore": diffeq_layers.IgnoreLinear, "hyper": diffeq_layers.HyperLinear, "squash": diffeq_layers.SquashLinear, "concat": diffeq_layers.ConcatLinear, "concat_v2": diffeq_layers.ConcatLinear_v2, "concatsquash": diffeq_layers.ConcatSquashLinear, "blend": diffeq_layers.BlendLinear, "concatcoord": diffeq_layers.ConcatLinear, }[layer_type] self.input_dim = input_dim # build layers and add them layers = [] activation_fns = [] hidden_shape = input_dim self.dims = (input_dim,) + hidden_dims self.output_dims = hidden_dims self.input_dims = (input_dim,) + hidden_dims[:-1] for dim_out in hidden_dims[:-1]: layer = base_layer(hidden_shape, dim_out) layers.append(layer) activation_fns.append(NONLINEARITIES[nonlinearity]) hidden_shape = dim_out self.layers = nn.ModuleList(layers) self.activation_fns = 
nn.ModuleList(activation_fns) def _pack_inputs(self, t, x): tt = torch.ones_like(x[:, :1]) * t ttx = torch.cat([tt, x], 1) return ttx def _unpack_params(self, params): return [params] def _am_layer(self, t, x, weight, bias): # weights is (batch, in_dim + 1, out_dim) ttx = self._pack_inputs(t, x) # (batch, in_dim + 1) ttx = ttx.view(ttx.size(0), 1, ttx.size(1)) # (batch, 1, in_dim + 1) xw = torch.bmm(ttx, weight)[:, 0, :] # (batch, out_dim) return xw + bias def forward(self, t, x, am_params): dx = x for layer, act in zip(self.layers, self.activation_fns): dx = act(layer(t, dx)) bias, weight_params = am_params[:, :self.dims[-1]], am_params[:, self.dims[-1]:] weight = weight_params.view(weight_params.size(0), self.dims[-2] + 1, self.dims[-1]) dx = self._am_layer(t, dx, weight, bias) return dx def construct_amortized_odefunc(args, z_dim, amortization_type="bias"): hidden_dims = get_hidden_dims(args) if amortization_type == "bias": diffeq = AmortizedBiasODEnet( hidden_dims=hidden_dims, input_dim=z_dim, layer_type=args.layer_type, nonlinearity=args.nonlinearity, ) elif amortization_type == "hyper": diffeq = HyperODEnet( hidden_dims=hidden_dims, input_dim=z_dim, layer_type=args.layer_type, nonlinearity=args.nonlinearity, ) elif amortization_type == "lyper": diffeq = LyperODEnet( hidden_dims=hidden_dims, input_dim=z_dim, layer_type=args.layer_type, nonlinearity=args.nonlinearity, ) elif amortization_type == "low_rank": diffeq = AmortizedLowRankODEnet( hidden_dims=hidden_dims, input_dim=z_dim, layer_type=args.layer_type, nonlinearity=args.nonlinearity, rank=args.rank, ) odefunc = layers.ODEfunc( diffeq=diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher, ) return odefunc class AmortizedCNFVAE(VAE): h_size = 256 def __init__(self, args): super(AmortizedCNFVAE, self).__init__(args) # CNF model self.odefuncs = nn.ModuleList([ construct_amortized_odefunc(args, args.z_size, self.amortization_type) for _ in range(args.num_blocks) ]) 
self.q_am = self._amortized_layers(args) assert len(self.q_am) == args.num_blocks or len(self.q_am) == 0 if args.cuda: self.cuda() self.register_buffer('integration_times', torch.tensor([0.0, args.time_length])) self.atol = args.atol self.rtol = args.rtol self.solver = args.solver def encode(self, x): """ Encoder that ouputs parameters for base distribution of z and flow parameters. """ h = self.q_z_nn(x) h = h.view(-1, self.q_z_nn_output_dim) mean_z = self.q_z_mean(h) var_z = self.q_z_var(h) am_params = [q_am(h) for q_am in self.q_am] return mean_z, var_z, am_params def forward(self, x): self.log_det_j = 0. z_mu, z_var, am_params = self.encode(x) # Sample z_0 z0 = self.reparameterize(z_mu, z_var) delta_logp = torch.zeros(x.shape[0], 1).to(x) z = z0 for odefunc, am_param in zip(self.odefuncs, am_params): am_param_unpacked = odefunc.diffeq._unpack_params(am_param) odefunc.before_odeint() states = odeint( odefunc, (z, delta_logp) + tuple(am_param_unpacked), self.integration_times.to(z), atol=self.atol, rtol=self.rtol, method=self.solver, ) z, delta_logp = states[0][-1], states[1][-1] x_mean = self.decode(z) return x_mean, z_mu, z_var, -delta_logp.view(-1), z0, z class AmortizedBiasCNFVAE(AmortizedCNFVAE): amortization_type = "bias" def _amortized_layers(self, args): hidden_dims = get_hidden_dims(args) bias_size = sum(hidden_dims) return nn.ModuleList([nn.Linear(self.h_size, bias_size) for _ in range(args.num_blocks)]) class AmortizedLowRankCNFVAE(AmortizedCNFVAE): amortization_type = "low_rank" def _amortized_layers(self, args): out_dims = get_hidden_dims(args) in_dims = (out_dims[-1],) + out_dims[:-1] params_size = (sum(in_dims) + sum(out_dims)) * args.rank + sum(out_dims) return nn.ModuleList([nn.Linear(self.h_size, params_size) for _ in range(args.num_blocks)]) class HypernetCNFVAE(AmortizedCNFVAE): amortization_type = "hyper" def _amortized_layers(self, args): hidden_dims = get_hidden_dims(args) input_dims = (args.z_size,) + hidden_dims[:-1] assert 
args.layer_type == "concat", "hypernets only support concat layers at the moment" weight_dims = [concat_layer_num_params(in_dim, out_dim) for in_dim, out_dim in zip(input_dims, hidden_dims)] weight_size = sum(weight_dims) return nn.ModuleList([nn.Linear(self.h_size, weight_size) for _ in range(args.num_blocks)]) class LypernetCNFVAE(AmortizedCNFVAE): amortization_type = "lyper" def _amortized_layers(self, args): dims = (args.z_size,) + get_hidden_dims(args) weight_size = concat_layer_num_params(dims[-2], dims[-1]) return nn.ModuleList([nn.Linear(self.h_size, weight_size) for _ in range(args.num_blocks)])
14,405
33.881356
116
py
steer
steer-master/ffjord/vae_lib/models/layers.py
import torch import torch.nn as nn from torch.nn.parameter import Parameter import numpy as np import torch.nn.functional as F class Identity(nn.Module): def __init__(self): super(Identity, self).__init__() def forward(self, x): return x class GatedConv2d(nn.Module): def __init__(self, input_channels, output_channels, kernel_size, stride, padding, dilation=1, activation=None): super(GatedConv2d, self).__init__() self.activation = activation self.sigmoid = nn.Sigmoid() self.h = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation) self.g = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation) def forward(self, x): if self.activation is None: h = self.h(x) else: h = self.activation(self.h(x)) g = self.sigmoid(self.g(x)) return h * g class GatedConvTranspose2d(nn.Module): def __init__( self, input_channels, output_channels, kernel_size, stride, padding, output_padding=0, dilation=1, activation=None ): super(GatedConvTranspose2d, self).__init__() self.activation = activation self.sigmoid = nn.Sigmoid() self.h = nn.ConvTranspose2d( input_channels, output_channels, kernel_size, stride, padding, output_padding, dilation=dilation ) self.g = nn.ConvTranspose2d( input_channels, output_channels, kernel_size, stride, padding, output_padding, dilation=dilation ) def forward(self, x): if self.activation is None: h = self.h(x) else: h = self.activation(self.h(x)) g = self.sigmoid(self.g(x)) return h * g class MaskedLinear(nn.Module): """ Creates masked linear layer for MLP MADE. For input (x) to hidden (h) or hidden to hidden layers choose diagonal_zeros = False. For hidden to output (y) layers: If output depends on input through y_i = f(x_{<i}) set diagonal_zeros = True. Else if output depends on input through y_i = f(x_{<=i}) set diagonal_zeros = False. 
""" def __init__(self, in_features, out_features, diagonal_zeros=False, bias=True): super(MaskedLinear, self).__init__() self.in_features = in_features self.out_features = out_features self.diagonal_zeros = diagonal_zeros self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) mask = torch.from_numpy(self.build_mask()) if torch.cuda.is_available(): mask = mask.cuda() self.mask = torch.autograd.Variable(mask, requires_grad=False) self.reset_parameters() def reset_parameters(self): nn.init.kaiming_normal(self.weight) if self.bias is not None: self.bias.data.zero_() def build_mask(self): n_in, n_out = self.in_features, self.out_features assert n_in % n_out == 0 or n_out % n_in == 0 mask = np.ones((n_in, n_out), dtype=np.float32) if n_out >= n_in: k = n_out // n_in for i in range(n_in): mask[i + 1:, i * k:(i + 1) * k] = 0 if self.diagonal_zeros: mask[i:i + 1, i * k:(i + 1) * k] = 0 else: k = n_in // n_out for i in range(n_out): mask[(i + 1) * k:, i:i + 1] = 0 if self.diagonal_zeros: mask[i * k:(i + 1) * k:, i:i + 1] = 0 return mask def forward(self, x): output = x.mm(self.mask * self.weight) if self.bias is not None: return output.add(self.bias.expand_as(output)) else: return output def __repr__(self): if self.bias is not None: bias = True else: bias = False return self.__class__.__name__ + ' (' \ + str(self.in_features) + ' -> ' \ + str(self.out_features) + ', diagonal_zeros=' \ + str(self.diagonal_zeros) + ', bias=' \ + str(bias) + ')' class MaskedConv2d(nn.Module): """ Creates masked convolutional autoregressive layer for pixelCNN. For input (x) to hidden (h) or hidden to hidden layers choose diagonal_zeros = False. For hidden to output (y) layers: If output depends on input through y_i = f(x_{<i}) set diagonal_zeros = True. Else if output depends on input through y_i = f(x_{<=i}) set diagonal_zeros = False. 
""" def __init__(self, in_features, out_features, size_kernel=(3, 3), diagonal_zeros=False, bias=True): super(MaskedConv2d, self).__init__() self.in_features = in_features self.out_features = out_features self.size_kernel = size_kernel self.diagonal_zeros = diagonal_zeros self.weight = Parameter(torch.FloatTensor(out_features, in_features, *self.size_kernel)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) mask = torch.from_numpy(self.build_mask()) if torch.cuda.is_available(): mask = mask.cuda() self.mask = torch.autograd.Variable(mask, requires_grad=False) self.reset_parameters() def reset_parameters(self): nn.init.kaiming_normal(self.weight) if self.bias is not None: self.bias.data.zero_() def build_mask(self): n_in, n_out = self.in_features, self.out_features assert n_out % n_in == 0 or n_in % n_out == 0, "%d - %d" % (n_in, n_out) # Build autoregressive mask l = (self.size_kernel[0] - 1) // 2 m = (self.size_kernel[1] - 1) // 2 mask = np.ones((n_out, n_in, self.size_kernel[0], self.size_kernel[1]), dtype=np.float32) mask[:, :, :l, :] = 0 mask[:, :, l, :m] = 0 if n_out >= n_in: k = n_out // n_in for i in range(n_in): mask[i * k:(i + 1) * k, i + 1:, l, m] = 0 if self.diagonal_zeros: mask[i * k:(i + 1) * k, i:i + 1, l, m] = 0 else: k = n_in // n_out for i in range(n_out): mask[i:i + 1, (i + 1) * k:, l, m] = 0 if self.diagonal_zeros: mask[i:i + 1, i * k:(i + 1) * k:, l, m] = 0 return mask def forward(self, x): output = F.conv2d(x, self.mask * self.weight, bias=self.bias, padding=(1, 1)) return output def __repr__(self): if self.bias is not None: bias = True else: bias = False return self.__class__.__name__ + ' (' \ + str(self.in_features) + ' -> ' \ + str(self.out_features) + ', diagonal_zeros=' \ + str(self.diagonal_zeros) + ', bias=' \ + str(bias) + ', size_kernel=' \ + str(self.size_kernel) + ')'
7,128
32.947619
115
py
steer
steer-master/ffjord/vae_lib/models/__init__.py
0
0
0
py
steer
steer-master/ffjord/vae_lib/models/flows.py
""" Collection of flow strategies """ from __future__ import print_function import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F from vae_lib.models.layers import MaskedConv2d, MaskedLinear class Planar(nn.Module): """ PyTorch implementation of planar flows as presented in "Variational Inference with Normalizing Flows" by Danilo Jimenez Rezende, Shakir Mohamed. Model assumes amortized flow parameters. """ def __init__(self): super(Planar, self).__init__() self.h = nn.Tanh() self.softplus = nn.Softplus() def der_h(self, x): """ Derivative of tanh """ return 1 - self.h(x)**2 def forward(self, zk, u, w, b): """ Forward pass. Assumes amortized u, w and b. Conditions on diagonals of u and w for invertibility will be be satisfied inside this function. Computes the following transformation: z' = z + u h( w^T z + b) or actually z'^T = z^T + h(z^T w + b)u^T Assumes the following input shapes: shape u = (batch_size, z_size, 1) shape w = (batch_size, 1, z_size) shape b = (batch_size, 1, 1) shape z = (batch_size, z_size). """ zk = zk.unsqueeze(2) # reparameterize u such that the flow becomes invertible (see appendix paper) uw = torch.bmm(w, u) m_uw = -1. + self.softplus(uw) w_norm_sq = torch.sum(w**2, dim=2, keepdim=True) u_hat = u + ((m_uw - uw) * w.transpose(2, 1) / w_norm_sq) # compute flow with u_hat wzb = torch.bmm(w, zk) + b z = zk + u_hat * self.h(wzb) z = z.squeeze(2) # compute logdetJ psi = w * self.der_h(wzb) log_det_jacobian = torch.log(torch.abs(1 + torch.bmm(psi, u_hat))) log_det_jacobian = log_det_jacobian.squeeze(2).squeeze(1) return z, log_det_jacobian class Sylvester(nn.Module): """ Sylvester normalizing flow. 
""" def __init__(self, num_ortho_vecs): super(Sylvester, self).__init__() self.num_ortho_vecs = num_ortho_vecs self.h = nn.Tanh() triu_mask = torch.triu(torch.ones(num_ortho_vecs, num_ortho_vecs), diagonal=1).unsqueeze(0) diag_idx = torch.arange(0, num_ortho_vecs).long() self.register_buffer('triu_mask', Variable(triu_mask)) self.triu_mask.requires_grad = False self.register_buffer('diag_idx', diag_idx) def der_h(self, x): return self.der_tanh(x) def der_tanh(self, x): return 1 - self.h(x)**2 def _forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True): """ All flow parameters are amortized. Conditions on diagonals of R1 and R2 for invertibility need to be satisfied outside of this function. Computes the following transformation: z' = z + QR1 h( R2Q^T z + b) or actually z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T :param zk: shape: (batch_size, z_size) :param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs) :param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs) :param q_ortho: shape (batch_size, z_size , num_ortho_vecs) :param b: shape: (batch_size, 1, self.z_size) :return: z, log_det_j """ # Amortized flow parameters zk = zk.unsqueeze(1) # Save diagonals for log_det_j diag_r1 = r1[:, self.diag_idx, self.diag_idx] diag_r2 = r2[:, self.diag_idx, self.diag_idx] r1_hat = r1 r2_hat = r2 qr2 = torch.bmm(q_ortho, r2_hat.transpose(2, 1)) qr1 = torch.bmm(q_ortho, r1_hat) r2qzb = torch.bmm(zk, qr2) + b z = torch.bmm(self.h(r2qzb), qr1.transpose(2, 1)) + zk z = z.squeeze(1) # Compute log|det J| # Output log_det_j in shape (batch_size) instead of (batch_size,1) diag_j = diag_r1 * diag_r2 diag_j = self.der_h(r2qzb).squeeze(1) * diag_j diag_j += 1. log_diag_j = diag_j.abs().log() if sum_ldj: log_det_j = log_diag_j.sum(-1) else: log_det_j = log_diag_j return z, log_det_j def forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True): return self._forward(zk, r1, r2, q_ortho, b, sum_ldj) class TriangularSylvester(nn.Module): """ Sylvester normalizing flow with Q=P or Q=I. 
""" def __init__(self, z_size): super(TriangularSylvester, self).__init__() self.z_size = z_size self.h = nn.Tanh() diag_idx = torch.arange(0, z_size).long() self.register_buffer('diag_idx', diag_idx) def der_h(self, x): return self.der_tanh(x) def der_tanh(self, x): return 1 - self.h(x)**2 def _forward(self, zk, r1, r2, b, permute_z=None, sum_ldj=True): """ All flow parameters are amortized. conditions on diagonals of R1 and R2 need to be satisfied outside of this function. Computes the following transformation: z' = z + QR1 h( R2Q^T z + b) or actually z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T with Q = P a permutation matrix (equal to identity matrix if permute_z=None) :param zk: shape: (batch_size, z_size) :param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs). :param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs). :param b: shape: (batch_size, 1, self.z_size) :return: z, log_det_j """ # Amortized flow parameters zk = zk.unsqueeze(1) # Save diagonals for log_det_j diag_r1 = r1[:, self.diag_idx, self.diag_idx] diag_r2 = r2[:, self.diag_idx, self.diag_idx] if permute_z is not None: # permute order of z z_per = zk[:, :, permute_z] else: z_per = zk r2qzb = torch.bmm(z_per, r2.transpose(2, 1)) + b z = torch.bmm(self.h(r2qzb), r1.transpose(2, 1)) if permute_z is not None: # permute order of z again back again z = z[:, :, permute_z] z += zk z = z.squeeze(1) # Compute log|det J| # Output log_det_j in shape (batch_size) instead of (batch_size,1) diag_j = diag_r1 * diag_r2 diag_j = self.der_h(r2qzb).squeeze(1) * diag_j diag_j += 1. log_diag_j = diag_j.abs().log() if sum_ldj: log_det_j = log_diag_j.sum(-1) else: log_det_j = log_diag_j return z, log_det_j def forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True): return self._forward(zk, r1, r2, q_ortho, b, sum_ldj) class IAF(nn.Module): """ PyTorch implementation of inverse autoregressive flows as presented in "Improving Variational Inference with Inverse Autoregressive Flow" by Diederik P. 
Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling. Inverse Autoregressive Flow with either MADE MLPs or Pixel CNNs. Contains several flows. Each transformation takes as an input the previous stochastic z, and a context h. The structure of each flow is then as follows: z <- autoregressive_layer(z) + h, allow for diagonal connections z <- autoregressive_layer(z), allow for diagonal connections : z <- autoregressive_layer(z), do not allow for diagonal connections. Note that the size of h needs to be the same as h_size, which is the width of the MADE layers. """ def __init__(self, z_size, num_flows=2, num_hidden=0, h_size=50, forget_bias=1., conv2d=False): super(IAF, self).__init__() self.z_size = z_size self.num_flows = num_flows self.num_hidden = num_hidden self.h_size = h_size self.conv2d = conv2d if not conv2d: ar_layer = MaskedLinear else: ar_layer = MaskedConv2d self.activation = torch.nn.ELU # self.activation = torch.nn.ReLU self.forget_bias = forget_bias self.flows = [] self.param_list = [] # For reordering z after each flow flip_idx = torch.arange(self.z_size - 1, -1, -1).long() self.register_buffer('flip_idx', flip_idx) for k in range(num_flows): arch_z = [ar_layer(z_size, h_size), self.activation()] self.param_list += list(arch_z[0].parameters()) z_feats = torch.nn.Sequential(*arch_z) arch_zh = [] for j in range(num_hidden): arch_zh += [ar_layer(h_size, h_size), self.activation()] self.param_list += list(arch_zh[-2].parameters()) zh_feats = torch.nn.Sequential(*arch_zh) linear_mean = ar_layer(h_size, z_size, diagonal_zeros=True) linear_std = ar_layer(h_size, z_size, diagonal_zeros=True) self.param_list += list(linear_mean.parameters()) self.param_list += list(linear_std.parameters()) if torch.cuda.is_available(): z_feats = z_feats.cuda() zh_feats = zh_feats.cuda() linear_mean = linear_mean.cuda() linear_std = linear_std.cuda() self.flows.append((z_feats, zh_feats, linear_mean, linear_std)) self.param_list = 
torch.nn.ParameterList(self.param_list) def forward(self, z, h_context): logdets = 0. for i, flow in enumerate(self.flows): if (i + 1) % 2 == 0 and not self.conv2d: # reverse ordering to help mixing z = z[:, self.flip_idx] h = flow[0](z) h = h + h_context h = flow[1](h) mean = flow[2](h) gate = F.sigmoid(flow[3](h) + self.forget_bias) z = gate * z + (1 - gate) * mean logdets += torch.sum(gate.log().view(gate.size(0), -1), 1) return z, logdets
9,939
32.133333
118
py
steer
steer-master/ffjord/vae_lib/optimization/loss.py
from __future__ import print_function import numpy as np import torch import torch.nn as nn from vae_lib.utils.distributions import log_normal_diag, log_normal_standard, log_bernoulli import torch.nn.functional as F def binary_loss_function(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.): """ Computes the binary loss function while summing over batch dimension, not averaged! :param recon_x: shape: (batch_size, num_channels, pixel_width, pixel_height), bernoulli parameters p(x=1) :param x: shape (batchsize, num_channels, pixel_width, pixel_height), pixel values rescaled between [0, 1]. :param z_mu: mean of z_0 :param z_var: variance of z_0 :param z_0: first stochastic latent variable :param z_k: last stochastic latent variable :param ldj: log det jacobian :param beta: beta for kl loss :return: loss, ce, kl """ reconstruction_function = nn.BCELoss(size_average=False) batch_size = x.size(0) # - N E_q0 [ ln p(x|z_k) ] bce = reconstruction_function(recon_x, x) # ln p(z_k) (not averaged) log_p_zk = log_normal_standard(z_k, dim=1) # ln q(z_0) (not averaged) log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1) # N E_q0[ ln q(z_0) - ln p(z_k) ] summed_logs = torch.sum(log_q_z0 - log_p_zk) # sum over batches summed_ldj = torch.sum(ldj) # ldj = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ] kl = (summed_logs - summed_ldj) loss = bce + beta * kl loss /= float(batch_size) bce /= float(batch_size) kl /= float(batch_size) return loss, bce, kl def multinomial_loss_function(x_logit, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.): """ Computes the cross entropy loss function while summing over batch dimension, not averaged! :param x_logit: shape: (batch_size, num_classes * num_channels, pixel_width, pixel_height), real valued logits :param x: shape (batchsize, num_channels, pixel_width, pixel_height), pixel values rescaled between [0, 1]. 
:param z_mu: mean of z_0 :param z_var: variance of z_0 :param z_0: first stochastic latent variable :param z_k: last stochastic latent variable :param ldj: log det jacobian :param args: global parameter settings :param beta: beta for kl loss :return: loss, ce, kl """ num_classes = 256 batch_size = x.size(0) x_logit = x_logit.view(batch_size, num_classes, args.input_size[0], args.input_size[1], args.input_size[2]) # make integer class labels target = (x * (num_classes - 1)).long() # - N E_q0 [ ln p(x|z_k) ] # sums over batch dimension (and feature dimension) ce = cross_entropy(x_logit, target, size_average=False) # ln p(z_k) (not averaged) log_p_zk = log_normal_standard(z_k, dim=1) # ln q(z_0) (not averaged) log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1) # N E_q0[ ln q(z_0) - ln p(z_k) ] summed_logs = torch.sum(log_q_z0 - log_p_zk) # sum over batches summed_ldj = torch.sum(ldj) # ldj = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ] kl = (summed_logs - summed_ldj) loss = ce + beta * kl loss /= float(batch_size) ce /= float(batch_size) kl /= float(batch_size) return loss, ce, kl def binary_loss_array(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.): """ Computes the binary loss without averaging or summing over the batch dimension. """ batch_size = x.size(0) # if not summed over batch_dimension if len(ldj.size()) > 1: ldj = ldj.view(ldj.size(0), -1).sum(-1) # TODO: upgrade to newest pytorch version on master branch, there the nn.BCELoss comes with the option # reduce, which when set to False, does no sum over batch dimension. 
bce = -log_bernoulli(x.view(batch_size, -1), recon_x.view(batch_size, -1), dim=1) # ln p(z_k) (not averaged) log_p_zk = log_normal_standard(z_k, dim=1) # ln q(z_0) (not averaged) log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1) # ln q(z_0) - ln p(z_k) ] logs = log_q_z0 - log_p_zk loss = bce + beta * (logs - ldj) return loss def multinomial_loss_array(x_logit, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.): """ Computes the discritezed logistic loss without averaging or summing over the batch dimension. """ num_classes = 256 batch_size = x.size(0) x_logit = x_logit.view(batch_size, num_classes, args.input_size[0], args.input_size[1], args.input_size[2]) # make integer class labels target = (x * (num_classes - 1)).long() # - N E_q0 [ ln p(x|z_k) ] # computes cross entropy over all dimensions separately: ce = cross_entropy(x_logit, target, size_average=False, reduce=False) # sum over feature dimension ce = ce.view(batch_size, -1).sum(dim=1) # ln p(z_k) (not averaged) log_p_zk = log_normal_standard(z_k.view(batch_size, -1), dim=1) # ln q(z_0) (not averaged) log_q_z0 = log_normal_diag( z_0.view(batch_size, -1), mean=z_mu.view(batch_size, -1), log_var=z_var.log().view(batch_size, -1), dim=1 ) # ln q(z_0) - ln p(z_k) ] logs = log_q_z0 - log_p_zk loss = ce + beta * (logs - ldj) return loss def cross_entropy(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True): r""" Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes instead of only (N, C, d_1, d_2) or (N, C). This criterion combines `log_softmax` and `nll_loss` in a single function. See :class:`~torch.nn.CrossEntropyLoss` for details. Args: input: Variable :math:`(N, C)` where `C = number of classes` target: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weight (Tensor, optional): a manual rescaling weight given to each class. 
If given, has to be a Tensor of size `C` size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. However, if the field sizeAverage is set to False, the losses are instead summed for each minibatch. Ignored if reduce is False. Default: ``True`` ignore_index (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. When size_average is True, the loss is averaged over non-ignored targets. Default: -100 reduce (bool, optional): By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: ``True`` """ return nll_loss(F.log_softmax(input, 1), target, weight, size_average, ignore_index, reduce) def nll_loss(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True): r""" Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes instead of only (N, C, d_1, d_2) or (N, C). The negative log likelihood loss. See :class:`~torch.nn.NLLLoss` for details. Args: input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)` in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K > 1` in the case of K-dimensional loss. target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K >= 1` for K-dimensional loss. weight (Tensor, optional): a manual rescaling weight given to each class. If given, has to be a Tensor of size `C` size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. If size_average is False, the losses are summed for each minibatch. Default: ``True`` ignore_index (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. When size_average is True, the loss is averaged over non-ignored targets. 
Default: -100 """ dim = input.dim() if dim == 2: return F.nll_loss( input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce ) elif dim == 4: return F.nll_loss( input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce ) elif dim == 3 or dim > 4: n = input.size(0) c = input.size(1) out_size = (n,) + input.size()[2:] if target.size()[1:] != input.size()[2:]: raise ValueError('Expected target size {}, got {}'.format(out_size, input.size())) input = input.contiguous().view(n, c, 1, -1) target = target.contiguous().view(n, 1, -1) if reduce: _loss = nn.NLLLoss2d(weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce) return _loss(input, target) out = F.nll_loss( input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce ) return out.view(out_size) else: raise ValueError('Expected 2 or more dimensions (got {})'.format(dim)) def calculate_loss(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.): """ Picks the correct loss depending on the input type. """ if args.input_type == 'binary': loss, rec, kl = binary_loss_function(x_mean, x, z_mu, z_var, z_0, z_k, ldj, beta=beta) bpd = 0. elif args.input_type == 'multinomial': loss, rec, kl = multinomial_loss_function(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args, beta=beta) bpd = loss.data[0] / (np.prod(args.input_size) * np.log(2.)) else: raise ValueError('Invalid input type for calculate loss: %s.' % args.input_type) return loss, rec, kl, bpd def calculate_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args): """ Picks the correct loss depending on the input type. """ if args.input_type == 'binary': loss = binary_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj) elif args.input_type == 'multinomial': loss = multinomial_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args) else: raise ValueError('Invalid input type for calculate loss: %s.' % args.input_type) return loss
10,566
37.849265
116
py
steer
steer-master/ffjord/vae_lib/optimization/training.py
from __future__ import print_function import time import torch from vae_lib.optimization.loss import calculate_loss from vae_lib.utils.visual_evaluation import plot_reconstructions from vae_lib.utils.log_likelihood import calculate_likelihood import numpy as np from train_misc import count_nfe, override_divergence_fn def train(epoch, train_loader, model, opt, args, logger): model.train() train_loss = np.zeros(len(train_loader)) train_bpd = np.zeros(len(train_loader)) num_data = 0 # set warmup coefficient beta = min([(epoch * 1.) / max([args.warmup, 1.]), args.max_beta]) logger.info('beta = {:5.4f}'.format(beta)) end = time.time() for batch_idx, (data, _) in enumerate(train_loader): if args.cuda: data = data.cuda() if args.dynamic_binarization: data = torch.bernoulli(data) data = data.view(-1, *args.input_size) opt.zero_grad() x_mean, z_mu, z_var, ldj, z0, zk = model(data) if 'cnf' in args.flow: f_nfe = count_nfe(model) loss, rec, kl, bpd = calculate_loss(x_mean, data, z_mu, z_var, z0, zk, ldj, args, beta=beta) loss.backward() if 'cnf' in args.flow: t_nfe = count_nfe(model) b_nfe = t_nfe - f_nfe train_loss[batch_idx] = loss.item() train_bpd[batch_idx] = bpd opt.step() rec = rec.item() kl = kl.item() num_data += len(data) batch_time = time.time() - end end = time.time() if batch_idx % args.log_interval == 0: if args.input_type == 'binary': perc = 100. * batch_idx / len(train_loader) log_msg = ( 'Epoch {:3d} [{:5d}/{:5d} ({:2.0f}%)] | Time {:.3f} | Loss {:11.6f} | ' 'Rec {:11.6f} | KL {:11.6f}'.format( epoch, num_data, len(train_loader.sampler), perc, batch_time, loss.item(), rec, kl ) ) else: perc = 100. 
* batch_idx / len(train_loader) tmp = 'Epoch {:3d} [{:5d}/{:5d} ({:2.0f}%)] | Time {:.3f} | Loss {:11.6f} | Bits/dim {:8.6f}' log_msg = tmp.format(epoch, num_data, len(train_loader.sampler), perc, batch_time, loss.item(), bpd), '\trec: {:11.3f}\tkl: {:11.6f}'.format(rec, kl) log_msg = "".join(log_msg) if 'cnf' in args.flow: log_msg += ' | NFE Forward {} | NFE Backward {}'.format(f_nfe, b_nfe) logger.info(log_msg) if args.input_type == 'binary': logger.info('====> Epoch: {:3d} Average train loss: {:.4f}'.format(epoch, train_loss.sum() / len(train_loader))) else: logger.info( '====> Epoch: {:3d} Average train loss: {:.4f}, average bpd: {:.4f}'. format(epoch, train_loss.sum() / len(train_loader), train_bpd.sum() / len(train_loader)) ) return train_loss def evaluate(data_loader, model, args, logger, testing=False, epoch=0): model.eval() loss = 0. batch_idx = 0 bpd = 0. if args.input_type == 'binary': loss_type = 'elbo' else: loss_type = 'bpd' if testing and 'cnf' in args.flow: override_divergence_fn(model, "brute_force") for data, _ in data_loader: batch_idx += 1 if args.cuda: data = data.cuda() with torch.no_grad(): data = data.view(-1, *args.input_size) x_mean, z_mu, z_var, ldj, z0, zk = model(data) batch_loss, rec, kl, batch_bpd = calculate_loss(x_mean, data, z_mu, z_var, z0, zk, ldj, args) bpd += batch_bpd loss += batch_loss.item() # PRINT RECONSTRUCTIONS if batch_idx == 1 and testing is False: plot_reconstructions(data, x_mean, batch_loss, loss_type, epoch, args) loss /= len(data_loader) bpd /= len(data_loader) if testing: logger.info('====> Test set loss: {:.4f}'.format(loss)) # Compute log-likelihood if testing and not ("cnf" in args.flow): # don't compute log-likelihood for cnf models with torch.no_grad(): test_data = data_loader.dataset.tensors[0] if args.cuda: test_data = test_data.cuda() logger.info('Computing log-likelihood on test set') model.eval() if args.dataset == 'caltech': log_likelihood, nll_bpd = calculate_likelihood(test_data, model, args, logger, 
S=2000, MB=500) else: log_likelihood, nll_bpd = calculate_likelihood(test_data, model, args, logger, S=5000, MB=500) if 'cnf' in args.flow: override_divergence_fn(model, args.divergence_fn) else: log_likelihood = None nll_bpd = None if args.input_type in ['multinomial']: bpd = loss / (np.prod(args.input_size) * np.log(2.)) if testing and not ("cnf" in args.flow): logger.info('====> Test set log-likelihood: {:.4f}'.format(log_likelihood)) if args.input_type != 'binary': logger.info('====> Test set bpd (elbo): {:.4f}'.format(bpd)) logger.info( '====> Test set bpd (log-likelihood): {:.4f}'. format(log_likelihood / (np.prod(args.input_size) * np.log(2.))) ) if not testing: return loss, bpd else: return log_likelihood, nll_bpd
5,518
31.087209
120
py
steer
steer-master/ffjord/vae_lib/optimization/__init__.py
0
0
0
py
steer
steer-master/ffjord/vae_lib/utils/distributions.py
from __future__ import print_function import torch import torch.utils.data import math MIN_EPSILON = 1e-5 MAX_EPSILON = 1. - 1e-5 PI = torch.FloatTensor([math.pi]) if torch.cuda.is_available(): PI = PI.cuda() # N(x | mu, var) = 1/sqrt{2pi var} exp[-1/(2 var) (x-mean)(x-mean)] # log N(x| mu, var) = -log sqrt(2pi) -0.5 log var - 0.5 (x-mean)(x-mean)/var def log_normal_diag(x, mean, log_var, average=False, reduce=True, dim=None): log_norm = -0.5 * (log_var + (x - mean) * (x - mean) * log_var.exp().reciprocal()) if reduce: if average: return torch.mean(log_norm, dim) else: return torch.sum(log_norm, dim) else: return log_norm def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None): log_norm = -(x - mean) * (x - mean) log_norm *= torch.reciprocal(2. * log_var.exp()) log_norm += -0.5 * log_var log_norm += -0.5 * torch.log(2. * PI) if reduce: if average: return torch.mean(log_norm, dim) else: return torch.sum(log_norm, dim) else: return log_norm def log_normal_standard(x, average=False, reduce=True, dim=None): log_norm = -0.5 * x * x if reduce: if average: return torch.mean(log_norm, dim) else: return torch.sum(log_norm, dim) else: return log_norm def log_bernoulli(x, mean, average=False, reduce=True, dim=None): probs = torch.clamp(mean, min=MIN_EPSILON, max=MAX_EPSILON) log_bern = x * torch.log(probs) + (1. - x) * torch.log(1. - probs) if reduce: if average: return torch.mean(log_bern, dim) else: return torch.sum(log_bern, dim) else: return log_bern
1,768
25.80303
86
py
steer
steer-master/ffjord/vae_lib/utils/plotting.py
from __future__ import division from __future__ import print_function import numpy as np import matplotlib # noninteractive background matplotlib.use('Agg') import matplotlib.pyplot as plt def plot_training_curve(train_loss, validation_loss, fname='training_curve.pdf', labels=None): """ Plots train_loss and validation loss as a function of optimization iteration :param train_loss: np.array of train_loss (1D or 2D) :param validation_loss: np.array of validation loss (1D or 2D) :param fname: output file name :param labels: if train_loss and validation loss are 2D, then labels indicate which variable is varied accross training curves. :return: None """ plt.close() matplotlib.rcParams.update({'font.size': 14}) matplotlib.rcParams['mathtext.fontset'] = 'stix' matplotlib.rcParams['font.family'] = 'STIXGeneral' if len(train_loss.shape) == 1: # Single training curve fig, ax = plt.subplots(nrows=1, ncols=1) figsize = (6, 4) if train_loss.shape[0] == validation_loss.shape[0]: # validation score evaluated every iteration x = np.arange(train_loss.shape[0]) ax.plot(x, train_loss, '-', lw=2., color='black', label='train') ax.plot(x, validation_loss, '-', lw=2., color='blue', label='val') elif train_loss.shape[0] % validation_loss.shape[0] == 0: # validation score evaluated every epoch x = np.arange(train_loss.shape[0]) ax.plot(x, train_loss, '-', lw=2., color='black', label='train') x = np.arange(validation_loss.shape[0]) x = (x + 1) * train_loss.shape[0] / validation_loss.shape[0] ax.plot(x, validation_loss, '-', lw=2., color='blue', label='val') else: raise ValueError('Length of train_loss and validation_loss must be equal or divisible') miny = np.minimum(validation_loss.min(), train_loss.min()) - 20. maxy = np.maximum(validation_loss.max(), train_loss.max()) + 30. 
ax.set_ylim([miny, maxy]) elif len(train_loss.shape) == 2: # Multiple training curves cmap = plt.cm.brg cNorm = matplotlib.colors.Normalize(vmin=0, vmax=train_loss.shape[0]) scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap) fig, ax = plt.subplots(nrows=1, ncols=1) figsize = (6, 4) if labels is None: labels = ['%d' % i for i in range(train_loss.shape[0])] if train_loss.shape[1] == validation_loss.shape[1]: for i in range(train_loss.shape[0]): color_val = scalarMap.to_rgba(i) # validation score evaluated every iteration x = np.arange(train_loss.shape[0]) ax.plot(x, train_loss[i], '-', lw=2., color=color_val, label=labels[i]) ax.plot(x, validation_loss[i], '--', lw=2., color=color_val) elif train_loss.shape[1] % validation_loss.shape[1] == 0: for i in range(train_loss.shape[0]): color_val = scalarMap.to_rgba(i) # validation score evaluated every epoch x = np.arange(train_loss.shape[1]) ax.plot(x, train_loss[i], '-', lw=2., color=color_val, label=labels[i]) x = np.arange(validation_loss.shape[1]) x = (x + 1) * train_loss.shape[1] / validation_loss.shape[1] ax.plot(x, validation_loss[i], '-', lw=2., color=color_val) miny = np.minimum(validation_loss.min(), train_loss.min()) - 20. maxy = np.maximum(validation_loss.max(), train_loss.max()) + 30. ax.set_ylim([miny, maxy]) else: raise ValueError('train_loss and validation_loss must be 1D or 2D arrays') ax.set_xlabel('iteration') ax.set_ylabel('loss') plt.title('Training and validation loss') fig.set_size_inches(figsize) fig.subplots_adjust(hspace=0.1) plt.savefig(fname, bbox_inches='tight') plt.close()
4,021
37.304762
106
py
steer
steer-master/ffjord/vae_lib/utils/log_likelihood.py
from __future__ import print_function import time import numpy as np from scipy.misc import logsumexp from vae_lib.optimization.loss import calculate_loss_array def calculate_likelihood(X, model, args, logger, S=5000, MB=500): # set auxiliary variables for number of training and test sets N_test = X.size(0) X = X.view(-1, *args.input_size) likelihood_test = [] if S <= MB: R = 1 else: R = S // MB S = MB end = time.time() for j in range(N_test): x_single = X[j].unsqueeze(0) a = [] for r in range(0, R): # Repeat it for all training points x = x_single.expand(S, *x_single.size()[1:]).contiguous() x_mean, z_mu, z_var, ldj, z0, zk = model(x) a_tmp = calculate_loss_array(x_mean, x, z_mu, z_var, z0, zk, ldj, args) a.append(-a_tmp.cpu().data.numpy()) # calculate max a = np.asarray(a) a = np.reshape(a, (a.shape[0] * a.shape[1], 1)) likelihood_x = logsumexp(a) likelihood_test.append(likelihood_x - np.log(len(a))) if j % 1 == 0: logger.info('Progress: {:.2f}% | Time: {:.4f}'.format(j / (1. * N_test) * 100, time.time() - end)) end = time.time() likelihood_test = np.array(likelihood_test) nll = -np.mean(likelihood_test) if args.input_type == 'multinomial': bpd = nll / (np.prod(args.input_size) * np.log(2.)) elif args.input_type == 'binary': bpd = 0. else: raise ValueError('invalid input type!') return nll, bpd
1,592
25.114754
110
py
steer
steer-master/ffjord/vae_lib/utils/load_data.py
from __future__ import print_function import torch import torch.utils.data as data_utils import pickle from scipy.io import loadmat import numpy as np import os def load_static_mnist(args, **kwargs): """ Dataloading function for static mnist. Outputs image data in vectorized form: each image is a vector of size 784 """ args.dynamic_binarization = False args.input_type = 'binary' args.input_size = [1, 28, 28] # start processing def lines_to_np_array(lines): return np.array([[int(i) for i in line.split()] for line in lines]) with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_train.amat')) as f: lines = f.readlines() x_train = lines_to_np_array(lines).astype('float32') with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_valid.amat')) as f: lines = f.readlines() x_val = lines_to_np_array(lines).astype('float32') with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_test.amat')) as f: lines = f.readlines() x_test = lines_to_np_array(lines).astype('float32') # shuffle train data np.random.shuffle(x_train) # idle y's y_train = np.zeros((x_train.shape[0], 1)) y_val = np.zeros((x_val.shape[0], 1)) y_test = np.zeros((x_test.shape[0], 1)) # pytorch data loader train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs) return train_loader, val_loader, test_loader, args def load_freyfaces(args, **kwargs): # set args args.input_size = [1, 28, 20] args.input_type = 'multinomial' args.dynamic_binarization = False TRAIN = 1565 VAL = 200 TEST = 200 # 
start processing with open('data/Freyfaces/freyfaces.pkl', 'rb') as f: data = pickle.load(f, encoding="latin1")[0] data = data / 255. # NOTE: shuffling is done before splitting into train and test set, so test set is different for every run! # shuffle data: np.random.seed(args.freyseed) np.random.shuffle(data) # train images x_train = data[0:TRAIN].reshape(-1, 28 * 20) # validation images x_val = data[TRAIN:(TRAIN + VAL)].reshape(-1, 28 * 20) # test images x_test = data[(TRAIN + VAL):(TRAIN + VAL + TEST)].reshape(-1, 28 * 20) # idle y's y_train = np.zeros((x_train.shape[0], 1)) y_val = np.zeros((x_val.shape[0], 1)) y_test = np.zeros((x_test.shape[0], 1)) # pytorch data loader train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs) return train_loader, val_loader, test_loader, args def load_omniglot(args, **kwargs): n_validation = 1345 # set args args.input_size = [1, 28, 28] args.input_type = 'binary' args.dynamic_binarization = True # start processing def reshape_data(data): return data.reshape((-1, 28, 28)).reshape((-1, 28 * 28), order='F') omni_raw = loadmat(os.path.join('data', 'OMNIGLOT', 'chardata.mat')) # train and test data train_data = reshape_data(omni_raw['data'].T.astype('float32')) x_test = reshape_data(omni_raw['testdata'].T.astype('float32')) # shuffle train data np.random.shuffle(train_data) # set train and validation data x_train = train_data[:-n_validation] x_val = train_data[-n_validation:] # binarize if args.dynamic_binarization: 
args.input_type = 'binary' np.random.seed(777) x_val = np.random.binomial(1, x_val) x_test = np.random.binomial(1, x_test) else: args.input_type = 'gray' # idle y's y_train = np.zeros((x_train.shape[0], 1)) y_val = np.zeros((x_val.shape[0], 1)) y_test = np.zeros((x_test.shape[0], 1)) # pytorch data loader train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs) return train_loader, val_loader, test_loader, args def load_caltech101silhouettes(args, **kwargs): # set args args.input_size = [1, 28, 28] args.input_type = 'binary' args.dynamic_binarization = False # start processing def reshape_data(data): return data.reshape((-1, 28, 28)).reshape((-1, 28 * 28), order='F') caltech_raw = loadmat(os.path.join('data', 'Caltech101Silhouettes', 'caltech101_silhouettes_28_split1.mat')) # train, validation and test data x_train = 1. - reshape_data(caltech_raw['train_data'].astype('float32')) np.random.shuffle(x_train) x_val = 1. - reshape_data(caltech_raw['val_data'].astype('float32')) np.random.shuffle(x_val) x_test = 1. 
- reshape_data(caltech_raw['test_data'].astype('float32')) y_train = caltech_raw['train_labels'] y_val = caltech_raw['val_labels'] y_test = caltech_raw['test_labels'] # pytorch data loader train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train)) train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val)) val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs) test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test)) test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs) return train_loader, val_loader, test_loader, args def load_dataset(args, **kwargs): if args.dataset == 'mnist': train_loader, val_loader, test_loader, args = load_static_mnist(args, **kwargs) elif args.dataset == 'caltech': train_loader, val_loader, test_loader, args = load_caltech101silhouettes(args, **kwargs) elif args.dataset == 'freyfaces': train_loader, val_loader, test_loader, args = load_freyfaces(args, **kwargs) elif args.dataset == 'omniglot': train_loader, val_loader, test_loader, args = load_omniglot(args, **kwargs) else: raise Exception('Wrong name of the dataset!') return train_loader, val_loader, test_loader, args
7,592
35.859223
116
py
steer
steer-master/ffjord/vae_lib/utils/__init__.py
0
0
0
py
steer
steer-master/ffjord/vae_lib/utils/visual_evaluation.py
from __future__ import print_function import os import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec def plot_reconstructions(data, recon_mean, loss, loss_type, epoch, args): if args.input_type == 'multinomial': # data is already between 0 and 1 num_classes = 256 # Find largest class logit tmp = recon_mean.view(-1, num_classes, *args.input_size).max(dim=1)[1] recon_mean = tmp.float() / (num_classes - 1.) if epoch == 1: if not os.path.exists(args.snap_dir + 'reconstruction/'): os.makedirs(args.snap_dir + 'reconstruction/') # VISUALIZATION: plot real images plot_images(args, data.data.cpu().numpy()[0:9], args.snap_dir + 'reconstruction/', 'real', size_x=3, size_y=3) # VISUALIZATION: plot reconstructions if loss_type == 'bpd': fname = str(epoch) + '_bpd_%5.3f' % loss elif loss_type == 'elbo': fname = str(epoch) + '_elbo_%6.4f' % loss plot_images(args, recon_mean.data.cpu().numpy()[0:9], args.snap_dir + 'reconstruction/', fname, size_x=3, size_y=3) def plot_images(args, x_sample, dir, file_name, size_x=3, size_y=3): fig = plt.figure(figsize=(size_x, size_y)) # fig = plt.figure(1) gs = gridspec.GridSpec(size_x, size_y) gs.update(wspace=0.05, hspace=0.05) for i, sample in enumerate(x_sample): ax = plt.subplot(gs[i]) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_aspect('equal') sample = sample.reshape((args.input_size[0], args.input_size[1], args.input_size[2])) sample = sample.swapaxes(0, 2) sample = sample.swapaxes(0, 1) if (args.input_type == 'binary') or (args.input_type in ['multinomial'] and args.input_size[0] == 1): sample = sample[:, :, 0] plt.imshow(sample, cmap='gray', vmin=0, vmax=1) else: plt.imshow(sample) plt.savefig(dir + file_name + '.png', bbox_inches='tight') plt.close(fig)
2,063
37.222222
119
py
steer
steer-master/ffjord/datasets/power.py
import numpy as np import datasets class POWER: class Data: def __init__(self, data): self.x = data.astype(np.float32) self.N = self.x.shape[0] def __init__(self): trn, val, tst = load_data_normalised() self.trn = self.Data(trn) self.val = self.Data(val) self.tst = self.Data(tst) self.n_dims = self.trn.x.shape[1] def load_data(): return np.load(datasets.root + 'power/data.npy') def load_data_split_with_noise(): rng = np.random.RandomState(42) data = load_data() rng.shuffle(data) N = data.shape[0] data = np.delete(data, 3, axis=1) data = np.delete(data, 1, axis=1) ############################ # Add noise ############################ # global_intensity_noise = 0.1*rng.rand(N, 1) voltage_noise = 0.01 * rng.rand(N, 1) # grp_noise = 0.001*rng.rand(N, 1) gap_noise = 0.001 * rng.rand(N, 1) sm_noise = rng.rand(N, 3) time_noise = np.zeros((N, 1)) # noise = np.hstack((gap_noise, grp_noise, voltage_noise, global_intensity_noise, sm_noise, time_noise)) # noise = np.hstack((gap_noise, grp_noise, voltage_noise, sm_noise, time_noise)) noise = np.hstack((gap_noise, voltage_noise, sm_noise, time_noise)) data = data + noise N_test = int(0.1 * data.shape[0]) data_test = data[-N_test:] data = data[0:-N_test] N_validate = int(0.1 * data.shape[0]) data_validate = data[-N_validate:] data_train = data[0:-N_validate] return data_train, data_validate, data_test def load_data_normalised(): data_train, data_validate, data_test = load_data_split_with_noise() data = np.vstack((data_train, data_validate)) mu = data.mean(axis=0) s = data.std(axis=0) data_train = (data_train - mu) / s data_validate = (data_validate - mu) / s data_test = (data_test - mu) / s return data_train, data_validate, data_test
1,940
24.88
108
py
steer
steer-master/ffjord/datasets/hepmass.py
import pandas as pd import numpy as np from collections import Counter from os.path import join import datasets class HEPMASS: """ The HEPMASS data set. http://archive.ics.uci.edu/ml/datasets/HEPMASS """ class Data: def __init__(self, data): self.x = data.astype(np.float32) self.N = self.x.shape[0] def __init__(self): path = datasets.root + 'hepmass/' trn, val, tst = load_data_no_discrete_normalised_as_array(path) self.trn = self.Data(trn) self.val = self.Data(val) self.tst = self.Data(tst) self.n_dims = self.trn.x.shape[1] def load_data(path): data_train = pd.read_csv(filepath_or_buffer=join(path, "1000_train.csv"), index_col=False) data_test = pd.read_csv(filepath_or_buffer=join(path, "1000_test.csv"), index_col=False) return data_train, data_test def load_data_no_discrete(path): """ Loads the positive class examples from the first 10 percent of the dataset. """ data_train, data_test = load_data(path) # Gets rid of any background noise examples i.e. class label 0. data_train = data_train[data_train[data_train.columns[0]] == 1] data_train = data_train.drop(data_train.columns[0], axis=1) data_test = data_test[data_test[data_test.columns[0]] == 1] data_test = data_test.drop(data_test.columns[0], axis=1) # Because the data set is messed up! data_test = data_test.drop(data_test.columns[-1], axis=1) return data_train, data_test def load_data_no_discrete_normalised(path): data_train, data_test = load_data_no_discrete(path) mu = data_train.mean() s = data_train.std() data_train = (data_train - mu) / s data_test = (data_test - mu) / s return data_train, data_test def load_data_no_discrete_normalised_as_array(path): data_train, data_test = load_data_no_discrete_normalised(path) data_train, data_test = data_train.as_matrix(), data_test.as_matrix() i = 0 # Remove any features that have too many re-occurring real values. 
features_to_remove = [] for feature in data_train.T: c = Counter(feature) max_count = np.array([v for k, v in sorted(c.items())])[0] if max_count > 5: features_to_remove.append(i) i += 1 data_train = data_train[:, np.array([i for i in range(data_train.shape[1]) if i not in features_to_remove])] data_test = data_test[:, np.array([i for i in range(data_test.shape[1]) if i not in features_to_remove])] N = data_train.shape[0] N_validate = int(N * 0.1) data_validate = data_train[-N_validate:] data_train = data_train[0:-N_validate] return data_train, data_validate, data_test
2,730
28.365591
112
py